import openai
from dotenv import load_dotenv
import os

load_dotenv()

openai.api_key = os.environ.get("OPENAI_API_KEY")

# One-off smoke test of the chat completion call (pre-1.0 openai SDK interface).
# Note: this runs at import time, so it fires once whenever the module is loaded.
ans = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        # {"role": "system", "content": "You are a helpful assistant."},
        # {"role": "user", "content": "Who won the world series in 2020?"},
        # {"role": "assistant",
        #     "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": "Could you explain the difference between NestJS and Express?"},
    ]
)

print(ans)

@app.post("/gpt")
async def getGPT(txt: msg):
    gptmsg = txt.message
    ans = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        # {"role": "system", "content": "You are a helpful assistant."},
        # {"role": "user", "content": "Who won the world series in 2020?"},
        # {"role": "assistant",
        #     "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": gptmsg}
        ]
    )
    print(ans.choices[0].message.content)
    
    return "done"
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import requests
import config

from enum import Enum

import openai
from dotenv import load_dotenv
import os

load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
# source myprojectenv/bin/activate
# uvicorn index:app --reload
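# Expected .env contents (assumed; the variable name must match the os.environ lookup above):
#   OPENAI_API_KEY=sk-...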

app = FastAPI()

class ModelName(str, Enum):
    devnet = "devnet"
    testnet = "testnet"
    mainnet = "mainnet"

class msg(BaseModel):
    message: str
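
# A POST /gpt request body therefore looks like: {"message": "<prompt text>"}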

# Used only by the commented-out /items/ examples below.
fake_items_db = [{"item_name": "Foo"}, {"item_name": "Bar"}, {"item_name": "Baz"}]

@app.get("/")
async def root():
    return {"message": "Hello World"}

@app.get("/data")
async def get_data():
    return JSONResponse(content={"name": "lsj"})
    
# @app.get("/items/{item_id}")
# async def read_item(item_id : int) :
#     return {"item_id" : item_id+1}

@app.get("/models/{model_name}")
async def get_model(model_name : ModelName):
    if model_name is ModelName.devnet:
        return {"model_name": model_name, "msg" : "req to devnet"}
    
    if model_name.value == "testnet" :
        return {"model_name": model_name, "msg" : "req to testnet"}
    
    return {"model_name" : model_name, "message" : "req to mainnet"}

@app.get("/files/{file_path:path}")
async def read_file(file_path:str):
    return {"file_path" : file_path}

##

# @app.get("/items/")
# async def read_item(skip : int = 0, limit:int = 10):
#     return fake_items_db[skip : skip + limit]

# @app.get("/items/{item_id}")
# async def read_item(item_id: str, q : str | None = None):
#     if q:
#         return {"item_id":item_id, "q" : q}
#     return {"item_id": item_id}

# @app.get("/items/{item_id}")
# async def read_item(item_id: str, q: str | None = None, short: bool = False):
#     item = {"item_id" : item_id}
#     if q:
#         item.update({"q" : q})
#     if not short:
#         item.update(
#             {"description" : "asdfghj"}
#         )
#     return item

#

# @app.get("/users/{user_id}/items/{item_id}")
# async def read_user_item(
#     user_id : int, item_id : str, q : str | None = None, short : bool = False
# ):
#     item = {"item_id" : item_id, "owner_id" : user_id}
#     if q:
#         item.update({"q" : q})
#     if not short:
#         item.update(
#             {"description": "does not matter where we are"}
#         )
#     return item

@app.get("/items/{item_id}")
async def read_user_item(item_id : str, needy :str):
    item = {"item_id" : item_id, "needy" : needy}
    return item

@app.get("/coinPrice/{ticker}")
async def getCoinPrice(ticker : str) :
    url = config.binanceAPI + ticker
    response = requests.get(url)
    # print(response.json())
    
    if response.status_code == 200:
        return response.json()
    else :
        return {"error": "failed req", "status" : 404}

#

@app.post("/gpt")
async def getGPT(txt: msg):
    gptmsg = txt.message
    ans = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        # {"role": "system", "content": "You are a helpful assistant."},
        # {"role": "user", "content": "Who won the world series in 2020?"},
        # {"role": "assistant",
        #     "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": gptmsg}
        ]
    )
    
    gptans = ans.choices[0].message.content
    
    return gptans
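
# Example client call against a local dev server (assumed http://127.0.0.1:8000), e.g. from a
# separate Python shell:
#   import requests
#   r = requests.post("http://127.0.0.1:8000/gpt", json={"message": "What is FastAPI?"})
#   print(r.json())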