from fastapi import FastAPI, Form
import torch
from transformers import pipeline
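
# Note: FastAPI's Form(...) parameters require the python-multipart package,
# and device_map="auto" below additionally requires accelerate to be installed.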
app = FastAPI()

# Load the TinyLlama chat model once at startup; bfloat16 reduces memory use
# and device_map="auto" places the weights on a GPU when one is available.
pipe = pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
@app.get("/")
def read_root():
return {"message": "Hello, World!"}
@app.get("/items/{item_id}")
def read_item(item_id: int, query_param: str = None):
return {"item_id": item_id, "query_param": query_param}
@app.post("/chat")
async def chat(sentence: str = Form(...)):
print("start chat")
if sentence.lower() == "quit":
return {"response": "Chatbot session ended."}
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": sentence},
]
print("start apply_chat_template")
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print("start outputs")
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
text = outputs[0]["generated_text"]
return {"response": text}