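"""FastAPI front end for PMBL: serves the chat UI and streams model replies."""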
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from modules.pmbl import PMBL
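
# Create the app with the auto-generated API docs (/docs and /redoc) disabled.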
app = FastAPI(docs_url=None, redoc_url=None)
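
# Serve static assets and the HTML templates directory as static files.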
app.mount("/static", StaticFiles(directory="static"), name="static")
app.mount("/templates", StaticFiles(directory="templates"), name="templates")

# Initialize PMBL with the model's Hugging Face repository ID and GGUF filename.
repo_id = "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF"
filename = "Meta-Llama-3-8B-Instruct-Q8_0.gguf"
pmbl = PMBL(repo_id, filename)
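
# Serve the chat page; HEAD is accepted too (e.g. for platform health checks).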
@app.head("/")
@app.get("/")
def index() -> HTMLResponse:
    with open("templates/index.html") as f:
        return HTMLResponse(content=f.read())
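
# Accept a JSON body with "user_input" and "mode", then stream the model's
# reply back to the client incrementally as plain text.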
@app.post("/chat")
async def chat(request: Request):
    try:
        data = await request.json()
        user_input = data["user_input"]
        mode = data["mode"]
        history = pmbl.get_chat_history(mode, user_input)
        response_generator = pmbl.generate_response(user_input, history, mode)
        return StreamingResponse(response_generator, media_type="text/plain")
    except Exception as e:
        print(f"[SYSTEM] Error: {str(e)}")
        return {"error": str(e)}
@app.post("/sleep")
async def sleep():
    try:
        pmbl.sleep_mode()
        return {"message": "Sleep mode completed successfully"}
    except Exception as e:
        print(f"[SYSTEM] Error: {str(e)}")
        return {"error": str(e)}