import os

# Point Hugging Face at a writable cache directory *before* importing
# transformers; the library reads these variables at import time.
os.environ["TRANSFORMERS_CACHE"] = "/tmp"
os.environ["HF_HOME"] = "/tmp"
os.makedirs("/tmp", exist_ok=True)

import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Load DeepSeek-Coder-V2-Base; trust_remote_code=True is required because the
# repo ships custom model code, and device_map="auto" (via accelerate) places
# the weights across the available devices.
model_name = "deepseek-ai/DeepSeek-Coder-V2-Base"
tokenizer = AutoTokenizer.from_pretrained(
    model_name, cache_dir="/tmp", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    cache_dir="/tmp",
    trust_remote_code=True,
)


class CodeRequest(BaseModel):
    user_story: str


@app.post("/generate-code")
def generate_code(request: CodeRequest):
    """Generate structured, AI-powered code from a user story."""
    prompt = f"Generate structured code for: {request.user_story}"
    # Send the inputs to the device the model was actually placed on,
    # rather than guessing at "cuda"/"cpu" independently of device_map.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    try:
        # max_new_tokens bounds only the generated text; max_length would
        # also count the prompt tokens against the limit.
        output = model.generate(**inputs, max_new_tokens=300)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc)) from exc
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_code": generated_code}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
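
# A minimal sketch of how a client might exercise the endpoint once the server
# is running. The URL and payload shape follow the app above; the `requests`
# dependency and the example user story are assumptions, not part of this
# service:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/generate-code",
#       json={"user_story": "As a user, I want to reset my password via email."},
#       timeout=300,  # generation on large models can be slow
#   )
#   resp.raise_for_status()
#   print(resp.json()["generated_code"])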