import os
import asyncio
import tempfile
import uuid
import logging
from datetime import datetime, timedelta
from io import BytesIO
from typing import Optional, List, Union, Dict, Any

import boto3
import soundfile as sf  # used to serialize text-to-speech output; assumes the soundfile package is installed
import torch
import uvicorn
import asyncpg
from botocore.exceptions import ClientError
from fastapi import (
    FastAPI, HTTPException, UploadFile, Depends,
    BackgroundTasks, Request, status,
)
from fastapi.exceptions import RequestValidationError
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from huggingface_hub import snapshot_download
from jose import JWTError, jwt
from passlib.context import CryptContext
from PIL import Image
from pydantic import BaseModel, validator, Field, root_validator, EmailStr, constr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    StoppingCriteriaList,
    pipeline,
    AutoProcessor,
    AutoModelForImageClassification,
    AutoModelForSeq2SeqLM,
    AutoModelForQuestionAnswering,
    AutoFeatureExtractor,
    AutoModelForTokenClassification,
    AutoModelForMaskedLM,
    AutoImageProcessor,
)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s',
)
logger = logging.getLogger(__name__)

SECRET_KEY = os.getenv("SECRET_KEY")
if not SECRET_KEY:
    raise ValueError("SECRET_KEY must be set.")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
API_KEY = os.getenv("API_KEY")
api_key_header = APIKeyHeader(name="X-API-Key")

AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.getenv("AWS_REGION")
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
HUGGINGFACE_HUB_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN")

TEMP_DIR = "/tmp"
STATIC_DIR = "static"
TEMPLATES = Jinja2Templates(directory="templates")
DATABASE_URL = os.getenv("DATABASE_URL")

app = FastAPI()
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.add_middleware(GZipMiddleware)
# Note: wildcard origins combined with allow_credentials is unsafe in
# production; restrict allow_origins before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class User(BaseModel):
    username: constr(min_length=3, max_length=50)
    email: EmailStr
    password: constr(min_length=8)


class UserOut(BaseModel):
    """Public view of a user; never echoes the password back."""
    username: str
    email: EmailStr
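# For reference, a minimal environment for this service might look like the
# sketch below. All values are illustrative placeholders; only the variable
# names are fixed by the os.getenv() calls above.
#
#   export SECRET_KEY="change-me"
#   export API_KEY="change-me-too"
#   export AWS_ACCESS_KEY_ID="..."
#   export AWS_SECRET_ACCESS_KEY="..."
#   export AWS_REGION="us-east-1"
#   export S3_BUCKET_NAME="my-model-cache"
#   export HUGGINGFACE_HUB_TOKEN="hf_..."
#   export DATABASE_URL="postgresql://user:pass@localhost:5432/app"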
class GenerateRequest(BaseModel):
    # Note: the UploadFile fields below can only be populated from a
    # multipart form submission, not from a plain JSON body.
    model_id: str
    input_text: Optional[str] = Field(None)
    task_type: str = Field(...)
    temperature: float = 1.0
    max_new_tokens: int = 200
    stream: bool = True
    top_p: float = 1.0
    top_k: int = 50
    repetition_penalty: float = 1.0
    num_return_sequences: int = 1
    do_sample: bool = True
    chunk_delay: float = 0.0
    stop_sequences: List[str] = Field(default_factory=list)
    image_file: Optional[UploadFile] = None
    source_language: Optional[str] = None
    target_language: Optional[str] = None
    context: Optional[str] = None
    audio_file: Optional[UploadFile] = None
    raw_input: Optional[Union[str, bytes]] = None
    masked_text: Optional[str] = None
    mask_image: Optional[UploadFile] = None
    low_res_image: Optional[UploadFile] = None

    @validator('task_type')
    def validate_task_type(cls, value):
        allowed_types = [
            "text", "image", "audio", "video", "classification", "translation",
            "question-answering", "speech-to-text", "text-to-speech",
            "image-segmentation", "feature-extraction", "token-classification",
            "fill-mask", "image-inpainting", "image-super-resolution",
            "object-detection", "image-captioning", "audio-transcription",
            "summarization",
        ]
        if value not in allowed_types:
            raise ValueError(f"Invalid task_type. Allowed types are: {allowed_types}")
        return value

    @root_validator(pre=True)
    def check_input(cls, values):
        task_type = values.get("task_type")
        if task_type == "text" and values.get("input_text") is None:
            raise ValueError("input_text is required for text generation.")
        elif task_type == "speech-to-text" and values.get("audio_file") is None:
            raise ValueError("audio_file is required for speech-to-text.")
        elif task_type == "classification" and values.get("image_file") is None:
            raise ValueError("image_file is required for image classification.")
        elif task_type == "image-segmentation" and values.get("image_file") is None:
            raise ValueError("image_file is required for image segmentation.")
        elif task_type == "feature-extraction" and values.get("raw_input") is None:
            raise ValueError("raw_input is required for feature extraction.")
        elif task_type == "fill-mask" and values.get("masked_text") is None:
            raise ValueError("masked_text is required for fill-mask.")
        elif task_type == "image-inpainting" and (values.get("image_file") is None or values.get("mask_image") is None):
            raise ValueError("image_file and mask_image are required for image inpainting.")
        elif task_type == "image-super-resolution" and values.get("low_res_image") is None:
            raise ValueError("low_res_image is required for image super-resolution.")
        return values
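# A minimal text-generation request built from this model might look like the
# following (the model name and prompt are illustrative; X-API-Key must match
# the API_KEY environment variable):
#
#   curl -N -X POST http://localhost:7860/generate \
#        -H "X-API-Key: $API_KEY" \
#        -H "Content-Type: application/json" \
#        -d '{"model_id": "gpt2", "task_type": "text", "input_text": "Hello"}'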
class S3ModelLoader:
    """Caches Hugging Face models in S3 and loads them per task type."""

    def __init__(self, bucket_name, aws_access_key_id, aws_secret_access_key, aws_region):
        self.bucket_name = bucket_name
        self.s3 = boto3.client(
            's3',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=aws_region,
        )

    def _get_s3_prefix(self, model_name):
        # Object keys must not repeat the bucket name; use only the slug
        # derived from the model name as the key prefix.
        return model_name.replace('/', '-')

    def load_model_and_tokenizer(self, model_name, task_type):
        prefix = self._get_s3_prefix(model_name)
        try:
            self.s3.head_object(Bucket=self.bucket_name, Key=f'{prefix}/config.json')
        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                # Not cached yet: pull the snapshot from the Hub and mirror it into S3.
                with tempfile.TemporaryDirectory() as tmpdir:
                    model_path = snapshot_download(model_name, token=HUGGINGFACE_HUB_TOKEN, cache_dir=tmpdir)
                    self._upload_model_to_s3(model_path, prefix)
            else:
                raise HTTPException(status_code=500, detail=f"Error accessing S3: {e}")
        return self._load_from_s3(prefix, task_type)

    def _upload_model_to_s3(self, model_path, prefix):
        for root, _, files in os.walk(model_path):
            for file in files:
                local_path = os.path.join(root, file)
                # S3 keys always use forward slashes, regardless of OS.
                s3_key = f"{prefix}/{os.path.relpath(local_path, model_path)}"
                self.s3.upload_file(local_path, self.bucket_name, s3_key)

    def _load_from_s3(self, prefix, task_type):
        with tempfile.TemporaryDirectory() as tmpdir:
            model_path = os.path.join(tmpdir, prefix)
            os.makedirs(model_path, exist_ok=True)
            # Download every object under the prefix, not just config.json;
            # from_pretrained needs the weights and tokenizer files too.
            paginator = self.s3.get_paginator('list_objects_v2')
            for page in paginator.paginate(Bucket=self.bucket_name, Prefix=f"{prefix}/"):
                for obj in page.get('Contents', []):
                    rel_path = obj['Key'][len(prefix) + 1:]
                    local_path = os.path.join(model_path, rel_path)
                    os.makedirs(os.path.dirname(local_path), exist_ok=True)
                    self.s3.download_file(self.bucket_name, obj['Key'], local_path)

            device = 0 if torch.cuda.is_available() else -1

            if task_type == "text":
                model = AutoModelForCausalLM.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                if tokenizer.eos_token_id is None:
                    tokenizer.eos_token_id = tokenizer.pad_token_id
                return {
                    "model": model,
                    "tokenizer": tokenizer,
                    "pad_token_id": tokenizer.pad_token_id,
                    "eos_token_id": tokenizer.eos_token_id,
                }
            elif task_type in ["image", "audio", "video"]:
                # These generic names are forwarded to the pipeline factory
                # verbatim, so they must correspond to tasks it recognizes.
                processor = AutoProcessor.from_pretrained(model_path)
                pipeline_function = pipeline(task_type, model=model_path, device=device, processor=processor)
                return {"pipeline": pipeline_function}
            elif task_type == "classification":
                model = AutoModelForImageClassification.from_pretrained(model_path)
                processor = AutoProcessor.from_pretrained(model_path)
                return {"model": model, "processor": processor}
            elif task_type == "translation":
                model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                return {"model": model, "tokenizer": tokenizer}
            elif task_type == "question-answering":
                model = AutoModelForQuestionAnswering.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                return {"model": model, "tokenizer": tokenizer}
            elif task_type in ("speech-to-text", "audio-transcription"):
                return {"pipeline": pipeline("automatic-speech-recognition", model=model_path, device=device)}
            elif task_type == "text-to-speech":
                return {"pipeline": pipeline("text-to-speech", model=model_path, device=device)}
            elif task_type == "image-segmentation":
                return {"pipeline": pipeline("image-segmentation", model=model_path, device=device)}
            elif task_type == "feature-extraction":
                feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
                return {"feature_extractor": feature_extractor}
            elif task_type == "token-classification":
                model = AutoModelForTokenClassification.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                return {"model": model, "tokenizer": tokenizer}
            elif task_type == "fill-mask":
                model = AutoModelForMaskedLM.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                return {"model": model, "tokenizer": tokenizer}
            elif task_type in ("image-inpainting", "image-super-resolution"):
                # transformers has no dedicated inpainting or super-resolution
                # task names; "image-to-image" is the closest pipeline task and
                # requires a model that supports it (true inpainting generally
                # needs a diffusers pipeline instead).
                return {"pipeline": pipeline("image-to-image", model=model_path, device=device)}
            elif task_type == "object-detection":
                model = pipeline("object-detection", model=model_path, device=device)
                image_processor = AutoImageProcessor.from_pretrained(model_path)
                return {"pipeline": model, "image_processor": image_processor}
            elif task_type == "image-captioning":
                # Captioning is served by the "image-to-text" pipeline task.
                return {"pipeline": pipeline("image-to-text", model=model_path, device=device)}
            elif task_type == "summarization":
                # The endpoint calls model.generate() directly, so load a
                # seq2seq model rather than a pipeline here.
                model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                return {"model": model, "tokenizer": tokenizer}
            else:
                raise ValueError("Unsupported task type")
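# Illustrative use of the loader outside the request cycle (the model name is
# an example; any Hub model compatible with the task type works):
#
#   loader = S3ModelLoader(S3_BUCKET_NAME, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION)
#   data = loader.load_model_and_tokenizer("gpt2", "text")
#   tokens = data["tokenizer"]("Hello", return_tensors="pt")
#   print(data["tokenizer"].decode(data["model"].generate(**tokens)[0]))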
async def stream_text(model, tokenizer, input_text, generation_config, stop_sequences, device, chunk_delay):
    try:
        encoded_input = tokenizer(input_text, return_tensors="pt", truncation=True).to(device)
        input_length = encoded_input["input_ids"].shape[1]
        # Clamp generation to the model's context window. max_position_embeddings
        # is the usual attribute; fall back to the tokenizer's limit.
        context_window = getattr(model.config, "max_position_embeddings", tokenizer.model_max_length)
        remaining_tokens = context_window - input_length
        if remaining_tokens <= 0:
            yield ""
            return
        generation_config.max_new_tokens = min(remaining_tokens, generation_config.max_new_tokens)

        def stop_criteria(input_ids, scores, **kwargs):
            # Stop once the generated continuation ends with any stop sequence.
            decoded_output = tokenizer.decode(input_ids[0][input_length:], skip_special_tokens=True)
            return any(decoded_output.endswith(seq) for seq in stop_sequences)

        stopping_criteria = StoppingCriteriaList([stop_criteria]) if stop_sequences else None

        # Note: generate() runs synchronously and blocks the event loop for the
        # duration of generation; tokens are streamed out only afterwards.
        outputs = model.generate(
            **encoded_input,
            do_sample=generation_config.do_sample,
            max_new_tokens=generation_config.max_new_tokens,
            temperature=generation_config.temperature,
            top_p=generation_config.top_p,
            top_k=generation_config.top_k,
            repetition_penalty=generation_config.repetition_penalty,
            num_return_sequences=generation_config.num_return_sequences,
            stopping_criteria=stopping_criteria,
            output_scores=True,
            return_dict_in_generate=True,
        )
        for output in outputs.sequences:
            # Skip the prompt tokens; emit only the generated continuation.
            for token_id in output[input_length:]:
                token = tokenizer.decode(token_id, skip_special_tokens=True)
                yield token
                if chunk_delay:
                    await asyncio.sleep(chunk_delay)
    except Exception as e:
        yield f"Error during text generation: {e}"


model_loader = S3ModelLoader(S3_BUCKET_NAME, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION)


def get_model_data(request: GenerateRequest):
    return model_loader.load_model_and_tokenizer(request.model_id, request.task_type)


async def verify_api_key(api_key: str = Depends(api_key_header)):
    if api_key != API_KEY:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API Key")
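# The /generate endpoint defined below streams plain text for the "text" task.
# A client-side sketch that consumes the stream incrementally, using httpx
# (an assumed client dependency, not required by the server itself):
#
#   import httpx
#   with httpx.stream("POST", "http://localhost:7860/generate",
#                     headers={"X-API-Key": "..."},
#                     json={"model_id": "gpt2", "task_type": "text",
#                           "input_text": "Once upon a time"}) as r:
#       for chunk in r.iter_text():
#           print(chunk, end="", flush=True)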
@app.post("/generate", dependencies=[Depends(verify_api_key)])
async def generate(request: GenerateRequest, background_tasks: BackgroundTasks, model_data=Depends(get_model_data)):
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"

        if request.task_type == "text":
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            generation_config = GenerationConfig(
                temperature=request.temperature,
                max_new_tokens=request.max_new_tokens,
                top_p=request.top_p,
                top_k=request.top_k,
                repetition_penalty=request.repetition_penalty,
                do_sample=request.do_sample,
                num_return_sequences=request.num_return_sequences,
            )
            return StreamingResponse(
                stream_text(model, tokenizer, request.input_text, generation_config,
                            request.stop_sequences, device, request.chunk_delay),
                media_type="text/plain",
            )

        elif request.task_type in ["image", "audio", "video"]:
            pipeline_func = model_data["pipeline"]
            try:
                # This generic branch assumes the pipeline returns objects that
                # expose a save() method (e.g. PIL images); pipelines that
                # return dicts or arrays need task-specific handling instead.
                result = pipeline_func(request.input_text)
                if request.task_type == "image":
                    image = result[0]
                    img_byte_arr = BytesIO()
                    image.save(img_byte_arr, format="PNG")
                    img_byte_arr.seek(0)
                    return StreamingResponse(img_byte_arr, media_type="image/png")
                elif request.task_type == "audio":
                    audio = result[0]
                    audio_byte_arr = BytesIO()
                    audio.save(audio_byte_arr, format="wav")
                    audio_byte_arr.seek(0)
                    return StreamingResponse(audio_byte_arr, media_type="audio/wav")
                elif request.task_type == "video":
                    video = result[0]
                    video_byte_arr = BytesIO()
                    video.save(video_byte_arr, format="mp4")
                    video_byte_arr.seek(0)
                    return StreamingResponse(video_byte_arr, media_type="video/mp4")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error processing {request.task_type}: {e}")

        elif request.task_type == "classification":
            if request.image_file is None:
                raise HTTPException(status_code=400, detail="Image file is required for classification.")
            contents = await request.image_file.read()
            image = Image.open(BytesIO(contents)).convert("RGB")
            model = model_data["model"].to(device)
            processor = model_data["processor"]
            inputs = processor(images=image, return_tensors="pt").to(device)
            with torch.no_grad():
                outputs = model(**inputs)
            predicted_class_idx = outputs.logits.argmax().item()
            predicted_class = model.config.id2label[predicted_class_idx]
            return JSONResponse({"predicted_class": predicted_class})

        elif request.task_type == "translation":
            if request.source_language is None or request.target_language is None:
                raise HTTPException(status_code=400, detail="Source and target languages are required for translation.")
            # Note: the language fields are validated but not forwarded to the
            # model; multilingual models (e.g. mBART, NLLB) additionally need
            # language codes passed to the tokenizer or generate().
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            inputs = tokenizer(request.input_text, return_tensors="pt").to(device)
            with torch.no_grad():
                outputs = model.generate(**inputs)
            translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
            return JSONResponse({"translation": translation})

        elif request.task_type == "question-answering":
            if request.context is None:
                raise HTTPException(status_code=400, detail="Context is required for question answering.")
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            # The tokenizer takes question and context as a positional text pair.
            inputs = tokenizer(request.input_text, request.context, return_tensors="pt").to(device)
            with torch.no_grad():
                outputs = model(**inputs)
            answer_start = torch.argmax(outputs.start_logits)
            answer_end = torch.argmax(outputs.end_logits) + 1
            answer = tokenizer.convert_tokens_to_string(
                tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][answer_start:answer_end]))
            return JSONResponse({"answer": answer})

        elif request.task_type == "speech-to-text":
            if request.audio_file is None:
                raise HTTPException(status_code=400, detail="Audio file is required for speech-to-text.")
            contents = await request.audio_file.read()
            pipeline_func = model_data["pipeline"]
            try:
                # The ASR pipeline accepts raw file bytes (decoded via ffmpeg)
                # and returns a dict, not a list.
                transcription = pipeline_func(contents)["text"]
                return JSONResponse({"transcription": transcription})
            except Exception as e:
                logger.exception(f"Error during speech-to-text: {e}")
                raise HTTPException(status_code=500, detail=f"Error during speech-to-text: {str(e)}") from e

        elif request.task_type == "text-to-speech":
            if not request.input_text:
                raise HTTPException(status_code=400, detail="Input text is required for text-to-speech.")
            pipeline_func = model_data["pipeline"]
            try:
                # The TTS pipeline returns {"audio": ndarray, "sampling_rate": int};
                # serialize it to WAV with soundfile.
                result = pipeline_func(request.input_text)
                file_path = os.path.join(TEMP_DIR, f"{uuid.uuid4()}.wav")
                sf.write(file_path, result["audio"].squeeze(), result["sampling_rate"])
                background_tasks.add_task(os.remove, file_path)
                return FileResponse(file_path, media_type="audio/wav")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during text-to-speech: {str(e)}")

        elif request.task_type == "image-segmentation":
            if request.image_file is None:
                raise HTTPException(status_code=400, detail="Image file is required for image segmentation.")
            contents = await request.image_file.read()
            image = Image.open(BytesIO(contents)).convert("RGB")
            pipeline_func = model_data["pipeline"]
            try:
                result = pipeline_func(image)
                mask = result[0]['mask']  # PIL image for the first segment
                mask_byte_arr = BytesIO()
                mask.save(mask_byte_arr, format="PNG")
                mask_byte_arr.seek(0)
                return StreamingResponse(mask_byte_arr, media_type="image/png")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during image segmentation: {e}")
        elif request.task_type == "feature-extraction":
            if request.raw_input is None:
                raise HTTPException(status_code=400, detail="raw_input is required for feature extraction.")
            feature_extractor = model_data["feature_extractor"]
            try:
                if isinstance(request.raw_input, bytes):
                    image = Image.open(BytesIO(request.raw_input)).convert("RGB")
                    inputs = feature_extractor(images=image, return_tensors="pt")
                    features = inputs.pixel_values
                else:
                    # AutoFeatureExtractor covers vision/audio preprocessing;
                    # extracting features from text would need a tokenizer and
                    # model pair instead.
                    raise ValueError("Text feature extraction is not supported by this loader.")
                return JSONResponse({"features": features.tolist()})
            except Exception as fe:
                raise HTTPException(status_code=400, detail=f"Error during feature extraction: {fe}")

        elif request.task_type == "token-classification":
            if request.input_text is None:
                raise HTTPException(status_code=400, detail="Input text is required for token classification.")
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            inputs = tokenizer(request.input_text, return_tensors="pt", padding=True, truncation=True).to(device)
            with torch.no_grad():
                outputs = model(**inputs)
            predictions = outputs.logits.argmax(dim=-1)
            predicted_labels = [model.config.id2label[label_id] for label_id in predictions[0].tolist()]
            return JSONResponse({"predicted_labels": predicted_labels})

        elif request.task_type == "fill-mask":
            if request.masked_text is None:
                raise HTTPException(status_code=400, detail="masked_text is required for fill-mask.")
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            inputs = tokenizer(request.masked_text, return_tensors="pt").to(device)
            with torch.no_grad():
                outputs = model(**inputs)
            logits = outputs.logits
            masked_index = torch.where(inputs.input_ids == tokenizer.mask_token_id)[1]
            predicted_token_id = torch.argmax(logits[0, masked_index])
            predicted_token = tokenizer.decode(predicted_token_id)
            return JSONResponse({"predicted_token": predicted_token})

        elif request.task_type == "image-inpainting":
            if request.image_file is None or request.mask_image is None:
                raise HTTPException(status_code=400, detail="image_file and mask_image are required for image inpainting.")
            image_contents = await request.image_file.read()
            mask_contents = await request.mask_image.read()
            image = Image.open(BytesIO(image_contents)).convert("RGB")
            mask = Image.open(BytesIO(mask_contents)).convert("L")
            pipeline_func = model_data["pipeline"]
            try:
                # Assumes the loaded pipeline accepts (image, mask); transformers'
                # image-to-image task does not, so real inpainting typically
                # requires a diffusers inpainting pipeline as the backend.
                result = pipeline_func(image, mask)
                inpainted_image = result[0] if isinstance(result, list) else result
                img_byte_arr = BytesIO()
                inpainted_image.save(img_byte_arr, format="PNG")
                img_byte_arr.seek(0)
                return StreamingResponse(img_byte_arr, media_type="image/png")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during image inpainting: {e}")

        elif request.task_type == "image-super-resolution":
            if request.low_res_image is None:
                raise HTTPException(status_code=400, detail="low_res_image is required for image super-resolution.")
            contents = await request.low_res_image.read()
            image = Image.open(BytesIO(contents)).convert("RGB")
            pipeline_func = model_data["pipeline"]
            try:
                result = pipeline_func(image)
                # The image-to-image pipeline may return a single PIL image or a list.
                upscaled_image = result[0] if isinstance(result, list) else result
                img_byte_arr = BytesIO()
                upscaled_image.save(img_byte_arr, format="PNG")
                img_byte_arr.seek(0)
                return StreamingResponse(img_byte_arr, media_type="image/png")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during image super-resolution: {e}")
        elif request.task_type == "object-detection":
            if request.image_file is None:
                raise HTTPException(status_code=400, detail="Image file is required for object detection.")
            contents = await request.image_file.read()
            image = Image.open(BytesIO(contents)).convert("RGB")
            pipeline_func = model_data["pipeline"]
            try:
                # The pipeline handles preprocessing itself and returns
                # JSON-serializable dicts with score, label, and box.
                detections = pipeline_func(image)
                return JSONResponse({"detections": detections})
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during object detection: {e}")

        elif request.task_type == "image-captioning":
            if request.image_file is None:
                raise HTTPException(status_code=400, detail="Image file is required for image captioning.")
            contents = await request.image_file.read()
            image = Image.open(BytesIO(contents)).convert("RGB")
            pipeline_func = model_data["pipeline"]
            try:
                caption = pipeline_func(image)[0]['generated_text']
                return JSONResponse({"caption": caption})
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during image captioning: {e}")

        elif request.task_type == "audio-transcription":
            if request.audio_file is None:
                raise HTTPException(status_code=400, detail="Audio file is required for audio transcription.")
            contents = await request.audio_file.read()
            pipeline_func = model_data["pipeline"]
            try:
                transcription = pipeline_func(contents)["text"]
                return JSONResponse({"transcription": transcription})
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Error during audio transcription: {str(e)}")

        elif request.task_type == "summarization":
            if request.input_text is None:
                raise HTTPException(status_code=400, detail="Input text is required for summarization.")
            model = model_data["model"].to(device)
            tokenizer = model_data["tokenizer"]
            inputs = tokenizer(request.input_text, return_tensors="pt", truncation=True, max_length=512).to(device)
            with torch.no_grad():
                try:
                    outputs = model.generate(**inputs)
                    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
                    return JSONResponse({"summary": summary})
                except Exception as e:
                    raise HTTPException(status_code=500, detail=f"Error during summarization: {e}")

        else:
            raise HTTPException(status_code=400, detail="Unsupported task type")
    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400s raised above) pass through
        # instead of being converted into 500s.
        raise
    except Exception as e:
        logger.exception(f"Internal server error: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@app.get("/health")
async def health_check():
    return {"status": "healthy"}


class Token(BaseModel):
    access_token: str
    token_type: str


@app.on_event("startup")
async def create_db_pool():
    # Create the connection pool once at startup instead of once per request.
    app.state.pool = await asyncpg.create_pool(DATABASE_URL)


@app.on_event("shutdown")
async def close_db_pool():
    await app.state.pool.close()


async def get_db():
    async with app.state.pool.acquire() as conn:
        yield conn


async def authenticate_user(username, password, conn):
    row = await conn.fetchrow("SELECT * FROM users WHERE username = $1", username)
    if row is not None and pwd_context.verify(password, row["hashed_password"]):
        return {"username": username}
    return None


@app.post("/token", response_model=Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends(), conn=Depends(get_db)):
    user = await authenticate_user(form_data.username, form_data.password, conn)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_access_token(data={"sub": user["username"]}, expires_delta=access_token_expires)
    return {"access_token": access_token, "token_type": "bearer"}
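# The user queries in this section assume a users table along these lines
# (illustrative schema; adjust types and constraints as needed):
#
#   CREATE TABLE users (
#       username        TEXT PRIMARY KEY,
#       email           TEXT UNIQUE NOT NULL,
#       hashed_password TEXT NOT NULL
#   );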
async def get_current_user(token: str = Depends(oauth2_scheme), conn=Depends(get_db)):
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
        user = await conn.fetchrow("SELECT * FROM users WHERE username = $1", username)
        if user is None:
            raise credentials_exception
        return username
    except JWTError:
        raise credentials_exception


@app.post("/register", response_model=UserOut, status_code=status.HTTP_201_CREATED)
async def create_user(user: User, conn=Depends(get_db)):
    hashed_password = pwd_context.hash(user.password)
    try:
        await conn.execute(
            "INSERT INTO users (username, email, hashed_password) VALUES ($1, $2, $3)",
            user.username, user.email, hashed_password,
        )
        # Return only the public fields; never echo the password back.
        return UserOut(username=user.username, email=user.email)
    except asyncpg.exceptions.UniqueViolationError:
        raise HTTPException(status_code=400, detail="Username or email already exists")


@app.put("/users/{username}", response_model=UserOut)
async def update_user_data(username: str, user: User, current_user: str = Depends(get_current_user), conn=Depends(get_db)):
    # Users may only modify their own account.
    if current_user != username:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not allowed to modify another user's account.")
    hashed_password = pwd_context.hash(user.password)
    try:
        await conn.execute(
            "UPDATE users SET email = $1, hashed_password = $2 WHERE username = $3",
            user.email, hashed_password, username,
        )
        return UserOut(username=username, email=user.email)
    except Exception as e:
        logger.error(f"Error updating user: {e}")
        raise HTTPException(status_code=500, detail="Error updating user.")


@app.delete("/users/{username}")
async def delete_user_account(username: str, current_user: str = Depends(get_current_user), conn=Depends(get_db)):
    # Users may only delete their own account.
    if current_user != username:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not allowed to delete another user's account.")
    try:
        await conn.execute("DELETE FROM users WHERE username = $1", username)
        return JSONResponse({"message": "User deleted successfully."}, status_code=200)
    except Exception as e:
        logger.error(f"Error deleting user: {e}")
        raise HTTPException(status_code=500, detail="Error deleting user.")


@app.get("/users", dependencies=[Depends(get_current_user)])
async def get_all_users_route(conn=Depends(get_db)):
    rows = await conn.fetch("SELECT username, email FROM users")
    return [{"username": row["username"], "email": row["email"]} for row in rows]


@app.get("/users/me")  # Requires authentication via the bearer token.
async def read_users_me(current_user: str = Depends(get_current_user), conn=Depends(get_db)):
    user = await conn.fetchrow("SELECT username, email FROM users WHERE username = $1", current_user)
    if user:
        return {"username": user["username"], "email": user["email"]}
    raise HTTPException(status_code=404, detail="User not found")


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
    # JSONResponse serializes its content itself; passing a pre-dumped string
    # here would double-encode the payload.
    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content={"detail": exc.errors(), "body": exc.body},
    )


def create_access_token(data: Dict[str, Any], expires_delta: Optional[timedelta] = None):
    to_encode = data.copy()
    if expires_delta:
        expire = datetime.utcnow() + expires_delta
    else:
        expire = datetime.utcnow() + timedelta(minutes=15)
    to_encode.update({"exp": expire})
    return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
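# Quick smoke test once the server is running (all values illustrative; the
# /token call sends form-encoded credentials, as OAuth2PasswordRequestForm expects):
#
#   curl http://localhost:7860/health
#   curl -X POST http://localhost:7860/register \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "email": "alice@example.com", "password": "s3cretpass"}'
#   curl -X POST http://localhost:7860/token \
#        -d "username=alice&password=s3cretpass"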