Hjgugugjhuhjggg committed
Commit: ebec48b (verified)
1 parent: 09e6b0b

Update app.py

Files changed (1): app.py (+78, -125)
app.py CHANGED
@@ -1,13 +1,14 @@
  import os
- import json
  import logging
  import boto3
- from fastapi import FastAPI, HTTPException, Query
- from fastapi.responses import JSONResponse
- from transformers import AutoModelForCausalLM, AutoTokenizer
- from huggingface_hub import hf_hub_download
  import asyncio

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
  console_handler = logging.StreamHandler()
@@ -15,6 +16,7 @@ formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
  console_handler.setFormatter(formatter)
  logger.addHandler(console_handler)

  AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
  AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
  AWS_REGION = os.getenv("AWS_REGION")
@@ -30,8 +32,16 @@ s3_client = boto3.client(
      region_name=AWS_REGION
  )

  app = FastAPI()

  class S3DirectStream:
      def __init__(self, bucket_name):
          self.s3_client = boto3.client(
@@ -42,6 +52,7 @@ class S3DirectStream:
          )
          self.bucket_name = bucket_name

      async def stream_from_s3(self, key):
          loop = asyncio.get_event_loop()
          return await loop.run_in_executor(None, self._stream_from_s3, key)
@@ -49,155 +60,97 @@ class S3DirectStream:
      def _stream_from_s3(self, key):
          try:
              response = self.s3_client.get_object(Bucket=self.bucket_name, Key=key)
-             return response['Body']
          except self.s3_client.exceptions.NoSuchKey:
              raise HTTPException(status_code=404, detail=f"The file {key} does not exist in the S3 bucket.")
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error downloading {key} from S3: {str(e)}")

-     async def get_model_file_parts(self, model_name):
-         loop = asyncio.get_event_loop()
-         return await loop.run_in_executor(None, self._get_model_file_parts, model_name)
-
-     def _get_model_file_parts(self, model_name):
-         try:
-             model_name = model_name.replace("/", "-").lower()
-             files = self.s3_client.list_objects_v2(Bucket=self.bucket_name, Prefix=model_name)
-             model_files = [obj['Key'] for obj in files.get('Contents', []) if model_name in obj['Key']]
-             return model_files
-         except Exception as e:
-             raise HTTPException(status_code=500, detail=f"Error retrieving files for model {model_name} from S3: {e}")
-
      async def load_model_from_s3(self, model_name):
          try:
              model_name = model_name.replace("/", "-").lower()
-             model_files = await self.get_model_file_parts(model_name)
-
-             if not model_files:
-                 await self.download_and_upload_to_s3(model_name)
-
-             config_stream = await self.stream_from_s3(f"{model_name}/config.json")
-             config_data = config_stream.read()
-
-             if not config_data:
-                 raise HTTPException(status_code=500, detail=f"The configuration file {model_name}/config.json is empty or could not be read.")
-
-             config_text = config_data.decode("utf-8")
-             config_json = json.loads(config_text)
-
-             model = AutoModelForCausalLM.from_pretrained(f"s3://{self.bucket_name}/{model_name}", config=config_json, from_tf=False)
              return model
-
          except HTTPException as e:
              raise e
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error loading the model from S3: {e}")

      async def load_tokenizer_from_s3(self, model_name):
          try:
              model_name = model_name.replace("/", "-").lower()
-             tokenizer_stream = await self.stream_from_s3(f"{model_name}/tokenizer.json")
-             tokenizer_data = tokenizer_stream.read().decode("utf-8")
-
-             tokenizer = AutoTokenizer.from_pretrained(f"s3://{self.bucket_name}/{model_name}")
              return tokenizer
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error loading the tokenizer from S3: {e}")

-     async def create_s3_folders(self, s3_key):
-         try:
-             folder_keys = s3_key.split('-')
-             for i in range(1, len(folder_keys)):
-                 folder_key = '-'.join(folder_keys[:i]) + '/'
-                 if not await self.file_exists_in_s3(folder_key):
-                     logger.info(f"Creating folder in S3: {folder_key}")
-                     self.s3_client.put_object(Bucket=self.bucket_name, Key=folder_key, Body='')
-
-         except Exception as e:
-             raise HTTPException(status_code=500, detail=f"Error creating folders in S3: {e}")
-
-     async def file_exists_in_s3(self, s3_key):
-         try:
-             self.s3_client.head_object(Bucket=self.bucket_name, Key=s3_key)
-             return True
-         except self.s3_client.exceptions.ClientError:
-             return False
-
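The removed file_exists_in_s3 treated any ClientError as "object missing", which also swallows permission and credential failures. A minimal sketch of a stricter check, using only standard boto3/botocore calls (bucket and key names are illustrative):

    # Sketch: distinguish "key not found" from other S3 errors when probing with head_object.
    import boto3
    from botocore.exceptions import ClientError

    def object_exists(s3_client, bucket: str, key: str) -> bool:
        try:
            s3_client.head_object(Bucket=bucket, Key=key)
            return True
        except ClientError as e:
            # A missing key surfaces as a 404/NotFound error code; anything else is re-raised.
            if e.response["Error"]["Code"] in ("404", "NoSuchKey", "NotFound"):
                return False
            raise

    # Example with illustrative names: object_exists(boto3.client("s3"), "my-bucket", "gpt2/config.json")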
-     async def download_and_upload_to_s3(self, model_name, force_download=False):
          try:
-             if force_download:
-                 logger.info(f"Forcing download of model {model_name} and upload to S3.")
-
              model_name = model_name.replace("/", "-").lower()
-
-             if not await self.file_exists_in_s3(f"{model_name}/config.json") or not await self.file_exists_in_s3(f"{model_name}/tokenizer.json"):
-                 config_file = hf_hub_download(repo_id=model_name, filename="config.json", token=HUGGINGFACE_HUB_TOKEN, force_download=force_download)
-                 tokenizer_file = hf_hub_download(repo_id=model_name, filename="tokenizer.json", token=HUGGINGFACE_HUB_TOKEN, force_download=force_download)
-
-                 await self.create_s3_folders(f"{model_name}/")
-
-                 if not await self.file_exists_in_s3(f"{model_name}/config.json"):
-                     with open(config_file, "rb") as file:
-                         self.s3_client.put_object(Bucket=self.bucket_name, Key=f"{model_name}/config.json", Body=file)
-
-                 if not await self.file_exists_in_s3(f"{model_name}/tokenizer.json"):
-                     with open(tokenizer_file, "rb") as file:
-                         self.s3_client.put_object(Bucket=self.bucket_name, Key=f"{model_name}/tokenizer.json", Body=file)
-             else:
-                 logger.info(f"The files for model {model_name} already exist in S3. No need to download them again.")
-
-         except Exception as e:
-             raise HTTPException(status_code=500, detail=f"Error downloading or uploading files from Hugging Face to S3: {e}")
-
-     async def resume_download(self, model_name):
-         try:
-             logger.info(f"Resuming download of model {model_name} from Hugging Face.")
-             config_file = hf_hub_download(repo_id=model_name, filename="config.json", token=HUGGINGFACE_HUB_TOKEN, resume_download=True)
-             tokenizer_file = hf_hub_download(repo_id=model_name, filename="tokenizer.json", token=HUGGINGFACE_HUB_TOKEN, resume_download=True)
-
-             if not await self.file_exists_in_s3(f"{model_name}/config.json"):
-                 with open(config_file, "rb") as file:
-                     self.s3_client.put_object(Bucket=self.bucket_name, Key=f"{model_name}/config.json", Body=file)
-
-             if not await self.file_exists_in_s3(f"{model_name}/tokenizer.json"):
-                 with open(tokenizer_file, "rb") as file:
-                     self.s3_client.put_object(Bucket=self.bucket_name, Key=f"{model_name}/tokenizer.json", Body=file)
-
          except Exception as e:
-             raise HTTPException(status_code=500, detail=f"Error resuming the download or uploading files from Hugging Face to S3: {e}")
-
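The removed mirroring logic downloads config.json and tokenizer.json from the Hugging Face Hub and re-uploads them to S3. A compact sketch of that round trip: hf_hub_download and upload_file are standard calls, while the bucket name and key layout are assumptions. Note that the Hub repo_id must keep its original owner/name form even though the S3 prefix replaces the slash, something the removed code got wrong by renaming before the download.

    # Sketch: mirror selected Hub files into S3 so later requests can be served from the bucket.
    import os
    import boto3
    from huggingface_hub import hf_hub_download

    def mirror_to_s3(repo_id: str, bucket: str, filenames=("config.json", "tokenizer.json")):
        s3 = boto3.client("s3")
        prefix = repo_id.replace("/", "-").lower()  # same S3 key convention as app.py
        for name in filenames:
            local_path = hf_hub_download(repo_id=repo_id, filename=name,
                                         token=os.getenv("HUGGINGFACE_HUB_TOKEN"))
            s3.upload_file(local_path, bucket, f"{prefix}/{name}")  # upload the downloaded file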
- def split_text_by_tokens(text, tokenizer, max_tokens=MAX_TOKENS):
-     tokens = tokenizer.encode(text)
-     chunks = []
-     for i in range(0, len(tokens), max_tokens):
-         chunk = tokens[i:i+max_tokens]
-         chunks.append(tokenizer.decode(chunk))
-     return chunks
-
- def continue_generation(input_text, model, tokenizer, max_tokens=MAX_TOKENS):
-     generated_text = ""
-     while len(input_text) > 0:
-         chunks = split_text_by_tokens(input_text, tokenizer, max_tokens)
-         for chunk in chunks:
-             generated_text += model.generate(chunk)
-     return generated_text
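As written, continue_generation never consumes input_text and feeds raw text chunks to model.generate, so the loop can neither terminate nor run; the chunking idea itself is sound. A corrected, self-contained sketch of token-window generation follows. The 512-token window and max_new_tokens value are assumptions, and model and tokenizer stand for a causal LM and its tokenizer loaded elsewhere.

    # Sketch: generate a continuation for each fixed-size token window of the input text.
    import torch

    def generate_by_chunks(input_text, model, tokenizer, max_tokens=512, max_new_tokens=128):
        token_ids = tokenizer.encode(input_text)
        pieces = []
        for start in range(0, len(token_ids), max_tokens):
            window = token_ids[start:start + max_tokens]
            input_ids = torch.tensor([window])  # shape (1, window_length)
            with torch.no_grad():
                output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
            # Decode only the newly generated tail, not the prompt that was fed in.
            pieces.append(tokenizer.decode(output_ids[0][len(window):], skip_special_tokens=True))
        return "".join(pieces)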
  @app.post("/generate")
- async def generate_text(model_name: str = Query(...), input_text: str = Query(...)):
      try:
-         model_loader = S3DirectStream(S3_BUCKET_NAME)
-         model = await model_loader.load_model_from_s3(model_name)
-         tokenizer = await model_loader.load_tokenizer_from_s3(model_name)
-
-         chunks = split_text_by_tokens(input_text, tokenizer, max_tokens=MAX_TOKENS)
-
-         generated_text = continue_generation(input_text, model, tokenizer)
-
-         return {"generated_text": generated_text}
-
      except Exception as e:
-         raise HTTPException(status_code=500, detail=str(e))

  if __name__ == "__main__":
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860)
app.py (new version):

  import os
  import logging
  import boto3
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from safetensors.torch import load_file
+ import torch
  import asyncio

+ # Logging configuration
  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
  console_handler = logging.StreamHandler()

  console_handler.setFormatter(formatter)
  logger.addHandler(console_handler)
+ # AWS and S3 configuration
  AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
  AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
  AWS_REGION = os.getenv("AWS_REGION")

      region_name=AWS_REGION
  )

+ # Create the FastAPI application
  app = FastAPI()

+ # Data model for the request
+ class GenerateRequest(BaseModel):
+     model_name: str
+     input_text: str
+     task_type: str
+
+ # Class for managing S3 access
  class S3DirectStream:
      def __init__(self, bucket_name):
          self.s3_client = boto3.client(

          )
          self.bucket_name = bucket_name
+     # Fetch a file from S3
      async def stream_from_s3(self, key):
          loop = asyncio.get_event_loop()
          return await loop.run_in_executor(None, self._stream_from_s3, key)

      def _stream_from_s3(self, key):
          try:
              response = self.s3_client.get_object(Bucket=self.bucket_name, Key=key)
+             file_content = response['Body'].read()
+             if not file_content:
+                 raise HTTPException(status_code=404, detail=f"The file {key} is empty.")
+             return file_content
          except self.s3_client.exceptions.NoSuchKey:
              raise HTTPException(status_code=404, detail=f"The file {key} does not exist in the S3 bucket.")
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error downloading {key} from S3: {str(e)}")
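stream_from_s3 keeps the blocking boto3 call off the event loop by pushing it into a thread via run_in_executor. On Python 3.9+ the same pattern can be written with asyncio.to_thread; a standalone sketch with illustrative names:

    # Sketch: run a blocking S3 read in a worker thread from async code.
    import asyncio
    import boto3

    def read_object(bucket: str, key: str) -> bytes:
        s3 = boto3.client("s3")
        return s3.get_object(Bucket=bucket, Key=key)["Body"].read()  # blocking network call

    async def read_object_async(bucket: str, key: str) -> bytes:
        # asyncio.to_thread is equivalent to the loop.run_in_executor(None, ...) call used in app.py
        return await asyncio.to_thread(read_object, bucket, key)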
 
+     # Load the model directly from S3
      async def load_model_from_s3(self, model_name):
          try:
              model_name = model_name.replace("/", "-").lower()
+             model_bytes = await self.stream_from_s3(f"{model_name}/pytorch_model.bin")
+             if model_bytes:
+                 model = load_file(model_bytes)
+                 return model
+             model = AutoModelForCausalLM.from_pretrained(f"s3://{self.bucket_name}/{model_name}/pytorch_model.bin")
              return model
          except HTTPException as e:
              raise e
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error loading the model from S3: {e}")
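Two caveats about the block above: safetensors' load_file expects a filesystem path (loading from raw bytes would use safetensors.torch.load, which returns a plain tensor dict rather than a usable model), and from_pretrained does not understand s3:// URIs. A hedged sketch of one workable approach, writing the streamed files to a temporary directory and loading from there; streamer stands for an S3DirectStream instance, and the file names listed are assumptions, not necessarily what the bucket contains:

    # Sketch: materialize streamed S3 objects locally, then load the model with from_pretrained.
    import tempfile
    from pathlib import Path
    from transformers import AutoModelForCausalLM

    async def load_model_via_tempdir(streamer, model_prefix: str):
        tmp = Path(tempfile.mkdtemp())
        for name in ("config.json", "model.safetensors"):  # assumed file names
            (tmp / name).write_bytes(await streamer.stream_from_s3(f"{model_prefix}/{name}"))
        return AutoModelForCausalLM.from_pretrained(tmp)  # from_pretrained accepts a local directory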
 
+     # Load the tokenizer from S3
      async def load_tokenizer_from_s3(self, model_name):
          try:
              model_name = model_name.replace("/", "-").lower()
+             tokenizer_bytes = await self.stream_from_s3(f"{model_name}/tokenizer.json")
+             if not tokenizer_bytes:
+                 raise HTTPException(status_code=404, detail="The tokenizer.json file is empty or does not exist.")
+             tokenizer = AutoTokenizer.from_pretrained(f"s3://{self.bucket_name}/{model_name}/tokenizer.json")
              return tokenizer
          except Exception as e:
              raise HTTPException(status_code=500, detail=f"Error loading the tokenizer from S3: {e}")
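AutoTokenizer.from_pretrained cannot read an s3:// path either, but the tokenizer.json bytes already streamed above are enough to build a fast tokenizer directly. A minimal sketch (the temporary-file handling is illustrative):

    # Sketch: build a tokenizer from tokenizer.json bytes fetched from S3.
    import tempfile
    from transformers import PreTrainedTokenizerFast

    def tokenizer_from_bytes(tokenizer_bytes: bytes) -> PreTrainedTokenizerFast:
        with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f:
            f.write(tokenizer_bytes)  # PreTrainedTokenizerFast loads from a file path
            path = f.name
        return PreTrainedTokenizerFast(tokenizer_file=path)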
 
+     # Get the model files from S3
+     async def get_model_file_parts(self, model_name):
          try:
              model_name = model_name.replace("/", "-").lower()
+             files = self.s3_client.list_objects_v2(Bucket=self.bucket_name, Prefix=model_name)
+             model_files = [obj['Key'] for obj in files.get('Contents', []) if model_name in obj['Key']]
+             if not model_files:
+                 raise HTTPException(status_code=404, detail=f"Files for model {model_name} not found.")
+             return model_files
          except Exception as e:
+             raise HTTPException(status_code=500, detail=f"Error retrieving files for model {model_name} from S3: {e}")
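One limitation worth noting: list_objects_v2 returns at most 1000 keys per call, so large model folders would be truncated. A short sketch using the boto3 paginator (bucket and prefix are illustrative):

    # Sketch: list every key under a prefix, across pages.
    import boto3

    def list_model_files(bucket: str, prefix: str):
        s3 = boto3.client("s3")
        keys = []
        for page in s3.get_paginator("list_objects_v2").paginate(Bucket=bucket, Prefix=prefix):
            keys.extend(obj["Key"] for obj in page.get("Contents", []))
        return keys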
 
+ # Generation endpoint
  @app.post("/generate")
+ async def generate(request: GenerateRequest):
      try:
+         task_type = request.task_type
+         model_name = request.model_name
+         input_text = request.input_text
+
+         s3_direct_stream = S3DirectStream(S3_BUCKET_NAME)
+
+         # Load the model and tokenizer from S3
+         model = await s3_direct_stream.load_model_from_s3(model_name)
+         tokenizer = await s3_direct_stream.load_tokenizer_from_s3(model_name)
+
+         # Generate depending on the task type
+         if task_type == "text-to-text":
+             generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
+             result = generator(input_text, max_length=MAX_TOKENS, num_return_sequences=1)
+             return {"result": result[0]["generated_text"]}
+
+         elif task_type == "text-to-image":
+             generator = pipeline("text-to-image", model=model, tokenizer=tokenizer, device=0)
+             image = generator(input_text)
+             return {"image": image}
+
+         elif task_type == "text-to-audio" or task_type == "text-to-speech":
+             generator = pipeline("text-to-speech", model=model, tokenizer=tokenizer, device=0)
+             audio = generator(input_text)
+             return {"audio": audio}
+
+         elif task_type == "text-to-video":
+             generator = pipeline("text-to-video", model=model, tokenizer=tokenizer, device=0)
+             video = generator(input_text)
+             return {"video": video}
+
+         else:
+             raise HTTPException(status_code=400, detail="Unsupported task type.")
+     except HTTPException as e:
+         raise e
      except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error during generation: {str(e)}")
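For reference, a client-side call matching the GenerateRequest fields; the model name is only an example, the port comes from the uvicorn.run line below, and the requests package is assumed to be available:

    # Sketch: call the /generate endpoint with the fields defined by GenerateRequest.
    import requests

    payload = {
        "model_name": "gpt2",            # example value; any model mirrored in the S3 bucket
        "input_text": "Hello, world",
        "task_type": "text-to-text",
    }
    resp = requests.post("http://localhost:7860/generate", json=payload, timeout=300)
    resp.raise_for_status()
    print(resp.json())                   # {"result": "..."} for a text-to-text request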
 
+ # Run the application
  if __name__ == "__main__":
      import uvicorn
      uvicorn.run(app, host="0.0.0.0", port=7860)