Spaces: prlabs2023 committed
Commit 301c130 · Parent(s): 7e1aade
Update app.py

app.py CHANGED
@@ -1,28 +1,16 @@
-import firebase_admin
-from firebase_admin import credentials
-from firebase_admin import firestore
 import io
 from fastapi import FastAPI, File, UploadFile
-from werkzeug.utils import secure_filename
-import speech_recognition as sr
 import subprocess
 import os
 import requests
 import random
-import pandas as pd
-from pydub import AudioSegment
 from datetime import datetime
 from datetime import date
-import numpy as np
-from sklearn.ensemble import RandomForestRegressor
-import shutil
 import json
-from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 from pydantic import BaseModel
 from typing import Annotated
-from transformers import BertTokenizerFast, EncoderDecoderModel
-import torch
 import random
+from fastapi import FastAPI, Response
 import string
 import time
 from huggingface_hub import InferenceClient

@@ -40,13 +28,12 @@ class Query2(BaseModel):
     filename:str
     host:str

-
-
-
-
-
-
-
+class QueryM(BaseModel):
+    text: str
+    tokens:int
+    temp:float
+    topp:float
+    topk:float



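For context, a request body for the new QueryM model could look like the sketch below; the field values are illustrative and not taken from the commit.

    # Illustrative payload matching the new QueryM model (values are made up)
    payload = {
        "text": "Explain what FastAPI is in one sentence.",  # prompt text
        "tokens": 128,   # forwarded as max_new_tokens
        "temp": 0.7,     # sampling temperature
        "topp": 0.95,    # nucleus (top-p) sampling
        "topk": 50       # top-k sampling
    }
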
@@ -81,15 +68,66 @@ async def startup_event():

 audio_space="https://audiospace-1-u9912847.deta.app/uphoto"

-# @app.post("/code")
-# async def get_code(request: Request):
-#     data = await request.form()
-#     code = data.get("code")
-#     global audio_space
-#     print("code ="+code)
-#     audio_space= audio_space+code
-
 import threading
+from huggingface_hub.inference_api import InferenceApi
+client = InferenceClient()
+
+
+@app.post("/image")
+async def get_answer(q: Query ):
+    text = q.text
+    try:
+        global client
+        imagei = client.text_to_image(text)
+        byte_array = io.BytesIO()
+        imagei.save(byte_array, format='JPEG')
+        response = Response(content=byte_array.getvalue(), media_type="image/png")
+        return response
+
+    except:
+        return JSONResponse({"status":False})
+
+
+@app.post("/mistral")
+async def get_answer(q: QueryM ):
+    text = q.text
+    try:
+        client = InferenceClient()
+        generate_kwargs = dict(
+            max_new_tokens= int(q.tokens),
+            do_sample=True,
+            top_p= q.topp,
+            top_k=int(q.topk),
+            temperature=q.temp,
+        )
+        inputs= text
+        response = client.post(json={"inputs": inputs, "parameters": generate_kwargs}, model="mistralai/Mistral-7B-Instruct-v0.1")
+        json_string = response.decode('utf-8')
+        list_of_dicts = json.loads(json_string)
+        result_dict = list_of_dicts[0]
+        x=(result_dict['generated_text'])
+        x=x.replace(inputs,'')
+        return JSONResponse({"result":x,"status":True})
+    except Exception as e:
+        print(e)
+        return JSONResponse({"status":False})
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+''' to be removed when main code is updated '''
+
 @app.post("/")
 async def get_answer(q: Query ):

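The two new routes can be exercised roughly as in the sketch below. The base URL and port are hypothetical, and it assumes the existing Query model only requires a "text" field (the /image handler reads only q.text); the snippet is a reading aid rather than part of app.py.

    # Hedged usage sketch for the new /image and /mistral routes
    import requests

    BASE = "http://localhost:7860"  # hypothetical host for the Space

    # /image responds with raw image bytes produced by InferenceClient.text_to_image
    img = requests.post(f"{BASE}/image", json={"text": "a lighthouse at dusk"})
    with open("out.jpg", "wb") as f:
        f.write(img.content)

    # /mistral responds with JSON like {"result": "<generated text>", "status": true}
    resp = requests.post(
        f"{BASE}/mistral",
        json={"text": "Write a haiku about the sea.",
              "tokens": 64, "temp": 0.7, "topp": 0.95, "topk": 50},
    )
    print(resp.json())
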
@@ -137,13 +175,12 @@ async def get_answer(q: Query2 ):

 import requests
 import io
-import torch
 import io
 from PIL import Image
 import json


-
+
 # client = InferenceClient(model="SG161222/Realistic_Vision_V1.4")


@@ -180,3 +217,9 @@ def do_ML(filename:str,text:str,code:str,host:str):



+
+
+
+
+
+