OmPrakashSingh1704 committed
Commit • 19d239a • 1 Parent(s): f2c2c87
Update app.py
app.py CHANGED
@@ -1,16 +1,13 @@
 import streamlit as st
-
-import
+import re
+import json,os
+from openai import OpenAI
 from datetime import datetime
-huggingface_hub.login(token=os.getenv("huggingface_id"))
 
-
-
-    "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    torch_dtype="auto",
-    device_map="auto",
+client = OpenAI(
+    api_key = os.getenv("OPENAI-API-KEY")
 )
-
+
 
 if 'recipe' not in st.session_state:
     st.session_state.recipe = None
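The removed side of this hunk is partially truncated in this view: the bare `-` lines lost their content, and one removal kept only the word `import`. Judging from the surviving fragments (the `huggingface_hub.login` call, the Llama model id, `torch_dtype`/`device_map`, and the `tokenizer`/`model.generate` usage removed in the next hunk), the old code plausibly followed the standard transformers loading pattern. A hypothetical reconstruction, not the attested code:

import os
import torch
import huggingface_hub
from transformers import AutoModelForCausalLM, AutoTokenizer

# Attested removed line: authenticate against the Hub for the gated Llama model.
huggingface_hub.login(token=os.getenv("huggingface_id"))

# Assumed shape of the two truncated removed lines before the model id;
# only the three quoted arguments below are attested in the diff.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    torch_dtype="auto",
    device_map="auto",
)

On the added side, note that `os` arrives via `import json,os`, and the key is read from the environment variable "OPENAI-API-KEY": os.getenv accepts any string, but POSIX shells cannot export a name containing hyphens, so the variable would have to be injected some other way (for example via the Space's secrets settings).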
@@ -111,27 +108,14 @@ def generate_recipe(user_inputs):
     ]
     prompt = create_detailed_prompt(user_inputs['user_direction'], user_inputs['exclusions'], user_inputs['serving_size'], user_inputs['difficulty'])
     messages = [{"role": "user", "content": prompt}]
-
-
-
-
-
+    st.session_state.recipe = client.chat.completions.create(
+        model="gpt-4-1106-preview",
+        messages=messages,
+        temperature=0.75,
+        top_p=0.75,
+        functions=functions,
+        function_call={"name":"provide_recipe"}, # auto is default, but we'll be explicit
     )
-
-    # Tokenize and move to the correct device
-    model_inputs = tokenizer([text], return_tensors="pt")
-    torch.cuda.empty_cache()
-    with torch.no_grad():
-        generated_ids = model.generate(
-            model_inputs.input_ids,
-            max_new_tokens=1024 # Reduced value for memory management
-        )
-
-    generated_ids = [
-        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-    ]
-
-    st.session_state.recipe = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
     st.session_state.recipe_saved = False
 
 def clear_inputs():
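In the removed generation path, the five truncated `-` lines before the shared `)` must have produced the `text` string that the removed `tokenizer([text], return_tensors="pt")` call consumes. For a chat-tuned model this is conventionally done with `apply_chat_template`; a hedged sketch of what those lines likely contained, not the attested code:

text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return a prompt string, not token ids
    add_generation_prompt=True,  # append the assistant header so the model continues
)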
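The replacement path forces `function_call={"name":"provide_recipe"}`, so the model must respond with structured JSON arguments rather than prose (`functions`/`function_call` are the legacy spelling of what the API now calls `tools`/`tool_choice`, and still work with this client and model). The `functions` list itself is defined earlier in app.py, above this hunk, and is not shown; an illustrative sketch in which every schema field name is an assumption, followed by how the stored completion would be unpacked:

import json

# Hypothetical schema: the real `functions` list lives earlier in app.py
# and may differ in field names and structure.
functions = [
    {
        "name": "provide_recipe",
        "description": "Return the generated recipe as structured data.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "ingredients": {"type": "array", "items": {"type": "string"}},
                "instructions": {"type": "array", "items": {"type": "string"}},
            },
            "required": ["name", "ingredients", "instructions"],
        },
    }
]

# The commit stores the raw ChatCompletion object in st.session_state.recipe;
# with a forced function call, the arguments arrive as a JSON string that
# downstream code would parse before rendering.
response = st.session_state.recipe
recipe = json.loads(response.choices[0].message.function_call.arguments)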