Update app.py
app.py
CHANGED
@@ -59,64 +59,64 @@ def create_detailed_prompt(user_direction, exclusions, serving_size, difficulty)
 
 def generate_recipe(user_inputs):
     with st.spinner('Building the perfect recipe...'):
-        provide_recipe_schema = {
-            'type': 'function',
-            'function': {
-                'name': 'provide_recipe',
-                'description': 'Provides a detailed recipe strictly adhering to the user input/specifications, especially ingredient exclusions and the recipe difficulty',
-                'parameters': {
-                    'type': 'object',
-                    'properties': {
-                        'name': {
-                            'type': 'string',
-                            'description': 'A creative name for the recipe'
-                        },
-                        'description': {
-                            'type': 'string',
-                            'description': 'a brief one-sentence description of the provided recipe'
-                        },
-                        'ingredients': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'name': {
-                                        'type': 'string',
-                                        'description': 'Quantity and name of the ingredient'
-                                    }
-                                }
-                            }
-                        },
-                        'instructions': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'step_number': {
-                                        'type': 'number',
-                                        'description': 'The sequence number of this step'
-                                    },
-                                    'instruction': {
-                                        'type': 'string',
-                                        'description': 'Detailed description of what to do in this step'
-                                    }
-                                }
-                            }
-                        }
-                    },
-                    'required': [
-                        'name',
-                        'description',
-                        'ingredients',
-                        'instructions'
-                    ]
-                }
-            }
-        }
+        # provide_recipe_schema = {
+        #     'type': 'function',
+        #     'function': {
+        #         'name': 'provide_recipe',
+        #         'description': 'Provides a detailed recipe strictly adhering to the user input/specifications, especially ingredient exclusions and the recipe difficulty',
+        #         'parameters': {
+        #             'type': 'object',
+        #             'properties': {
+        #                 'name': {
+        #                     'type': 'string',
+        #                     'description': 'A creative name for the recipe'
+        #                 },
+        #                 'description': {
+        #                     'type': 'string',
+        #                     'description': 'a brief one-sentence description of the provided recipe'
+        #                 },
+        #                 'ingredients': {
+        #                     'type': 'array',
+        #                     'items': {
+        #                         'type': 'object',
+        #                         'properties': {
+        #                             'name': {
+        #                                 'type': 'string',
+        #                                 'description': 'Quantity and name of the ingredient'
+        #                             }
+        #                         }
+        #                     }
+        #                 },
+        #                 'instructions': {
+        #                     'type': 'array',
+        #                     'items': {
+        #                         'type': 'object',
+        #                         'properties': {
+        #                             'step_number': {
+        #                                 'type': 'number',
+        #                                 'description': 'The sequence number of this step'
+        #                             },
+        #                             'instruction': {
+        #                                 'type': 'string',
+        #                                 'description': 'Detailed description of what to do in this step'
+        #                             }
+        #                         }
+        #                     }
+        #                 }
+        #             },
+        #             'required': [
+        #                 'name',
+        #                 'description',
+        #                 'ingredients',
+        #                 'instructions'
+        #             ]
+        #         }
+        #     }
+        # }
         prompt = create_detailed_prompt(user_inputs['user_direction'], user_inputs['exclusions'], user_inputs['serving_size'], user_inputs['difficulty'])
-        messages = [{"role": "user", "content": prompt}]
-        tool_section = "\n".join([f"{tool['function']['name']}({json.dumps(tool['function']['parameters'])})" for tool in [provide_recipe_schema]])
-        text = f"{prompt}\n\nTools:\n{tool_section}"
+        # messages = [{"role": "user", "content": prompt}]
+        # tool_section = "\n".join([f"{tool['function']['name']}({json.dumps(tool['function']['parameters'])})" for tool in [provide_recipe_schema]])
+        # text = f"{prompt}\n\nTools:\n{tool_section}"
 
         # Tokenize and move to the correct device
         model_inputs = tokenizer(prompt, return_tensors="pt")
@@ -129,18 +129,18 @@ def generate_recipe(user_inputs):
 
         # Tokenize and move to the correct device
         # model_inputs = tokenizer([text], return_tensors="pt")
-        torch.cuda.empty_cache()
-        with torch.no_grad():
-            generated_ids = model.generate(
-                **model_inputs,
-                # max_new_tokens=512,
-            )
+        # torch.cuda.empty_cache()
+        # with torch.no_grad():
+        generated_ids = model.generate(
+            **model_inputs,
+            # max_new_tokens=512,
+        )
 
         # generated_ids = [
         #     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
         # ]
 
-        st.session_state.recipe = tokenizer.decode(generated_ids[0]
+        st.session_state.recipe = tokenizer.decode(generated_ids[0])
         st.session_state.recipe_saved = False
 
 def clear_inputs():
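After this change, generate_recipe tokenizes the raw prompt and calls model.generate directly: the tool-calling schema, the torch.no_grad()/empty_cache wrapping, and the max_new_tokens cap are all commented out, and the missing closing parenthesis in the tokenizer.decode call is fixed. A minimal standalone sketch of the generation path the commit leaves active, assuming a placeholder "gpt2" checkpoint and a hypothetical generate_recipe_sketch helper (the Space loads its actual model and tokenizer elsewhere in app.py, outside this diff):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint, not the Space's model
model = AutoModelForCausalLM.from_pretrained("gpt2")

def generate_recipe_sketch(prompt: str) -> str:
    # Tokenize the plain prompt; the tool schema is no longer injected.
    model_inputs = tokenizer(prompt, return_tensors="pt")
    # The commit calls generate() with max_new_tokens commented out, so
    # generation falls back to the model's default length limit.
    generated_ids = model.generate(**model_inputs)
    # Decode the full sequence, prompt included, as the committed code does.
    return tokenizer.decode(generated_ids[0])

print(generate_recipe_sketch("Write a recipe for tomato soup.\n"))

Note that because tokenizer.decode is applied to generated_ids[0] without slicing, st.session_state.recipe holds the prompt together with the completion; re-enabling the commented-out slicing block (output_ids[len(input_ids):]) is what would strip the prompt and keep only the newly generated tokens.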