OmPrakashSingh1704 committed on
Commit
2b340e8
1 Parent(s): a065feb

Update app.py

Files changed (1)
  1. app.py +31 -11
app.py CHANGED
@@ -1,13 +1,18 @@
 import streamlit as st
 import re
 import json,os
-from openai import OpenAI
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 from datetime import datetime
 
-client = OpenAI(
-    api_key = os.getenv("OPENAI-API-KEY")
+# Load model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+    "Qwen/Qwen1.5-0.5B-Chat",
+    torch_dtype="auto",
+    device_map="auto",
 )
 
+tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
 if 'recipe' not in st.session_state:
     st.session_state.recipe = None
 
@@ -53,7 +58,8 @@ def create_detailed_prompt(user_direction, exclusions, serving_size, difficulty)
 def generate_recipe(user_inputs):
     with st.spinner('Building the perfect recipe...'):
         functions = [
-            {
+            {'type': 'function',
+             'function':{
                 "name": "provide_recipe",
                 "description": "Provides a detailed recipe strictly adhering to the user input/specifications, especially ingredient exclusions and the recipe difficulty",
                 "parameters": {
@@ -104,17 +110,31 @@ def generate_recipe(user_inputs):
                     ],
                 },
             }
+            }
         ]
         prompt = create_detailed_prompt(user_inputs['user_direction'], user_inputs['exclusions'], user_inputs['serving_size'], user_inputs['difficulty'])
         messages = [{"role": "user", "content": prompt}]
-        st.session_state.recipe = client.chat.completions.create(
-            model="gpt-3.5-turbo",
-            messages=messages,
-            temperature=0.75,
-            top_p=0.75,
-            functions=functions,
-            function_call={"name":"provide_recipe"}, # auto is default, but we'll be explicit
+        text = tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True,
+            tools=functions
         )
+
+        # Tokenize and move to the correct device
+        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+        torch.cuda.empty_cache()
+        with torch.no_grad():
+            generated_ids = model.generate(
+                model_inputs.input_ids,
+                max_new_tokens=64,  # Reduced value for memory management
+            )
+
+        generated_ids = [
+            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+        ]
+
+        st.session_state.recipe = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        st.session_state.recipe_saved = False
 
 def clear_inputs():
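After this change, st.session_state.recipe holds the raw decoded string rather than the structured function_call arguments the OpenAI client used to return, so the rest of the app no longer receives a ready-made recipe object. Below is a minimal sketch, not part of the commit, of how that string might be turned back into a dict; it assumes the model emits its tool call as JSON, possibly wrapped in <tool_call>...</tool_call> tags (the exact format depends on the model's chat template), and parse_recipe_output is a hypothetical helper name.

    # Minimal sketch (assumption, not in the commit): recover a dict from the decoded output
    import json
    import re

    def parse_recipe_output(decoded):
        # Prefer an explicit <tool_call> block if the chat template produces one
        match = re.search(r"<tool_call>\s*(\{.*\})\s*</tool_call>", decoded, re.DOTALL)
        payload = match.group(1) if match else decoded
        try:
            data = json.loads(payload)
        except json.JSONDecodeError:
            return None  # caller can fall back to showing the raw text
        # Tool-call payloads are often {"name": ..., "arguments": {...}}; unwrap if so
        return data.get("arguments", data) if isinstance(data, dict) else data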
 
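If a parser like the sketch above is used, it could slot in where the commit stores the raw string, keeping the raw text as a fallback. Two related caveats: max_new_tokens=64 will likely truncate a complete recipe JSON, and whether tools=functions is actually rendered into the prompt depends on the tokenizer's chat template, so a larger token budget or embedding the schema directly in the prompt text may be needed. A hypothetical wiring (assumption, not in the commit):

    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    parsed = parse_recipe_output(decoded)
    st.session_state.recipe = parsed if parsed is not None else decoded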