Mikeplockhart committed on
Update app.py
app.py
CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import os
 from langchain_community.document_loaders import JSONLoader
 from langchain_community.vectorstores import Qdrant
-from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.embeddings import HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings
 from sentence_transformers.cross_encoder import CrossEncoder
 from groq import Groq
 
@@ -28,7 +28,6 @@ def reranking_results(query, top_k_results, rerank_model):
     return reranked_results
 
 
-json_path = "format_food.json"
 loader = JSONLoader(
     file_path=json_path,
     jq_schema='.dishes[].dish',
@@ -36,20 +35,29 @@ loader = JSONLoader(
     content_key='doc',
     metadata_func=metadata_func
 )
+
 data = loader.load()
 
 # Models
-model_name = "Snowflake/snowflake-arctic-embed-xs"
+# model_name = "Snowflake/snowflake-arctic-embed-xs"
 # rerank_model = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")
 
 # Embedding
+# model_kwargs = {"device": "cpu"}
+# encode_kwargs = {"normalize_embeddings": True}
+# hf_embedding = HuggingFaceEmbeddings(
+#     model_name=model_name,
+#     encode_kwargs=encode_kwargs,
+#     model_kwargs=model_kwargs,
+#     show_progress=True
+# )
+model_name = "BAAI/bge-small-en"
 model_kwargs = {"device": "cpu"}
 encode_kwargs = {"normalize_embeddings": True}
-hf_embedding = HuggingFaceEmbeddings(
+hf_embedding = HuggingFaceBgeEmbeddings(
     model_name=model_name,
-    encode_kwargs=encode_kwargs,
-    model_kwargs=model_kwargs,
-    show_progress=True
+    model_kwargs=model_kwargs,
+    encode_kwargs=encode_kwargs
 )
 
 qdrant = Qdrant.from_documents(
@@ -64,32 +72,36 @@ def format_to_markdown(response_list):
     temp_string = "\n- ".join(response_list)
     return temp_string
 
-def run_query(query):
+def run_query(query: str, groq: bool):
     print("Running Query")
     answer = qdrant.similarity_search(query=query, k=10)
     title_and_description = f"# Best Choice:\nA {answer[0].metadata['title']}: {answer[0].page_content}"
     instructions = format_to_markdown(answer[0].metadata['instructions'])
     recipe = f"# Standard Method\n## Cooking time:\n{answer[0].metadata['time']}\n\n## Recipe:\n{instructions}"
     print("Returning query")
-    chat_completion = client.chat.completions.create(
-        messages=[
-            {
-                "role": "user",
-                "content": f"please write a more detailed recipe for the following recipe:\n{recipe}\n\n please return it in the same format.",
-            }
-        ],
-        model="Llama3-70b-8192",
-    )
-    groq_update = "# Groq Update\n"+chat_completion.choices[0].message.content
+    if groq:
+        chat_completion = client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": f"please write a more detailed recipe for the following recipe:\n{recipe}\n\n please return it in the same format.",
+                }
+            ],
+            model="Llama3-70b-8192",
+        )
+        groq_update = "# Groq Update\n"+chat_completion.choices[0].message.content
+    else:
+        groq_update = "# Groq Update \nPlease select the tick box if you need more information."
     return title_and_description, recipe, groq_update
 
 with gr.Blocks() as demo:
     gr.Markdown("Start typing below and then click **Run** to see the output.")
     inp = gr.Textbox(placeholder="What sort of meal are you after?")
+    groq_button = gr.Checkbox(value=False, label="Use Llama for a better recipe?")
     title_output = gr.Markdown(label="Title and description")
     instructions_output = gr.Markdown(label="Recipe")
     updated_recipe = gr.Markdown(label="Updated Recipe")
     btn = gr.Button("Run")
-    btn.click(fn=run_query, inputs=inp, outputs=[title_output, instructions_output, updated_recipe])
+    btn.click(fn=run_query, inputs=[inp, groq_button], outputs=[title_output, instructions_output, updated_recipe])
 
 demo.launch()
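For context on the embedding swap above: HuggingFaceBgeEmbeddings takes the same model_name / model_kwargs / encode_kwargs arguments as the commented-out HuggingFaceEmbeddings block, and the resulting embedder plugs straight into Qdrant.from_documents. A minimal, self-contained sketch of that pipeline follows; the in-memory Qdrant location, the collection name and the sample Documents are assumptions, since the hunk cuts off before the real Qdrant.from_documents arguments.

from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain_core.documents import Document

# Same embedding settings as the commit: BGE small on CPU with normalised vectors.
hf_embedding = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-small-en",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)

# Placeholder documents; in app.py these come from the JSONLoader over format_food.json.
docs = [
    Document(page_content="A quick vegetable stir fry.", metadata={"title": "stir fry"}),
    Document(page_content="A slow-cooked beef stew.", metadata={"title": "beef stew"}),
]

# Assumption: an in-memory collection; the real collection settings are not shown in the diff.
qdrant = Qdrant.from_documents(
    docs,
    hf_embedding,
    location=":memory:",
    collection_name="recipes",
)

# Same call shape as run_query uses.
print(qdrant.similarity_search(query="something light and fast", k=1)[0].metadata["title"])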
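The other half of the commit gates the Groq call behind the new groq flag. Here is roughly the same logic factored into a standalone helper so it can be exercised outside Gradio; the client construction from a GROQ_API_KEY secret and the helper name expand_recipe are assumptions, as the commit only shows the create() call and the two groq_update strings.

import os

from groq import Groq

# Assumption: the Space builds its client from a GROQ_API_KEY secret.
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def expand_recipe(recipe: str, groq: bool) -> str:
    # Mirrors the if groq: / else: branch added to run_query.
    if not groq:
        return "# Groq Update \nPlease select the tick box if you need more information."
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": f"please write a more detailed recipe for the following recipe:\n{recipe}\n\n please return it in the same format.",
            }
        ],
        model="Llama3-70b-8192",  # model id reused from the commit
    )
    return "# Groq Update\n" + chat_completion.choices[0].message.content

# Example: expand_recipe("# Standard Method\n## Recipe:\n- boil the pasta", groq=True)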
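Finally, the Gradio wiring: the new gr.Checkbox is passed as the second entry of inputs, so its boolean value arrives as the second positional argument of the click handler, matching run_query(query: str, groq: bool). A stripped-down sketch with a stub handler (the stub return values are purely illustrative):

import gradio as gr

def run_query(query: str, groq: bool):
    # Stub: app.py returns (title_and_description, recipe, groq_update) here.
    return f"You asked for: {query}", "recipe goes here", f"Groq enabled: {groq}"

with gr.Blocks() as demo:
    inp = gr.Textbox(placeholder="What sort of meal are you after?")
    groq_button = gr.Checkbox(value=False, label="Use Llama for a better recipe?")
    title_output = gr.Markdown(label="Title and description")
    instructions_output = gr.Markdown(label="Recipe")
    updated_recipe = gr.Markdown(label="Updated Recipe")
    btn = gr.Button("Run")
    # The checkbox rides along as the second input, feeding the bool parameter.
    btn.click(fn=run_query, inputs=[inp, groq_button],
              outputs=[title_output, instructions_output, updated_recipe])

demo.launch()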