import pandas as pd
from PIL import Image
import numpy as np
import os

import torch
import torch.nn.functional as F

# from src.data.embs import ImageDataset
from src.model.blip_embs import blip_embs
from src.data.transforms import transform_test

from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
import gradio as gr
import spaces

from langchain.chains import ConversationChain
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_groq import ChatGroq

from dotenv import load_dotenv

import json
from openai import OpenAI

# GROQ_API_KEY = os.getenv("GROQ_API_KEY")
load_dotenv(".env")
USER_AGENT = os.getenv("USER_AGENT")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
SECRET_KEY = os.getenv("SECRET_KEY")

# Set environment variables
os.environ['USER_AGENT'] = USER_AGENT
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
os.environ["TOKENIZERS_PARALLELISM"] = 'true'

# Initialize LLM
llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0, max_tokens=1024, max_retries=2)

# JSON response LLM
json_llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2, model_kwargs={"response_format": {"type": "json_object"}})

# Initialize Router
router = ChatGroq(model="llama-3.2-3b-preview", temperature=0, max_tokens=1024, max_retries=2, model_kwargs={"response_format": {"type": "json_object"}})

# Initialize answer formatter LLM
answer_formatter = ChatGroq(model="llama-3.1-8b-instant", temperature=0, max_tokens=1024, max_retries=2)

# Initialize recommendation LLM client (OpenAI)
client = OpenAI()

class StoppingCriteriaSub(StoppingCriteria):

    def __init__(self, stops=[], encounters=1):
        super().__init__()
        self.stops = stops

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
        for stop in self.stops:
            if torch.all(input_ids[:, -len(stop):] == stop).item():
                return True
            
        return False

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def get_blip_config(model="base"):
    config = dict()
    if model == "base":
        config[
            "pretrained"
        ] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
        config["vit"] = "base"
        config["batch_size_train"] = 32
        config["batch_size_test"] = 16
        config["vit_grad_ckpt"] = True
        config["vit_ckpt_layer"] = 4
        config["init_lr"] = 1e-5
    elif model == "large":
        config[
            "pretrained"
        ] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_retrieval_coco.pth"
        config["vit"] = "large"
        config["batch_size_train"] = 16
        config["batch_size_test"] = 32
        config["vit_grad_ckpt"] = True
        config["vit_ckpt_layer"] = 12
        config["init_lr"] = 5e-6

    config["image_size"] = 384
    config["queue_size"] = 57600
    config["alpha"] = 0.4
    config["k_test"] = 256
    config["negative_all_rank"] = True

    return config

print("Creating model")
config = get_blip_config("large")

model = blip_embs(
        pretrained=config["pretrained"],
        image_size=config["image_size"],
        vit=config["vit"],
        vit_grad_ckpt=config["vit_grad_ckpt"],
        vit_ckpt_layer=config["vit_ckpt_layer"],
        queue_size=config["queue_size"],
        negative_all_rank=config["negative_all_rank"],
    )

model = model.to(device)
model.eval()
print("Model Loaded !")
print("="*50)

transform = transform_test(384)

print("Loading Data")
df = pd.read_json("my_recipes.json")

print("Loading Target Embedding")
tar_img_feats = []
for _id in df["id_"].tolist():     
    tar_img_feats.append(torch.load("./datasets/sidechef/blip-embs-large/{:07d}.pth".format(_id)).unsqueeze(0))

tar_img_feats = torch.cat(tar_img_feats, dim=0)
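
# Stacked target image embeddings: one row per recipe, in the same order as df,
# so a row index from the similarity search maps directly back to df.iloc[index].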

class Chat:

    def __init__(self, model, transform, dataframe, tar_img_feats, device='cuda', stopping_criteria=None):
        self.device = device
        self.model = model
        self.transform = transform
        self.df = dataframe
        self.tar_img_feats = tar_img_feats
        self.img_feats = None
        self.target_recipe = None
        self.messages = []

        if stopping_criteria is not None:
            self.stopping_criteria = stopping_criteria
        else:
            stop_words_ids = [torch.tensor([2]).to(self.device)]
            self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])

    def encode_image(self, image_array):
        # image_array is a NumPy array (H x W x C); embed it with the BLIP
        # vision encoder and L2-normalise the projected feature.
        img = Image.fromarray(image_array).convert("RGB")
        img = self.transform(img).unsqueeze(0)
        img = img.to(self.device)
        img_embs = self.model.visual_encoder(img)
        img_feats = F.normalize(self.model.vision_proj(img_embs[:, 0, :]), dim=-1).cpu()

        self.img_feats = img_feats

        self.get_target(self.img_feats, self.tar_img_feats)

    def get_target(self, img_feats, tar_img_feats):
        # Dot-product similarity between the query embedding and every stored
        # recipe embedding; keep the most similar recipe.
        score = (img_feats @ tar_img_feats.t()).squeeze(0).cpu().detach().numpy()
        index = np.argsort(score)[::-1][0]
        self.target_recipe = self.df.iloc[index]

    def ask(self):
        # target_recipe is a pandas Series; to_json() already returns a JSON string
        return self.target_recipe.to_json()



chat = Chat(model,transform,df,tar_img_feats, device)
print("Chat Initialized !")



import secrets
import string

def generate_session_key():
    characters = string.ascii_letters + string.digits
    session_key = ''.join(secrets.choice(characters) for _ in range(8))
    return session_key


def json_answer_generator(user_query, context):
    system_prompt = """
            Given a recipe context in JSON format, respond to user queries by extracting and returning the requested information in JSON format with an additional `"header"` key containing a response starter. Use the following rules:

            1. **Recipe Information Extraction**: 
            - If the user query explicitly requests specific recipe data (e.g., ingredients, nutrients, or instructions), return only those JSON objects from the provided recipe context.
            - For example, if the user asks, “What are the ingredients?” or “Show me the nutrient details,” your output should be limited to only the requested JSON objects (e.g., `recipe_ingredients`, `recipe_nutrients`).
            - Include `"header": "Here is the information you requested:"` at the start of each response.

            2. **Multiple Information Points**:
            - If a user query asks for more than one piece of information, return each requested JSON object from the recipe context in a combined JSON response.
            - For example, if the query is “Give me the ingredients and instructions,” the output should include both `recipe_ingredients` and `recipe_instructions` objects.
            - Include `"header": "Here is the information you requested:"` at the start of each response.

            3. **Non-Specific Recipe Information**:
            - If the query does not directly refer to recipe data but instead asks for a general response based on the context, return a JSON object with a single key `"content"` and a descriptive response as its value.
            - Include `"header": "Here is a suggestion based on the recipe:"` as the response starter.
            - For example, if the query is “How can I use this recipe for a healthy lunch?” return a response like:
                ```json
                {
                    "header": "Here is a suggestion based on the recipe:",
                    "content": "This Asian Potato Salad with Seven Minute Egg is a nutritious and light option, ideal for a balanced lunch. It provides protein and essential nutrients with low calories."
                }
                ```

            **Example Context**:
            ```json
            {
                "recipe_name": "Asian Potato Salad with Seven Minute Egg",
                "recipe_time": 0,
                "recipe_yields": "4 servings",
                "recipe_ingredients": [
                    "2 1/2 cup Multi-Colored Fingerling Potato",
                    "3/4 cup Celery",
                    "1/4 cup Red Onion",
                    "2 tablespoon Fresh Parsley",
                    "1/3 cup Mayonnaise",
                    "1 tablespoon Chili Garlic Sauce",
                    "1 teaspoon Hoisin Sauce",
                    "1 splash Soy Sauce",
                    "to taste Salt",
                    "to taste Ground Black Pepper",
                    "4 Egg"
                ],
                "recipe_instructions": "Fill a large stock pot with water. Add the Multi-Colored Fingerling Potato...",
                "recipe_image": "https://www.sidechef.com/recipe/eeeeeceb-493e-493d-8273-66c800821b13.jpg?d=1408x1120",
                "blogger": "sidechef.com",
                "recipe_nutrients": {
                    "calories": "80 calories",
                    "proteinContent": "2.1 g",
                    "fatContent": "6.2 g",
                    "carbohydrateContent": "3.9 g",
                    "fiberContent": "0.5 g",
                    "sugarContent": "0.4 g",
                    "sodiumContent": "108.0 mg",
                    "saturatedFatContent": "1.2 g",
                    "transFatContent": "0.0 g",
                    "cholesterolContent": "47.4 mg",
                    "unsaturatedFatContent": "3.8 g"
                },
                "tags": [
                    "Salad",
                    "Lunch",
                    "Brunch",
                    "Appetizers",
                    "Side Dish",
                    "Budget-Friendly",
                    "Vegetarian",
                    "Pescatarian",
                    "Eggs",
                    "Potatoes",
                    "Easy",
                    "Dairy-Free",
                    "Shellfish-Free",
                    "Entertaining",
                    "Fish-Free",
                    "Peanut-Free",
                    "Tree Nut-Free",
                    "Sugar-Free",
                    "Global",
                    "Tomato-Free",
                    "Stove",
                    ""
                ],
                "id_": "0000001"
            }
            ```

            **Example Query & Output**:

            **Query**: "What are the ingredients and calories?"
            **Output**:
            ```json
            {
                "header": "Here is the information you requested:",
                "recipe_ingredients": [
                    "2 1/2 cup Multi-Colored Fingerling Potato",
                    "3/4 cup Celery",
                    "1/4 cup Red Onion",
                    "2 tablespoon Fresh Parsley",
                    "1/3 cup Mayonnaise",
                    "1 tablespoon Chili Garlic Sauce",
                    "1 teaspoon Hoisin Sauce",
                    "1 splash Soy Sauce",
                    "to taste Salt",
                    "to taste Ground Black Pepper",
                    "4 Egg"
                ],
                "recipe_nutrients": {
                    "calories": "80 calories"
                }
            }
            ```

        Try to format the output as a JSON object with key-value pairs.
    """

    formatted_input = f"""
        User Query: {user_query}

        Recipe data as Context:
        {context}
    """
    response = router.invoke(
        [SystemMessage(content=system_prompt)]
        + [
            HumanMessage(
                content=formatted_input
            )
        ]
    )
    res = json.loads(response.content)
    return res


# Per-session chat histories so RunnableWithMessageHistory can reuse context
_session_histories = {}

def answer_generator(formated_input, session_id):
    # QA system prompt and chain
    qa_system_prompt = """
    You are an AI assistant developed by Nutrigenics AI, specializing in intelligent recipe information retrieval and recipe suggestions. Your purpose is to help users by recommending recipes, providing detailed nutritional values, listing ingredients, offering step-by-step cooking instructions, and filtering recipes based on context and user queries.
    Operational Guidelines: \n
    1. Input Structure: \n
    - Context: You may receive contextual information related to recipes, such as a specific recipe name, ingredients, nutritional information, instructions, recipe tags, or previously selected dishes. \n
    - User Query: Users will pose questions or requests related to recipes, nutritional information, ingredients, cooking instructions, and more. \n
    2. Response Strategy: \n
    - Utilize Provided Context: If the context contains relevant information that addresses the user's query, base your response on this provided data to ensure accuracy and relevance. \n
    - Respond to User Query Directly: If the context does not contain the necessary information to answer the user's query, kindly state that you do not have the required information. \n
    Output Format: \n
    - The output format should be JSON.
    - The output should have a key 'header' with response message header such as "Here is your ....",
    - Then there should be another key with the actual response information. If the user query asks for recipe ingredients then the key should be named "ingredients" with a
    JSON object as its value. The JSON object should have each ingredient and its measurement as key-value pairs. Similarly, if the user asked for nutritional information then the output should have a 'header' key with header text and a 'nutrients' key
    with a JSON object of nutrients and their content as key-value pairs. Similarly, if the user query asks for recipe instructions then the JSON output should include a 'header' key with header text and an
    'instructions' key with a list of instructions as its value.

    Following are the output formats for some cases:
        1. if user query asks for all recipe information, then output should be of following format:
            {
                header: header text,
                recipe_name: Recipe Name,
                recipe_instructions: List of recipe instructions,
                recipe_nutrients: key-value pairs of nutrients name and its content,
                recipe_ingredients: key-value pairs of ingredients name and its content,
                recipe_tags: List of tags related to recipe,
                .
                .
                .
            }
    
        2. if user query asks for recipe nutrients information, then output should be of following format:
            {
                header: header text,
                recipe_nutrients: key-value pairs of nutrients name and its content.
            }
        
        3. if user query asks for recipe instructions information, then output should be of following format:
            {
                header: header text,
                recipe_instructions: List of recipe instructions,
            }

        4. if user query asks for recipe ingredients information, then output should be of following format:
            {
                header: header text,
                recipe_ingredients: key-value pairs of ingredients name and its content,
            }


    Additional Instructions:
    - Precision and Personalization: Always aim to provide precise, personalized, and relevant information to users based on both the provided context and their specific queries.
    - Clarity and Coherence: Ensure all responses are clear, well-structured, and easy to understand, facilitating a seamless user experience.
    - Substitute Suggestions: Consider user preferences and dietary restrictions outlined in the context or user query when suggesting ingredient substitutes.
    - Dynamic Adaptation: Adapt your responses dynamically based on whether the context is relevant to the user's current request, ensuring optimal use of available information.
    - Don't mention about the context in the response, format the answer in a natural and friendly way.
    
    Context:
    {context}
    """
    qa_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", qa_system_prompt),
            ("human", "{input}")
        ]
    )

    # Create the base chain
    base_chain = qa_prompt | llm | StrOutputParser()

    # Wrap the chain with message history; reuse one history object per session
    # so follow-up questions keep their conversational context
    question_answer_chain = RunnableWithMessageHistory(
        base_chain,
        lambda session_id: _session_histories.setdefault(session_id, ChatMessageHistory()),
        input_messages_key="input",
        history_messages_key="chat_history"
    )

    response = question_answer_chain.invoke(formated_input, config={"configurable": {"session_id": session_id}})

    return response



### Router

def router_node(query):
    # Prompt
    router_instructions = """You are an expert at determining the appropriate task for a user’s question based on chat history and the current query context. You have two available tasks:

        1.	Retrieval: Fetch information based on the user's chat history and current query.
        2.	Recommendation/Suggestion: Recommend user recipes based on the query.

    Return a JSON response with a single key named "task" indicating either "retrieval" or "recommendation" based on your decision.
    """
    response = router.invoke(
        [SystemMessage(content=router_instructions)]
        + [
            HumanMessage(
                content=query
            )
        ]
    )
    res = json.loads(response.content)
    return res['task']
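
# Illustrative routing behaviour (actual outputs depend on the model):
#   router_node("Show me the nutrients for this recipe")     -> "retrieval"
#   router_node("Suggest a high-protein vegetarian dinner")  -> "recommendation"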

def recommendation_node(query):
    prompt = """
    You are a helpful assistant that writes Python code to filter recipes from a JSON file based on the user query. \n
    JSON file path = 'recipes.json' \n
    The JSON file is a list of recipes with the following structure: \n
    {
        "recipe_name": string,
        "recipe_time": integer,
        "recipe_yields": string,
        "recipe_ingredients": list of ingredients,
        "recipe_instructions": list of instructions,
        "recipe_image": string,
        "blogger": string,
        "recipe_nutrients": JSON object with key-value pairs such as "protein: 10g",
        "tags": list of tags related to a recipe
    } \n

    Here is the example of a recipe JSON object from the JSON data: \n
    {
        "recipe_name": "Asian Potato Salad with Seven Minute Egg",
        "recipe_time": 0,
        "recipe_yields": "4 servings",
        "recipe_ingredients": [
            "2 1/2 cup Multi-Colored Fingerling Potato",
            "3/4 cup Celery",
            "1/4 cup Red Onion",
            "2 tablespoon Fresh Parsley",
            "1/3 cup Mayonnaise",
            "1 tablespoon Chili Garlic Sauce",
            "1 teaspoon Hoisin Sauce",
            "1 splash Soy Sauce",
            "to taste Salt",
            "to taste Ground Black Pepper",
            "4 Egg"
        ],
        "recipe_instructions": "Fill a large stock pot with water.\nAdd the Multi-Colored Fingerling Potato (2 1/2 cup) and bring water to a boil. Boil the potatoes for 20 minutes or until fork tender.\nDrain the potatoes and let them cool completely.\nMeanwhile, mix together in a small bowl Mayonnaise (1/3 cup), Chili Garlic Sauce (1 tablespoon), Hoisin Sauce (1 teaspoon), and Soy Sauce (1 splash).\nTo make the Egg (4), fill a stock pot with water and bring to a boil Gently add the eggs to the water and set a timer for seven minutes.\nThen move the eggs to an ice bath to cool completely. Once cooled, crack the egg slightly and remove the shell. Slice in half when ready to serve.\nNext, halve the cooled potatoes and place into a large serving bowl. Add the Ground Black Pepper (to taste), Celery (3/4 cup), Red Onion (1/4 cup), and mayo mixture. Toss to combine adding Salt (to taste) and Fresh Parsley (2 tablespoon).\nTop with seven minute eggs and serve. Enjoy!",
        "recipe_image": "https://www.sidechef.com/recipe/eeeeeceb-493e-493d-8273-66c800821b13.jpg?d=1408x1120",
        "blogger": "sidechef.com",
        "recipe_nutrients": {
            "calories": "80 calories",
            "proteinContent": "2.1 g",
            "fatContent": "6.2 g",
            "carbohydrateContent": "3.9 g",
            "fiberContent": "0.5 g",
            "sugarContent": "0.4 g",
            "sodiumContent": "108.0 mg",
            "saturatedFatContent": "1.2 g",
            "transFatContent": "0.0 g",
            "cholesterolContent": "47.4 mg",
            "unsaturatedFatContent": "3.8 g"
        },
        "tags": [
            "Salad",
            "Lunch",
            "Brunch",
            "Appetizers",
            "Side Dish",
            "Budget-Friendly",
            "Vegetarian",
            "Pescatarian",
            "Eggs",
            "Potatoes",
            "Dairy-Free",
            "Shellfish-Free"
        ]
    } \n

    Based on the user query, provide a Python function to filter the JSON data. The output of the function should be a list of JSON objects. \n

    Recipe filtering instructions:
    - If a user asks for recipes highest in a nutrient, such as "high protein" or "high calories", return the top recipes ranked by that nutrient across all recipes.
    - Sort or rearrange recipes so that the most appropriate ones for the user come first.
    - Suggest dishes based on user preferences, dietary restrictions, and available ingredients if specified by the user.

    Your output instructions:
    - The function name should be filter_recipes. The input to the function should be the file name.
    - The length of output recipes should not be more than 6.
    - Only give me the output function. Do not call the function.
    - Give the Python function as a key named "code" in a JSON format.
    - Do not include any other text with the output, only give Python code.
    - If you do not follow the above-given instructions, the chat may be terminated.
    """
    max_tries = 3
    for _ in range(max_tries):
        try:
            # llm = ChatGroq(model="llama-3.1-8b-instant", temperature=0, max_tokens=1024, max_retries=2, model_kwargs={"response_format": {"type": "json_object"}})
            response = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": prompt},
                    {
                        "role": "user",
                        "content": query
                    }
                ]
            )

            content = response.choices[0].message.content

            res = json.loads(content)
            script = res['code']
            # Execute the generated code, which defines filter_recipes in globals()
            exec(script, globals())
            filtered_recipes = filter_recipes('recipes.json')
            if len(filtered_recipes) > 0:
                return filtered_recipes
        except Exception as e:
            print(e)
    # Give up after max_tries attempts that errored or returned no recipes
    return []
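

# Illustrative sketch (not part of the runtime path): the kind of function the
# prompt above asks the model to generate. The nutrient-parsing heuristic and
# the "high protein" criterion are assumptions for demonstration only.
def _example_filter_recipes(file_name):
    """Return up to 6 recipes sorted by protein content, highest first."""
    with open(file_name) as f:
        recipes = json.load(f)

    def protein_grams(recipe):
        # Nutrient values are stored as strings such as "2.1 g"
        value = recipe.get("recipe_nutrients", {}).get("proteinContent", "0 g")
        try:
            return float(value.split()[0])
        except (ValueError, IndexError):
            return 0.0

    recipes.sort(key=protein_grams, reverse=True)
    return recipes[:6]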


def answer_formatter_node(question, context):
    prompt = f""" You are a highly clever question-answering assistant trained to provide clear and concise answers based on the user query and provided context.
    Your task is to generate answers for the user query based on the context provided.
    Instructions for your response:
    1. Directly answer the user query using only the information provided in the context.
    2. Ensure your response is clear and concise.
    3. Mention only details related to the recipe, including the recipe name, instructions, nutrients, yield, ingredients, and image.
    4. Do not include any information that is not related to the recipe context.

    Please format an answer based on the following user question and context provided:

    User Question: 
    {question}

    Context:
    {context}
    """
    response = answer_formatter.invoke(
        [SystemMessage(content=prompt)]
    )
    res = response.content
    return res

CURR_CONTEXT = ''
CURR_SESSION_KEY = generate_session_key()

@spaces.GPU
def get_answer(image=None, message='', sessionID='abc123'):
    global CURR_CONTEXT
    global CURR_SESSION_KEY
    sessionID = CURR_SESSION_KEY
    if image is not None:
        try:
            # Process the image and message here
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            chat = Chat(model,transform,df,tar_img_feats, device)
            chat.encode_image(image)
            data = chat.ask()
            CURR_CONTEXT = data
            formated_input = {
                'input': message,
                'context': data
            }
            # response = answer_generator(formated_input, session_id=sessionID)
            response = json_answer_generator(message, data)
        except Exception as e:
            print(e)
            response = {'content':"An error occurred while processing your request."}
    elif (image is None) and (message is not None):
        task = router_node(message)
        if task == 'recommendation':
            recipes = recommendation_node(message)
            if not recipes:
                response = {'content': "An error occurred while processing your request."}
            else:
                # response = answer_formatter_node(message, recipes)
                response = recipes
        else:
            formated_input = {
                'input': message,
                'context': CURR_CONTEXT
            }
            # response = answer_generator(formated_input, session_id=sessionID)
            response = json_answer_generator(message, CURR_CONTEXT)
    return response
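
# Illustrative call (not executed at import time; the image path is a placeholder):
#   img = np.array(Image.open("example_dish.jpg").convert("RGB"))
#   print(get_answer(image=img, message="What are the ingredients?"))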

import base64
from io import BytesIO

# Dictionary to store incomplete image data by session
session_store = {}

def handle_message(data):
    global session_store
    global CURR_CONTEXT
    global CURR_SESSION_KEY
    session_id = CURR_SESSION_KEY
    context = "No data available"
    if session_id not in session_store:
        session_store[session_id] = {'image_data': b"", 'message': None, 'image_received': False}

    if 'message' in data:
        session_store[session_id]['message'] = data['message']

    # Handle image chunk data
    if 'image' in data:
        try:
            # Append the incoming image chunk
            session_store[session_id]['image_data'] += data['image']

        except Exception as e:
            print(f"Error processing image chunk: {str(e)}")
            return "An error occurred while receiving the image chunk."
        
        if session_store[session_id]['image_data'] or session_store[session_id]['message']:
            try:
                image_bytes = session_store[session_id]['image_data']
                # print("checkpoint 2")
                if isinstance(image_bytes, str):
                    image_bytes = base64.b64decode(image_bytes)
                image = Image.open(BytesIO(image_bytes))
                image_array = np.array(image)
                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                chat = Chat(model, transform, df, tar_img_feats, device)
                chat.encode_image(image_array)
                context = chat.ask()
                CURR_CONTEXT = context
                message = data['message']
                formated_input = {
                    'input': message,
                    'context': json.dumps(context)
                }
                # Invoke question_answer_chain and stream the response
                response = answer_generator(formated_input, session_id=session_id)
                return response

            except Exception as e:
                print(f"Error processing image or message: {str(e)}")
                return "An error occurred while processing your request."
            finally:
                # Clear session data after processing
                session_store.pop(session_id, None)
    else:
        message = data['message']
        task = router_node(message)
        print(task)
        if task == 'retrieval':
            formated_input = {
                'input': message,
                'context': json.dumps(CURR_CONTEXT)
            }
            response = answer_generator(formated_input, session_id=session_id)
            session_store.pop(session_id, None)
            return response
        else:
            response = recommendation_node(message)
            # response = answer_formatter_node(message, recipes)
            if response is None:
                response = {'content':"An error occurred while processing your request."}
            session_store.pop(session_id, None)
            return response

import requests

def download_image_to_numpy(url):
    # Send a GET request to the URL to download the image
    response = requests.get(url)
    
    # Check if the request was successful
    if response.status_code == 200:
        # Open the image using PIL and convert it to RGB format
        image = Image.open(BytesIO(response.content)).convert('RGB')
        
        # Convert the image to a NumPy array
        image_array = np.array(image)
        
        return image_array
    else:
        raise Exception(f"Failed to download image. Status code: {response.status_code}")

# NOTE: this URL-based handler shadows the chunk-based handle_message defined
# above; this later definition is the one that stays bound to the name.
def handle_message(data):
    global CURR_SESSION_KEY
    session_id = CURR_SESSION_KEY
    img_url = data['img_url']
    message = data['message']
    image_array = download_image_to_numpy(img_url)
    response = get_answer(image=image_array, message=message, sessionID=session_id)
    return response
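
# Illustrative call (the URL is a placeholder):
#   handle_message({"img_url": "https://example.com/dish.jpg",
#                   "message": "What are the ingredients?"})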



# @spaces.GPU
def respond_to_user(image, message):
    # Process the image and message here
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    chat = Chat(model,transform,df,tar_img_feats, device)
    chat.encode_image(image)
    data = chat.ask()
    formated_input = {
        'input': message,
        'context': data
    }
    try:
        response = answer_generator(formated_input, session_id="123cnedc")
    except Exception as e:
        response = {'content':"An error occurred while processing your request."}
    return response

iface = gr.Interface(
    fn=get_answer,
    inputs=[gr.Image(), gr.Textbox(label="Ask Query")],
    outputs=[gr.Textbox(label="Nutrition-GPT")],
    title="Nutrition-GPT Demo",
    description="Upload a food image and ask queries!",
    css=".component-12 {background-color: red}",
)

iface.launch()