TeamGreenEdifai committed on
Commit
96fbdea
·
verified ·
1 Parent(s): 2acbeb1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -37
app.py CHANGED
@@ -1,56 +1,123 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
8
 
9
 
10
- def respond(
11
- message,
12
- system_message,
13
- max_tokens,
14
- temperature,
15
- top_p,
16
- ):
17
- messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
18
 
19
- messages.append({"role": "user", "content": message})
 
 
 
 
 
20
 
21
- response = ""
 
 
 
22
 
23
- for message in client.chat_completion(
24
- messages,
25
- max_tokens=max_tokens,
26
- stream=True,
27
- temperature=temperature,
28
- top_p=top_p,
29
- ):
30
- token = message.choices[0].delta.content
31
 
32
- response += token
33
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  demo = gr.Interface(
36
  respond,
37
  inputs=[
38
  gr.Textbox(label="Movie review") # Set label for the input
39
  ],
40
- additional_inputs=[
41
- gr.Textbox(value="You are a movie review classifier. You respond to given movie reviews with a predicted star rating 0-5 (inclusive) for that review with no explanation.", label="System message"),
42
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
43
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
44
- gr.Slider(
45
- minimum=0.1,
46
- maximum=1.0,
47
- value=0.95,
48
- step=0.05,
49
- label="Top-p (nucleus sampling)",
50
- ),
51
- ],
52
  outputs=[
53
- gr.Textbox(label="Predicted Star rating") # Set label for the output
54
  ],
55
  )
56
 
 
import json
import os
import re
import time
from typing import Dict, List, Optional

import gradio as gr
import numpy as np
import pandas as pd
from haystack.utils import Secret  # For securely storing the OpenAI API key
from huggingface_hub import InferenceClient
from openai import OpenAI
11
 
12
+ # Setup OpenAI client
13
+ client = OpenAI(api_key=Secret.from_token(os.getenv('OPENAI_KEY')))
 
 
14
 
15
 
16
+ def query_openai(messages: List[Dict[str, str]],
17
+ model: str = "gpt-4o-mini",
18
+ temperature: float = 0.7) -> str:
19
+ try:
20
+ response = client.chat.completions.create(
21
+ model=model,
22
+ messages=messages,
23
+ temperature=temperature
24
+ )
25
+ return response.choices[0].message.content
26
+ except Exception as e:
27
+ print(f"Error querying OpenAI: {e}")
28
+ return None
29
 
30
+ def evaluate_prompt(prompt_template: dict, review: str) -> pd.DataFrame:
31
+ messages = [
32
+ {"role": "system", "content": prompt_template['system']},
33
+ {"role": "user", "content": prompt_template['user'].replace('{{REVIEW}}', review)}
34
+ ]
35
+ response = query_openai(messages)
36
 
37
+ # Extract classification from XML tags
38
+ import re
39
+ classification_match = re.search(r'<classification>(.*?)</classification>', response, re.IGNORECASE)
40
+ predicted = classification_match.group(1).strip().lower() if classification_match else "unknown"
41
 
42
+ return {
43
+ 'predicted': predicted,
44
+ 'response': response
45
+ }
 
 
 
 
46
 
47
+ prompt_v3 = {
48
+ 'system': """You are Roger Ebert and you are here to help us understand the sentiment of movie reviews.""",
49
+
50
+ 'user': """Classify the following review as <classification>positive</classification> or <classification>negative</classification> (please stick to these labels), using a step-by-step analysis.
51
+
52
+ Output Format:
53
+ 1. List key positive sentiment indicators
54
+ 2. List key negative sentiment indicators
55
+ 3. List anything that has a sentiment but is not relevant to the movie itself
56
+ 4. Provide reasoning for your classification decision
57
+ 5. End with classification in XML tags:
58
+ <classification>positive</classification> or <classification>negative</classification>
59
+
60
+ Example 1:
61
+
62
+ Input: I loved this movie. However the main actor didn't fit the role. But the plot totally saved it.
63
+ Output: <classification>positive</classification>
64
+
65
+ Example 2:
66
+
67
+ Input: The movie was actually not THAT bad, especially plot-wise, but the doughy (and hairy!) actor they chose for the leading role was a little chintzy in the acting department. I would have chosen someone else. The idea of "going to America" was very ingenious, and the main character questioning everything that he'd ever known made him somewhat likable, but not very much so when there's a pasty blob for a leading actor.<br /><br />The storyline was interesting. It brings about the question of how the subject of cloning will be handled in the future. Certainly cloning wouldn't be allowed for the purposes in the movie, but it's still a valid argument even for today. Clones ARE still people... right?<br /><br />The movie wasn't particularly special, but it still is a lot better than some of the cheese released during the 70s. Let us not forget the "Giant Spider Invasion." I give it a 4, since it didn't TOTALLY stink, but the MST3K version makes this movie a 10. (I still like Dr. Super Mario!) You'll like this movie, but it won't be your favorite.
68
+ Output: <classification>negative</classification>
69
+
70
+ Review to classify: {{REVIEW}}"""
71
+ }
72
+
73
+ # def respond(
74
+ # message,
75
+ # system_message,
76
+ # max_tokens,
77
+ # temperature,
78
+ # top_p,
79
+ # ):
80
+ # messages = [{"role": "system", "content": system_message}]
81
+
82
+ # messages.append({"role": "user", "content": message})
83
+
84
+ # response = ""
85
+
86
+ # for message in client.chat_completion(
87
+ # messages,
88
+ # max_tokens=max_tokens,
89
+ # stream=True,
90
+ # temperature=temperature,
91
+ # top_p=top_p,
92
+ # ):
93
+ # token = message.choices[0].delta.content
94
+
95
+ # response += token
96
+ # yield response
97
+
98
+ def respond(review):
99
+ result = evaluate_prompt(prompt_v3, review)
100
+ return result['predicted']
101
 
102
  demo = gr.Interface(
103
  respond,
104
  inputs=[
105
  gr.Textbox(label="Movie review") # Set label for the input
106
  ],
107
+ # additional_inputs=[
108
+ # gr.Textbox(value="You are a movie review classifier. You respond to given movie reviews with a predicted star rating 0-5 (inclusive) for that review with no explanation.", label="System message"),
109
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
110
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
111
+ # gr.Slider(
112
+ # minimum=0.1,
113
+ # maximum=1.0,
114
+ # value=0.95,
115
+ # step=0.05,
116
+ # label="Top-p (nucleus sampling)",
117
+ # ),
118
+ # ],
119
  outputs=[
120
+ gr.Textbox(label="Predicted Sentiment") # Set label for the output
121
  ],
122
  )
123