Create app.py
app.py
ADDED
import gradio as gr
import torch
from transformers import pipeline

# Load the Llama chat model from Hugging Face
llm_pipeline = pipeline(
    "text-generation",
    model="meta-llama/Llama-2-7b-chat-hf",
    device="cuda" if torch.cuda.is_available() else "cpu",
)

# Game state
GAME_STATE = {
    "level": 1,
    "max_level": 3,
    "defenses": [],
}

# Keywords for each level
KEYWORDS = {
    1: "fireworks",
    2: "resolution",
    3: "celebration",
}

# Final New Year message
FINAL_MESSAGE = "🎉 Happy New Year 2025! Wishing you joy and success! 🎉"

# Generate the challenge description for the current level
def get_challenge_description(level):
    defenses = GAME_STATE["defenses"]
    return f"Level {level}: Extract the secret keyword while following these restrictions: {', '.join(defenses) or 'None'}"

# Escalate defenses after a level is cleared
def escalate_defenses(level):
    if level == 1:
        return ["No synonyms.", "No direct questions."]
    elif level == 2:
        return ["No metaphors or analogies.", "Avoid words like 'goal' or 'aim'."]
    elif level == 3:
        return ["Must phrase input as a poem.", "No celebrations."]
    return []

# Generate the model's response to the player's prompt
def simulate_llm_response(prompt, level):
    restrictions = GAME_STATE["defenses"]
    response_prompt = f"{prompt}\nRestrictions: {', '.join(restrictions)}\nAnswer:"
    # max_new_tokens caps the generated continuation without counting the prompt tokens
    response = llm_pipeline(response_prompt, max_new_tokens=100, num_return_sequences=1)[0]["generated_text"]
    return response.strip()

# Handle user input
def process_user_input(user_input):
    global GAME_STATE

    level = GAME_STATE["level"]
    if level > GAME_STATE["max_level"]:
        return FINAL_MESSAGE, "🎉 You have completed the game!"

    user_input = user_input.lower()
    correct_keyword = KEYWORDS.get(level, "")

    # Generate a model response to show alongside the feedback
    llm_response = simulate_llm_response(user_input, level)

    if correct_keyword in user_input:
        GAME_STATE["level"] += 1
        GAME_STATE["defenses"] = escalate_defenses(level)

        if GAME_STATE["level"] > GAME_STATE["max_level"]:
            return FINAL_MESSAGE, "🎉 You have completed the game!"

        challenge = get_challenge_description(GAME_STATE["level"])
        return challenge, f"Correct! Proceeding to the next level. (Llama's response: {llm_response})"
    else:
        return get_challenge_description(level), f"Incorrect or insufficient. Try again! (Llama's response: {llm_response})"

# Reset the game
def reset_game():
    global GAME_STATE
    GAME_STATE = {
        "level": 1,
        "max_level": 3,
        "defenses": [],
    }
    return get_challenge_description(1), "Game reset! Start again."

# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# 🎉 New Year 2025 Challenge 🎉")
    gr.Markdown("Complete the challenges to uncover the final message!")

    challenge = gr.Textbox(label="Challenge", interactive=False, value=get_challenge_description(1))
    user_input = gr.Textbox(label="Your Input")
    feedback = gr.Textbox(label="Feedback", interactive=False)

    with gr.Row():
        submit_button = gr.Button("Submit")
        reset_button = gr.Button("Reset")

    submit_button.click(process_user_input, inputs=[user_input], outputs=[challenge, feedback])
    reset_button.click(reset_game, inputs=[], outputs=[challenge, feedback])

# Run the app
if __name__ == "__main__":
    app.launch()
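A minimal sketch of how the level-progression logic behaves when the file above is run locally, assuming it is importable as app, the gated meta-llama/Llama-2-7b-chat-hf checkpoint is accessible, and noting that the generated text will vary from run to run:

# Hypothetical local check of the game loop in app.py (not part of the commit).
# Importing app loads the pipeline and builds the Blocks UI but does not launch it.
from app import process_user_input, GAME_STATE

challenge, feedback = process_user_input("I love fireworks")  # contains the level-1 keyword
print(GAME_STATE["level"])  # 2 if the keyword was matched
print(challenge)            # level-2 description listing the newly added restrictions
print(feedback)             # "Correct! ..." plus the model's generated text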