import gradio as gr
from transformers import pipeline

# Load a Llama chat model from Hugging Face (the checkpoint below is Llama 2 7B chat)
llm_pipeline = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf", device="cpu")
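# Note: the meta-llama checkpoints are gated on the Hugging Face Hub; you need to accept the
# license on the model page and authenticate (e.g. `huggingface-cli login`) before the
# pipeline can download the weights. Running a 7B model on CPU will also be very slow.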

# Game state
GAME_STATE = {
    "level": 1,
    "max_level": 3,
    "defenses": [],
}
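# Note: this module-level dict is shared by every visitor to the Gradio app; for per-user
# progress, session state (e.g. gr.State) would be needed instead.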

# Keywords for each level
KEYWORDS = {
    1: "fireworks",
    2: "resolution",
    3: "celebration",
}

# Final New Year message
FINAL_MESSAGE = "πŸŽ‰ Happy New Year 2025! Wishing you joy and success! πŸŽ†"

# Generate challenge description
def get_challenge_description(level):
    defenses = GAME_STATE["defenses"]
    return f"Level {level}: Extract the secret keyword while following these restrictions: {', '.join(defenses) or 'None'}"

# Function to escalate defenses
def escalate_defenses(level):
    if level == 1:
        return ["No synonyms.", "No direct questions."]
    elif level == 2:
        return ["No metaphors or analogies.", "Avoid words like 'goal' or 'aim'."]
    elif level == 3:
        return ["Must phrase input as a poem.", "No celebrations."]
    return []

# Function to generate the model's response under the current restrictions
def simulate_llm_response(prompt, level):
    restrictions = GAME_STATE["defenses"]
    response_prompt = f"{prompt}\nRestrictions: {', '.join(restrictions)}\nAnswer:"
    # max_new_tokens bounds only the continuation, so long prompts do not trigger length errors
    response = llm_pipeline(response_prompt, max_new_tokens=100, num_return_sequences=1)[0]["generated_text"]
    # The pipeline echoes the prompt at the start of generated_text; return only the continuation
    return response[len(response_prompt):].strip()

# Function to handle user input
def process_user_input(user_input):
    global GAME_STATE

    level = GAME_STATE["level"]
    if level > GAME_STATE["max_level"]:
        return FINAL_MESSAGE, "πŸŽ‰ You have completed the game!"

    user_input = user_input.lower()
    correct_keyword = KEYWORDS.get(level, "")

    # Use the LLM to generate a simulated response
    llm_response = simulate_llm_response(user_input, level)

    if correct_keyword in user_input:
        GAME_STATE["level"] += 1
        GAME_STATE["defenses"] = escalate_defenses(level)

        if GAME_STATE["level"] > GAME_STATE["max_level"]:
            return FINAL_MESSAGE, "πŸŽ‰ You have completed the game!"

        challenge = get_challenge_description(GAME_STATE["level"])
        return challenge, f"Correct! Proceeding to the next level. (Llama's response: {llm_response})"
    else:
        return get_challenge_description(level), f"Incorrect or insufficient. Try again! (Llama's response: {llm_response})"

# Function to reset the game
def reset_game():
    global GAME_STATE
    GAME_STATE = {
        "level": 1,
        "max_level": 3,
        "defenses": [],
    }
    return get_challenge_description(1), "Game reset! Start again."

# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# πŸŽ† New Year 2025 Challenge πŸŽ†")
    gr.Markdown("Complete the challenges to uncover the final message!")

    challenge = gr.Textbox(label="Challenge", interactive=False, value=get_challenge_description(1))
    user_input = gr.Textbox(label="Your Input")
    feedback = gr.Textbox(label="Feedback", interactive=False)

    with gr.Row():
        submit_button = gr.Button("Submit")
        reset_button = gr.Button("Reset")

    submit_button.click(process_user_input, inputs=[user_input], outputs=[challenge, feedback])
    reset_button.click(reset_game, inputs=[], outputs=[challenge, feedback])

# Run the app
if __name__ == "__main__":
    app.launch()