import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import re
from datasets import load_dataset
import random
import logging
import autopep8
import textwrap

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model settings
MODEL_NAME = "leetmonkey_peft__q8_0.gguf"
REPO_ID = "sugiv/leetmonkey-peft-gguf"

def download_model(model_name):
    logger.info(f"Downloading model: {model_name}")
    # Reuse the cached copy on later startups; force_download=True re-downloads
    # the model file on every launch, and resume_download is deprecated in
    # recent huggingface_hub releases, so both flags are omitted here.
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=model_name,
        cache_dir="./models"
    )
    logger.info(f"Model downloaded: {model_path}")
    return model_path

# Download and load the 8-bit model at startup
model_path = download_model(MODEL_NAME)
llm = Llama(
    model_path=model_path,
    n_ctx=1024,       # context window shared by the prompt and generated tokens
    n_threads=8,
    n_gpu_layers=-1,  # offload all available layers to the GPU
    verbose=False,
    n_batch=512,
    mlock=True        # lock the model weights in RAM to avoid swapping
)
logger.info("8-bit model loaded successfully")

# Load the dataset
dataset = load_dataset("sugiv/leetmonkey_python_dataset")
train_dataset = dataset["train"]

# Generation parameters
generation_kwargs = {
    "max_tokens": 512,
    "stop": ["```", "### Instruction:", "### Response:"],  # end at the closing fence or a new prompt section
    "echo": False,        # return only the completion, not the prompt
    "temperature": 0.05,  # near-greedy sampling for more deterministic code
    "top_k": 10,
    "top_p": 0.9,
    "repeat_penalty": 1.1
}

def generate_solution(instruction):
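    # The prompt below ends with an open ```python fence, so the model continues
    # directly with code; the "```" stop sequence ends generation at the closing
    # fence, and tokens are streamed back to the caller as they are produced.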
    system_prompt = "You are a Python coding assistant specialized in solving LeetCode problems. Provide only the complete implementation of the given function. Ensure proper indentation and formatting. Do not include any explanations or multiple solutions."
    full_prompt = f"""### Instruction:
{system_prompt}

Implement the following function for the LeetCode problem:

{instruction}

### Response:
Here's the complete Python function implementation:

```python
"""
    
    for chunk in llm(full_prompt, stream=True, **generation_kwargs):
        yield chunk["choices"][0]["text"]

def extract_and_format_code(text):
    # Extract code between triple backticks
    code_match = re.search(r'```python\s*(.*?)\s*```', text, re.DOTALL)
    if code_match:
        code = code_match.group(1)
    else:
        code = text

    # Remove any text before the function definition
    code = re.sub(r'^.*?(?=def\s+\w+\s*\()', '', code, flags=re.DOTALL)

    # Dedent the code to remove any common leading whitespace
    code = textwrap.dedent(code)

    # Split the code into lines
    lines = code.split('\n')

    # Find the function definition line
    func_def_index = next((i for i, line in enumerate(lines) if line.strip().startswith('def ')), 0)

    # Ensure proper indentation: in case the model emits the body flush-left,
    # re-indent every non-empty line after the def by four spaces. Relative
    # indentation between lines is preserved, so nested blocks stay valid.
    indented_lines = [lines[func_def_index]]  # Keep the function definition as is
    for line in lines[func_def_index + 1:]:
        if line.strip():  # If the line is not empty
            indented_lines.append('    ' + line)  # Add 4 spaces of indentation
        else:
            indented_lines.append(line)  # Keep empty lines as is

    formatted_code = '\n'.join(indented_lines)

    try:
        # Let autopep8 normalise indentation and other PEP 8 issues.
        return autopep8.fix_code(formatted_code)
    except Exception:
        # Fall back to the hand-formatted code if autopep8 cannot process it.
        return formatted_code

def select_random_problem():
    return random.choice(train_dataset)['instruction']

def stream_solution(problem):
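    # Yield the raw streamed text as it arrives so the UI updates live, then
    # yield the extracted and formatted code once generation finishes.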
    logger.info("Generating solution")
    generated_text = ""
    for token in generate_solution(problem):
        generated_text += token
        yield generated_text
    
    formatted_code = extract_and_format_code(generated_text)
    logger.info("Solution generated successfully")
    yield formatted_code

with gr.Blocks() as demo:
    gr.Markdown("# LeetCode Problem Solver (8-bit GGUF Model)")
    
    with gr.Row():
        with gr.Column():
            problem_display = gr.Textbox(label="LeetCode Problem", lines=10)
            select_problem_btn = gr.Button("Select Random Problem")
        
        with gr.Column():
            solution_display = gr.Code(label="Generated Solution", language="python", lines=25)
            generate_btn = gr.Button("Generate Solution")
    
    select_problem_btn.click(select_random_problem, outputs=problem_display)
    generate_btn.click(stream_solution, inputs=[problem_display], outputs=solution_display)

if __name__ == "__main__":
    logger.info("Starting Gradio interface")
    demo.launch(share=True)