Manoj Kumar
committed on
Commit
·
c1abd5c
1
Parent(s):
9c8236d
updated question structure
Browse files
app.py
CHANGED
@@ -10,7 +10,7 @@ db_schema = {
|
|
10 |
}
|
11 |
|
12 |
# Load the model and tokenizer
|
13 |
-
model_name = "EleutherAI/gpt-
|
14 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
15 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
|
16 |
|
@@ -35,7 +35,10 @@ def generate_sql_query(context, question):
|
|
35 |
Query:
|
36 |
"""
|
37 |
# Tokenize input
|
38 |
-
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=
|
|
|
|
|
|
|
39 |
|
40 |
# Generate SQL query
|
41 |
output = model.generate(inputs.input_ids, max_length=512, num_beams=5, early_stopping=True)
|
|
|
10 |
}
|
11 |
|
12 |
# Load the model and tokenizer
|
13 |
+
model_name = "EleutherAI/gpt-neo-2.7B" # You can also use "Llama-2-7b" or another model
|
14 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
15 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
|
16 |
|
|
|
35 |
Query:
|
36 |
"""
|
37 |
# Tokenize input
|
38 |
+
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to("cuda" if torch.cuda.is_available() else "cpu")
|
39 |
+
|
40 |
+
print("Prompt Sent to Model:")
|
41 |
+
print(prompt)
|
42 |
|
43 |
# Generate SQL query
|
44 |
output = model.generate(inputs.input_ids, max_length=512, num_beams=5, early_stopping=True)
|