cheberle committed
Commit: 80b5781 · Parent(s): 17b5cd4
Files changed (1):
  1. app.py +7 -7
app.py CHANGED
@@ -12,7 +12,7 @@ peft_config = PeftConfig.from_pretrained(ADAPTER_REPO)
 print("PEFT Base Model:", peft_config.base_model_name_or_path)
 
 # 2. Load the tokenizer & base model
-tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True, language='de')
 base_model = AutoModelForCausalLM.from_pretrained(
     BASE_MODEL,
     revision="4831ee1375be5b4ff5a4abf7984e13628db44e35",
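For orientation, this hunk sits in the model-loading section of app.py; the adapter is attached right after it, per the model = PeftModel.from_pretrained( context in the next hunk header. Below is a minimal sketch of how that section presumably fits together. The repo IDs are hypothetical placeholders, and the eval() call is an assumption; neither is part of this diff.

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical placeholders -- the real BASE_MODEL / ADAPTER_REPO values are
# defined earlier in app.py and are not shown in this diff.
BASE_MODEL = "Qwen/Qwen2.5-0.5B-Instruct"
ADAPTER_REPO = "cheberle/food-term-lora"

# 1. Read the adapter config to confirm which base model it was trained on.
peft_config = PeftConfig.from_pretrained(ADAPTER_REPO)
print("PEFT Base Model:", peft_config.base_model_name_or_path)

# 2. Load the tokenizer & base model (the commit pins the base model revision).
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    revision="4831ee1375be5b4ff5a4abf7984e13628db44e35",  # revision pin from app.py
    trust_remote_code=True,
)

# 3. Attach the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
model.eval()  # assumption: inference-only usage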
@@ -30,9 +30,9 @@ model = PeftModel.from_pretrained(
 
 def extract_food_term(text):
     """
-    Extract or simplify a food term to a single word or best descriptor.
+    Extract or simplify a German food term to a single word or best descriptor.
     """
-    prompt = f"Extract the best single word or term that describes this food item:\nInput: {text}\nOutput:"
+    prompt = f"Extrahiere das beste ein Wort oder den Begriff, der dieses Nahrungsmittel beschreibt:\nInput: {text}\nOutput:"
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate(
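The hunk above breaks off inside model.generate(...), and the post-processing that produces the answer returned in the next hunk is not shown. A minimal sketch of one plausible completion of extract_food_term follows; the generation arguments and the split on the "Output:" marker are assumptions, not the Space's actual code.

def extract_food_term(text):
    """
    Extract or simplify a German food term to a single word or best descriptor.
    """
    prompt = f"Extrahiere das beste ein Wort oder den Begriff, der dieses Nahrungsmittel beschreibt:\nInput: {text}\nOutput:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=16,   # assumption: answers are short, single terms
            do_sample=False,     # assumption: greedy decoding for reproducibility
            pad_token_id=tokenizer.eos_token_id,
        )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Assumption: keep only the text generated after the "Output:" marker.
    answer = decoded.split("Output:")[-1].strip()
    return answer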
@@ -48,11 +48,11 @@ def extract_food_term(text):
     return answer
 
 with gr.Blocks() as demo:
-    gr.Markdown("## Qwen + LoRA Adapter: Food Term Extraction Demo")
-    input_box = gr.Textbox(lines=1, label="Enter a food item (e.g., 'blaubeertorte')")
-    output_box = gr.Textbox(lines=1, label="Best single-word descriptor")
+    gr.Markdown("## Qwen + LoRA Adapter: Lebensmittelbegriffserkennung Demo")
+    input_box = gr.Textbox(lines=1, label="Geben Sie ein Nahrungsmittel ein (z.B., 'Blaubeertorte')")
+    output_box = gr.Textbox(lines=1, label="Beste ein Wort-Beschreibung")
 
-    extract_btn = gr.Button("Extract Term")
+    extract_btn = gr.Button("Begriff extrahieren")
     extract_btn.click(fn=extract_food_term, inputs=input_box, outputs=output_box)
 
 if __name__ == "__main__":
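The final hunk stops at the if __name__ == "__main__": guard, so the launch call itself is outside this diff. For completeness, a minimal sketch of the resulting Gradio wiring after this change; only the demo.launch() line is an assumption.

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("## Qwen + LoRA Adapter: Lebensmittelbegriffserkennung Demo")
    input_box = gr.Textbox(lines=1, label="Geben Sie ein Nahrungsmittel ein (z.B., 'Blaubeertorte')")
    output_box = gr.Textbox(lines=1, label="Beste ein Wort-Beschreibung")

    extract_btn = gr.Button("Begriff extrahieren")
    # Wire the button to the extraction function defined above.
    extract_btn.click(fn=extract_food_term, inputs=input_box, outputs=output_box)

if __name__ == "__main__":
    demo.launch()  # assumption: plain launch with default arguments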
 