vilarin committed on
Commit
c9792bc
·
verified ·
1 Parent(s): fa107ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -12
app.py CHANGED
@@ -62,8 +62,6 @@ def translate(
62
 
63
  prompt = Prompt_template(source_text, source_lang, target_lang)
64
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
65
-
66
- prompt_length = input_ids.shape[1]
67
 
68
  generate_kwargs = dict(
69
  input_ids=input_ids,
@@ -75,10 +73,8 @@ def translate(
75
  outputs = model.generate(**generate_kwargs)
76
 
77
  resp = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
78
-
79
- print(resp)
80
 
81
- yield resp[prompt_length:]
82
 
83
  CSS = """
84
  h1 {
@@ -110,13 +106,6 @@ with gr.Blocks(theme="soft", css=CSS) as demo:
110
  label="Target Lang",
111
  value="Spanish",
112
  )
113
- max_chunk = gr.Slider(
114
- label="Max tokens Per Chunk",
115
- minimum=512,
116
- maximum=2046,
117
- value=1000,
118
- step=8,
119
- )
120
  max_length = gr.Slider(
121
  label="Context Window",
122
  minimum=512,
 
62
 
63
  prompt = Prompt_template(source_text, source_lang, target_lang)
64
  input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
 
 
65
 
66
  generate_kwargs = dict(
67
  input_ids=input_ids,
 
73
  outputs = model.generate(**generate_kwargs)
74
 
75
  resp = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
 
 
76
 
77
+ yield resp[len(prompt):]
78
 
79
  CSS = """
80
  h1 {
 
106
  label="Target Lang",
107
  value="Spanish",
108
  )
 
 
 
 
 
 
 
109
  max_length = gr.Slider(
110
  label="Context Window",
111
  minimum=512,