Lingo-IITGN committed on
Commit
528fe1a
·
verified ·
1 Parent(s): 7f2c1db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -12
app.py CHANGED
@@ -5,23 +5,12 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
5
  tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
6
  model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")
7
 
8
- @spaces.GPU
9
  def greet(input_text):
10
  input_token = tokenizer.encode(input_text, return_tensors="pt")
11
  output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
12
  output_text = tokenizer.batch_decode(output)[0]
13
  return output_text
14
 
15
- # demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
16
-
17
- # @spaces.GPU
18
- # def greet(input_text):
19
- # input_token = tokenizer.encode(input_text, return_tensors="pt").to("cpu")
20
-
21
- # output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
22
- # output_text = tokenizer.batch_decode(output)[0]
23
- # return output_text
24
-
25
-
26
  demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
27
  demo.launch()
 
5
  tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
6
  model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")
7
 
8
+ @spaces.GPU(duration=120)
9
  def greet(input_text):
10
  input_token = tokenizer.encode(input_text, return_tensors="pt")
11
  output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
12
  output_text = tokenizer.batch_decode(output)[0]
13
  return output_text
14
 
 
 
 
 
 
 
 
 
 
 
 
15
  demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
16
  demo.launch()