benjamin-paine committed
Commit 754da99 · verified · 1 Parent(s): 67bfdbf

Update app.py

Files changed (1)
  1. app.py +19 -7
app.py CHANGED
@@ -3,25 +3,37 @@ import spaces
 import torch
 import torch.amp as amp
 
-from transformers import pipeline
+from transformers import AutoModel, AutoTokenizer, pipeline
 
-upsampler = pipeline(
-    "text-generation",
-    "appmana/Cosmos-1.0-Prompt-Upsampler-12B-Text2World-hf",
+repo_id = "appmana/Cosmos-1.0-Prompt-Upsampler-12B-Text2World-hf"
+
+model = AutoModel.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained(repo_id)
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
     torch_dtype=torch.bfloat16
 )
 
 @spaces.GPU
 def upsample(prompt):
-    return upsampler(
+    template = tokenizer.apply_chat_template(
         [{"role": "user", "content": f"Upsample the short caption to a long caption: {prompt}"}],
+        tokenize=False,
+        add_generation_prompt=True
+    )
+    response = pipe(
+        template,
         min_p=0.01,
         top_p=0.95,
         top_k=40,
+        do_sample=True,
         temperature=0.2,
         max_new_tokens=512,
-        do_sample=True
-    )[0]["generated_text"][-1]["content"]
+        pad_token_id=tokenizer.eos_token_id
+    )
+    return response[0]["generated_text"]
 
 demo = gr.Interface(
     title="NVIDIA Cosmos 🌌 Prompt Upsampler",