multimodalart HF staff committed on
Commit
695475e
1 Parent(s): 337c060

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -12,8 +12,9 @@ from previewer.modules import Previewer
12
  os.environ['TOKENIZERS_PARALLELISM'] = 'false'
13
 
14
  DESCRIPTION = "# Würstchen"
 
15
  if not torch.cuda.is_available():
16
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
17
 
18
  MAX_SEED = np.iinfo(np.int32).max
19
  CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 
12
  os.environ['TOKENIZERS_PARALLELISM'] = 'false'
13
 
14
  DESCRIPTION = "# Würstchen"
15
+ DESCRIPTION += "\n<p style=\"text-align: center\"><a href='https://huggingface.co/warp-ai/wuerstchen' target='_blank'>Würstchen</a> is a new fast and efficient high resolution text-to-image architecture and model</p>"
16
  if not torch.cuda.is_available():
17
+ DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
18
 
19
  MAX_SEED = np.iinfo(np.int32).max
20
  CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"