freddyaboulton (HF staff) committed
Commit 649f52e · verified · 1 Parent(s): f840812

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +1 -1
  2. requirements.txt +5 -8
  3. run.ipynb +1 -1
  4. run.py +18 -3
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
  colorFrom: indigo
  colorTo: indigo
  sdk: gradio
- sdk_version: 5.13.0
+ sdk_version: 5.13.1
  app_file: run.py
  pinned: false
  hf_oauth: true
requirements.txt CHANGED
@@ -1,8 +1,5 @@
- gradio-client @ git+https://github.com/gradio-app/gradio@36084466301c1c7242bb44ab2d5271d508cafa65#subdirectory=client/python
- https://gradio-pypi-previews.s3.amazonaws.com/36084466301c1c7242bb44ab2d5271d508cafa65/gradio-5.13.0-py3-none-any.whl
- diffusers
- transformers
- nvidia-ml-py3
- ftfy
- torch
- Pillow
+ gradio-client @ git+https://github.com/gradio-app/gradio@f40747c9fd12d160ac9f7b3c5273be6be815efac#subdirectory=client/python
+ https://gradio-pypi-previews.s3.amazonaws.com/f40747c9fd12d160ac9f7b3c5273be6be815efac/gradio-5.13.1-py3-none-any.whl
+ diffusers==0.32.2
+ torch==2.5.1
+ 
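A minimal sketch for sanity-checking the pinned environment after running "pip install -r requirements.txt"; the version strings mirror the pins in the new requirements.txt, and nothing else is assumed:

    # Verify the installed packages match the pins above.
    import diffusers
    import gradio
    import torch

    assert gradio.__version__ == "5.13.1"         # prerelease wheel from the S3 URL
    assert diffusers.__version__ == "0.32.2"      # pinned release
    assert torch.__version__.startswith("2.5.1")  # torch may append a local tag such as "+cpu"
    print("environment matches requirements.txt")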
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe( # type: ignore\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]): # type: ignore\n", " if images_list[\"nsfw_content_detected\"][i]: # type: ignore\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers==0.32.2 torch==2.5.1 "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"HF_TOKEN\")\n", "if not auth_token:\n", " print(\n", " \"ERROR: No Hugging Face access token found.\\n\"\n", " \"Please define an environment variable 'auth_token' before running.\\n\"\n", " \"Example:\\n\"\n", " \" export HF_TOKEN=XXXXXXXX\\n\"\n", " )\n", "\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, token=auth_token, variant=\"fp16\", torch_dtype=torch.float16,\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe( # type: ignore\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]): # type: ignore\n", " if images_list[\"nsfw_content_detected\"][i]: # type: ignore\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on(\n", " [text.submit, btn.click],\n", " infer,\n", " inputs=[text, samples, steps, scale, seed],\n", " outputs=gallery,\n", " )\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -4,14 +4,23 @@ from diffusers import StableDiffusionPipeline # type: ignore
  from PIL import Image
  import os
  
- auth_token = os.getenv("auth_token")
+ auth_token = os.getenv("HF_TOKEN")
+ if not auth_token:
+     print(
+         "ERROR: No Hugging Face access token found.\n"
+         "Please define an environment variable 'HF_TOKEN' before running.\n"
+         "Example:\n"
+         " export HF_TOKEN=XXXXXXXX\n"
+     )
+ 
  model_id = "CompVis/stable-diffusion-v1-4"
  device = "cpu"
  pipe = StableDiffusionPipeline.from_pretrained(
-     model_id, use_auth_token=auth_token, revision="fp16", torch_dtype=torch.float16
+     model_id, token=auth_token, variant="fp16", torch_dtype=torch.float16,
  )
  pipe = pipe.to(device)
  
+ 
  def infer(prompt, samples, steps, scale, seed):
      generator = torch.Generator(device=device).manual_seed(seed)
      images_list = pipe( # type: ignore
@@ -29,6 +38,7 @@ def infer(prompt, samples, steps, scale, seed):
              images.append(image)
      return images
  
+ 
  block = gr.Blocks()
  
  with block:
@@ -63,7 +73,12 @@ with block:
              step=1,
              randomize=True,
          )
-     gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
+     gr.on(
+         [text.submit, btn.click],
+         infer,
+         inputs=[text, samples, steps, scale, seed],
+         outputs=gallery,
+     )
      advanced_button.click(
          None,
          [],
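For context on the from_pretrained() change: recent diffusers releases replace the deprecated use_auth_token= keyword with token=, and select the half-precision weight files with variant="fp16" instead of revision="fp16". A minimal sketch of the migrated call, assuming diffusers==0.32.2 as pinned in requirements.txt and an HF_TOKEN environment variable:

    import os

    import torch
    from diffusers import StableDiffusionPipeline

    # token= replaces use_auth_token=; variant="fp16" replaces revision="fp16".
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        token=os.getenv("HF_TOKEN"),
        variant="fp16",
        torch_dtype=torch.float16,
    )
    pipe = pipe.to("cpu")  # run.py pins the device to CPU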