{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", "        "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers==0.32.2 torch==2.5.1  "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline  # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"HF_TOKEN\")\n", "if not auth_token:\n", "    print(\n", "        \"ERROR: No Hugging Face access token found.\\n\"\n", "        \"Please define an environment variable 'auth_token' before running.\\n\"\n", "        \"Example:\\n\"\n", "        \"  export HF_TOKEN=XXXXXXXX\\n\"\n", "    )\n", "\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", "    model_id, token=auth_token, variant=\"fp16\", torch_dtype=torch.float16,\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", "    generator = torch.Generator(device=device).manual_seed(seed)\n", "    images_list = pipe(  # type: ignore\n", "        [prompt] * samples,\n", "        num_inference_steps=steps,\n", "        guidance_scale=scale,\n", "        generator=generator,\n", "    )\n", "    images = []\n", "    safe_image = Image.open(r\"unsafe.png\")\n", "    for i, image in enumerate(images_list[\"sample\"]):  # type: ignore\n", "        if images_list[\"nsfw_content_detected\"][i]:  # type: ignore\n", "            images.append(safe_image)\n", "        else:\n", "            images.append(image)\n", "    return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", "    with gr.Group():\n", "        with gr.Row():\n", "            text = gr.Textbox(\n", "                label=\"Enter your prompt\",\n", "                max_lines=1,\n", "                placeholder=\"Enter your prompt\",\n", "                container=False,\n", "            )\n", "            btn = gr.Button(\"Generate image\")\n", "        gallery = gr.Gallery(\n", "            label=\"Generated images\",\n", "            show_label=False,\n", "            elem_id=\"gallery\",\n", "            columns=[2],\n", "        )\n", "\n", "        advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", "        with gr.Row(elem_id=\"advanced-options\"):\n", "            samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", "            steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", "            scale = gr.Slider(\n", "                label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", "            )\n", "            seed = gr.Slider(\n", "                label=\"Seed\",\n", "                minimum=0,\n", "                maximum=2147483647,\n", "                step=1,\n", "                randomize=True,\n", "            )\n", "        gr.on(\n", "            [text.submit, btn.click],\n", "            infer,\n", "            inputs=[text, samples, steps, scale, seed],\n", "    
        outputs=gallery,\n", "        )\n", "        advanced_button.click(\n", "            None,\n", "            [],\n", "            text,\n", "        )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}