{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import os\n", "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"4\" \n", "os.environ[\"WORLD_SIZE\"] = \"1\"" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", "2023-10-17 14:09:13.213394: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2023-10-17 14:09:18.049981: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" ] } ], "source": [ "import time\n", "import torch\n", "import gradio as gr\n", "from diffusers import StableDiffusionPipeline, AutoencoderKL, ControlNetModel, StableDiffusionControlNetPipeline\n", "from diffusers import DDPMScheduler, DEISMultistepScheduler, EulerAncestralDiscreteScheduler, DDIMScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler\n", "\n", "from PIL import Image\n", "\n", "from ip_adapter import IPAdapterPlus, IPAdapter" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# def image_grid(imgs, rows, cols):\n", "# assert len(imgs) == rows*cols\n", "\n", "# w, h = imgs[0].size\n", "# grid = Image.new('RGB', size=(cols*w, rows*h))\n", "# grid_w, grid_h = grid.size\n", " \n", "# for i, img in enumerate(imgs):\n", "# grid.paste(img, box=(i%cols*w, i//cols*h))\n", "# return grid\n", "\n", "# def init_pipe(base_model, ip_ckpt_path, vae_model_path, image_encoder_path, device, noise_sampler):\n", "# base_model = str(base_model)\n", "# noise_sampler = noise_sampler.from_pretrained(base_model, subfolder=\"scheduler\")\n", "# print(f\"{noise_sampler._class_name} was successfully loaded\")\n", "# vae = AutoencoderKL.from_pretrained(str(vae_model_path)).to(dtype=torch.float16)\n", "# torch.cuda.empty_cache()\n", "# pipe = StableDiffusionPipeline.from_pretrained(\n", "# base_model,\n", "# torch_dtype=torch.float16,\n", "# scheduler=noise_sampler,\n", "# vae=vae,\n", "# feature_extractor=None,\n", "# safety_checker=None\n", "# )\n", "# # load ip-adapter\n", "# ip_model = IPAdapterPlus(pipe, image_encoder_path, str(ip_ckpt_path), device, num_tokens=16)\n", "# print(f\"{base_model} was successfully loaded\")\n", "\n", "# return ip_model\n" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "# def init_pipe(base_model, ip_ckpt_path, vae_model_path, image_encoder_path, device, noise_sampler):\n", "# base_model = str(base_model)\n", "# noise_sampler = noise_sampler.from_pretrained(base_model, subfolder=\"scheduler\")\n", "# print(f\"{noise_sampler._class_name} was successfully loaded\")\n", "# vae = AutoencoderKL.from_pretrained(str(vae_model_path)).to(dtype=torch.float16)\n", "# torch.cuda.empty_cache()\n", "# pipe = StableDiffusionPipeline.from_pretrained(\n", "# base_model,\n", "# torch_dtype=torch.float16,\n", "# scheduler=noise_sampler,\n", "# vae=vae,\n", "# feature_extractor=None,\n", "# safety_checker=None\n", "# )\n", "# # load ip-adapter\n", 
"# ip_model = IPAdapterPlus(pipe, image_encoder_path, str(ip_ckpt_path), device, num_tokens=16)\n", "# print(f\"{base_model} was successfully loaded\")\n", "\n", "# return ip_model\n", "\n", "def init_pipe(base_model, ip_ckpt_path, vae_model_path, image_encoder_path, device, noise_sampler, controlnet_model_path=None):\n", " torch.cuda.empty_cache()\n", " base_model = str(base_model)\n", " controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16) if controlnet_model_path is not None else None\n", " sd_pipeline = StableDiffusionControlNetPipeline if controlnet else StableDiffusionPipeline\n", " noise_sampler = noise_sampler.from_pretrained(base_model, subfolder=\"scheduler\")\n", " \n", " print(f\"{noise_sampler._class_name} was successfully loaded\")\n", "\n", "\n", " vae = AutoencoderKL.from_pretrained(str(vae_model_path)).to(dtype=torch.float16)\n", "\n", " args = {\n", " \"pretrained_model_name_or_path\": base_model,\n", " \"torch_dtype\": torch.float16,\n", " \"scheduler\": noise_sampler,\n", " \"vae\": vae,\n", " \"feature_extractor\": None,\n", " \"safety_checker\": None,\n", " }\n", "\n", " if controlnet:\n", " args[\"controlnet\"] = controlnet\n", " \n", " pipe = sd_pipeline.from_pretrained(**args)\n", " print(pipe)\n", " ip_adapt_cls = IPAdapterPlus if \"plus\" in ip_ckpt_path else IPAdapter\n", " num_tokens = 16 if \"plus\" in ip_ckpt_path else 4\n", " print(ip_adapt_cls)\n", " ip_model = ip_adapt_cls(pipe, image_encoder_path, str(ip_ckpt_path), device, num_tokens=num_tokens)\n", " print(f\"{base_model} was successfully loaded\")\n", "\n", " return ip_model\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "def generate_image(ip_pipe, prompt, negative_prompt, pil_image, image=None, num_samples=4, height=512, width=512,\n", " num_inference_steps=20, seed=42, scale=1.0, guidance_scale=7.5):\n", " torch.cuda.empty_cache()\n", " print(image)\n", " args = {\n", " \"prompt\":prompt, \n", " \"negative_prompt\":negative_prompt, \n", " \"pil_image\":pil_image, \n", " \"num_samples\":int(num_samples), \n", " \"height\":int(height), \n", " \"width\":int(width),\n", " \"num_inference_steps\":int(num_inference_steps), \n", " \"seed\":int(seed), \n", " \"scale\":scale, \n", " \"guidance_scale\":guidance_scale\n", " }\n", "\n", " if image is not None:\n", " print(image.size)\n", " args[\"image\"] = image # [..., np.newaxis] #.transpose(1,2,0)[np.newaxis, ...] 
\n", " images = ip_pipe.generate(**args)\n", "\n", " return images " ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "ip_dir = 'models/ip_adapters'\n", "base_model_choices = [\n", " 'Lykon/dreamshaper-8', 'runwayml/stable-diffusion-v1-5', \n", " 'dreamlike-art/dreamlike-anime-1.0','Lykon/AbsoluteReality',\n", " 'SG161222/Realistic_Vision_V5.1_noVAE'\n", "]\n", "\n", "noise_sampler_dict = {\n", " \"DDPM\" : DDPMScheduler,\n", " \"DEISMultiStep\" : DEISMultistepScheduler,\n", " \"DDIM\" : DDIMScheduler,\n", " \"Euler a\" : EulerAncestralDiscreteScheduler,\n", " \"Euler\" : EulerDiscreteScheduler,\n", " \"Heun\" : HeunDiscreteScheduler,\n", "}\n", "\n", "sampler_choices = noise_sampler_dict.keys()\n", "\n", "ip_choices = [os.path.join(ip_dir, f) for f in sorted(os.listdir(ip_dir)) if f]\n", "\n", "prompt_examples = [\n", " [\"A portrait of pretty person, smile, holding one cute dragon, as a beautiful fairy with glittering wings, decorations from outlandish stones, wearing a turtleneck decorated with glitter, nice even light, lofi, in a magical forest, digital art, trending on artstation, behance, deviantart, insanely detailed and intricate, many patterns\",\n", " \"dogs, wrinkled old face, cat, classic outfit, classic turtleneck, neckline, big breasts, enlarged breasts, big boobs, legs, closed eyes, deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, nudes, nude breasts, ugly, disgusting, blurry, amputation, unclear, bindi, pottu\"],\n", "\n", " [\"A person, high resolution, best quality, sharp focus, 8k, highly detailed, digital art, digital painting, trending art, smooth skin, detailed facial skin, symmetric\",\n", " \"blurry, unclear, low resolution, bad quality, nsfw, nudes, nude breasts, nude, shirtless, deformed iris, deformed pupils, mutated hands and fingers, nude, naked, deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation, closed eyes, big breasts, sign, mutated hands and fingers, bindi, pottu,\"],\n", " [\"high res\",\" low res\"],\n", "\n", "]" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7861\n", "Running on public URL: https://30456c2767331a4c45.gradio.live\n", "\n", "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "Base model: dreamlike-art/dreamlike-anime-1.0\n", "IP ckpt: models/ip_adapters/ip-adapter-plus-face_sd15.bin\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/configuration_utils.py:134: FutureWarning: Accessing config attribute `_class_name` directly via 'DDIMScheduler' object attribute is deprecated. Please access '_class_name' over 'DDIMScheduler's config object instead, e.g. 'scheduler.config._class_name'.\n", " deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "DDIMScheduler was successfully loaded\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Loading pipeline components...: 100%|██████████| 5/5 [00:00<00:00, 6.50it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "StableDiffusionControlNetPipeline {\n", " \"_class_name\": \"StableDiffusionControlNetPipeline\",\n", " \"_diffusers_version\": \"0.21.2\",\n", " \"_name_or_path\": \"dreamlike-art/dreamlike-anime-1.0\",\n", " \"controlnet\": [\n", " \"diffusers\",\n", " \"ControlNetModel\"\n", " ],\n", " \"feature_extractor\": [\n", " null,\n", " null\n", " ],\n", " \"requires_safety_checker\": null,\n", " \"safety_checker\": [\n", " null,\n", " null\n", " ],\n", " \"scheduler\": [\n", " \"diffusers\",\n", " \"DDIMScheduler\"\n", " ],\n", " \"text_encoder\": [\n", " \"transformers\",\n", " \"CLIPTextModel\"\n", " ],\n", " \"tokenizer\": [\n", " \"transformers\",\n", " \"CLIPTokenizer\"\n", " ],\n", " \"unet\": [\n", " \"diffusers\",\n", " \"UNet2DConditionModel\"\n", " ],\n", " \"vae\": [\n", " \"diffusers\",\n", " \"AutoencoderKL\"\n", " ]\n", "}\n", "\n", "\n", "dreamlike-art/dreamlike-anime-1.0 was successfully loaded\n", "\n", "(512, 768)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet.py:226: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "100%|██████████| 20/20 [00:02<00:00, 8.84it/s]\n", "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet.py:226: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(512, 768)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 20/20 [00:03<00:00, 5.74it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Base model: dreamlike-art/dreamlike-anime-1.0\n", "IP ckpt: models/ip_adapters/ip-adapter-plus-face_sd15.bin\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/configuration_utils.py:134: FutureWarning: Accessing config attribute `_class_name` directly via 'DDIMScheduler' object attribute is deprecated. Please access '_class_name' over 'DDIMScheduler's config object instead, e.g. 'scheduler.config._class_name'.\n", " deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "DDIMScheduler was successfully loaded\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Loading pipeline components...: 100%|██████████| 5/5 [00:01<00:00, 3.15it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "StableDiffusionPipeline {\n", " \"_class_name\": \"StableDiffusionPipeline\",\n", " \"_diffusers_version\": \"0.21.2\",\n", " \"_name_or_path\": \"dreamlike-art/dreamlike-anime-1.0\",\n", " \"feature_extractor\": [\n", " null,\n", " null\n", " ],\n", " \"requires_safety_checker\": null,\n", " \"safety_checker\": [\n", " null,\n", " null\n", " ],\n", " \"scheduler\": [\n", " \"diffusers\",\n", " \"DDIMScheduler\"\n", " ],\n", " \"text_encoder\": [\n", " \"transformers\",\n", " \"CLIPTextModel\"\n", " ],\n", " \"tokenizer\": [\n", " \"transformers\",\n", " \"CLIPTokenizer\"\n", " ],\n", " \"unet\": [\n", " \"diffusers\",\n", " \"UNet2DConditionModel\"\n", " ],\n", " \"vae\": [\n", " \"diffusers\",\n", " \"AutoencoderKL\"\n", " ]\n", "}\n", "\n", "\n", "dreamlike-art/dreamlike-anime-1.0 was successfully loaded\n", "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = 
ip_pipe.generate(**args)\n", "AttributeError: 'NoneType' object has no attribute 'generate'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", "AttributeError: 'NoneType' object has no attribute 'generate'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", "AttributeError: 'NoneType' object has no attribute 'generate'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Base model: dreamlike-art/dreamlike-anime-1.0\n", "IP ckpt: models/ip_adapters/ip-adapter-plus-face_sd15.bin\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/configuration_utils.py:134: FutureWarning: Accessing config attribute 
`_class_name` directly via 'DDIMScheduler' object attribute is deprecated. Please access '_class_name' over 'DDIMScheduler's config object instead, e.g. 'scheduler.config._class_name'.\n", " deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "DDIMScheduler was successfully loaded\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Loading pipeline components...: 100%|██████████| 5/5 [00:01<00:00, 3.00it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "StableDiffusionPipeline {\n", " \"_class_name\": \"StableDiffusionPipeline\",\n", " \"_diffusers_version\": \"0.21.2\",\n", " \"_name_or_path\": \"dreamlike-art/dreamlike-anime-1.0\",\n", " \"feature_extractor\": [\n", " null,\n", " null\n", " ],\n", " \"requires_safety_checker\": null,\n", " \"safety_checker\": [\n", " null,\n", " null\n", " ],\n", " \"scheduler\": [\n", " \"diffusers\",\n", " \"DDIMScheduler\"\n", " ],\n", " \"text_encoder\": [\n", " \"transformers\",\n", " \"CLIPTextModel\"\n", " ],\n", " \"tokenizer\": [\n", " \"transformers\",\n", " \"CLIPTokenizer\"\n", " ],\n", " \"unet\": [\n", " \"diffusers\",\n", " \"UNet2DConditionModel\"\n", " ],\n", " \"vae\": [\n", " \"diffusers\",\n", " \"AutoencoderKL\"\n", " ]\n", "}\n", "\n", "\n", "dreamlike-art/dreamlike-anime-1.0 was successfully loaded\n", "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "100%|██████████| 20/20 [00:01<00:00, 11.87it/s]\n", "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 20/20 [00:01<00:00, 12.96it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(726, 911)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(726, 911)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n", "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 20/20 [00:01<00:00, 12.95it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(3088, 2320)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(3088, 2320)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(3088, 2320)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "(3088, 2320)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py:237: FutureWarning: `_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. 
Also, be aware that the output format changed from a concatenated tensor to a tuple.\n", " deprecate(\"_encode_prompt()\", \"1.0.0\", deprecation_message, standard_warn=False)\n", "Traceback (most recent call last):\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/routes.py\", line 439, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1389, in process_api\n", " result = await self.call_function(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/blocks.py\", line 1094, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/to_thread.py\", line 33, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n", " return await future\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n", " result = context.run(func, *args)\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/gradio/utils.py\", line 704, in wrapper\n", " response = f(*args, **kwargs)\n", " File \"/tmp/ipykernel_82457/348967860.py\", line 23, in generate_image\n", " images = ip_pipe.generate(**args)\n", " File \"/fsx/homes/mshuvi/IP-Adapter/ip_adapter/ip_adapter.py\", line 177, in generate\n", " images = self.pipe(\n", " File \"/fsx/homes/afruchtman/.envs/ms_env/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n", " return func(*args, **kwargs)\n", "TypeError: StableDiffusionPipeline.__call__() got an unexpected keyword argument 'image'\n" ] } ], "source": [ "theme = gr.themes.Default(\n", " primary_hue=\"violet\",\n", ").set(\n", " button_secondary_background_fill='linear-gradient(to bottom right, *primary_100, *primary_300)'\n", ")\n", "\n", "with gr.Blocks(theme=theme) as demo:\n", " controlnet_dict = {\n", " \"Openpose\" : \"lllyasviel/control_v11p_sd15_openpose\",\n", " \"Depth\" : \"lllyasviel/control_v11f1p_sd15_depth\",\n", " \"Canny\" : \"lllyasviel/control_v11p_sd15_canny\"\n", " }\n", "\n", " vae_model_path = gr.State(\"stabilityai/sd-vae-ft-mse\")\n", " image_encoder_path = gr.State(\"models/image_encoder/\")\n", " ip_ckpt_path = gr.State(\"models/ip-adapter-plus-face_sd15.bin\")\n", " device = gr.State('cuda') if torch.cuda.is_available() else gr.State('cpu')\n", " noise_scheduler = gr.State()\n", " img2 = gr.State()\n", " ip_pipe = gr.State()\n", " control_type = gr.State()\n", "\n", " gr.Markdown(\n", " \"\"\"\n", " # IP-Adapter WebUI\n", "\n", " Choose base model, IP checkpoint, noise scheduler
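"\n", "    # --- UI layout: model pickers and ControlNet toggles on top; prompts, input\n", "    # images, the results gallery, and sampling parameters below ---\n",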
\n", " Note: base model can be copied from huggingFace as a repo id
\n", "\n", " Define wehther or not to use ControlNet + its type (currently Depth (midas), OpenPose and Canny are supported). \n", " Press \"Load model\".

\n", "\n", " Load your image + add prompts (you can load recommended prompt from Example section, at bottom of the page).
\n", " Set the params as you wish, if you use controlNet load an image of preprocessor output (depth image / 2d openpose / canny)
\n", " + Generate image
\n", "\n", " See this [deck](\"https://docs.google.com/presentation/d/1UzJDiC9SHOb7Ek2yFEPQwmAmVQDNVjM5XSIitDEiNc0/edit?usp=sharing\") for more guidance.\n", " ENJOY!\n", "\n", " \"\"\")\n", " with gr.Column() as col:\n", " with gr.Row() as row:\n", " drop_base_model = gr.Dropdown(base_model_choices, allow_custom_value=True, label='Base Model', value=\"dreamlike-art/dreamlike-anime-1.0\")\n", " drop_ip_ckpt = gr.Dropdown(ip_choices, allow_custom_value=False, label='IP ckpt', value=\"models/ip_adapters/ip-adapter-plus-face_sd15.bin\")\n", " drop_nsampler = gr.Dropdown(sampler_choices, label='Noise Sampler', value=\"DDIM\")\n", " control_cbox = gr.Checkbox(label='Use ControlNet', value=False) \n", " radio_control = gr.Radio(list(controlnet_dict.keys()), label=\"ControlNet type\", info=\"what kind of control-net do you want?\", visible=False)\n", " \n", " btn_models = gr.Button(value='Load Model')\n", " with gr.Row() as row:\n", " txt1 = gr.Textbox(lines=1, max_lines=10, interactive=True, label=\"prompt\")\n", " txt2 = gr.Textbox(lines=1, max_lines=10, interactive=True, label=\"negative prompt\")\n", " with gr.Row() as row:\n", " img1 = gr.Image(height=300, width=300, image_mode='RGB',type='pil', label=\"Input image\", show_label=True, tool='select', container=True)\n", " img_control = gr.Image(height=300, width=300, type='pil', visible=False, label='ControlNet image', interactive=True)\n", " with gr.Row() as row:\n", " btn2 = gr.Button(value=\"Generate Image\")\n", " with gr.Row() as row:\n", " # output_img = gr.Image(label=\"Output image\")\n", " output_img = gr.Gallery(label=\"Generated images\", show_label=False, elem_id=\"gallery\", rows=[2], object_fit=\"contain\", height=\"auto\", preview=False) \n", " with gr.Row() as row: \n", " slider1 = gr.Slider(minimum=0, maximum=8, value=4, step=1, label=\"num samples\")\n", " slider2 = gr.Slider(minimum=0, maximum=100, value=20, step=1, label='num steps')\n", " slider3 = gr.Slider(0, 1, value=0.7, label='scale')\n", " slider4 = gr.Slider(minimum=0, maximum=20, value=7.5, step=0.1, label='CFG')\n", " txt3 = gr.Textbox(value=512, label='height')\n", " txt4 = gr.Textbox(value=512, label='width')\n", " txt5 = gr.Textbox(value=42, label=\"seed\")\n", " with gr.Row() as row:\n", " ex = gr.Examples(\n", " examples=prompt_examples,\n", " inputs=[txt1, txt2],\n", " )\n", " \n", " def get_ip_pipe(input, ip_ckpt_path, vae_model_path, image_encoder_path, device, noise_scheduler, controlnet_path):\n", " print('Base model:', input)\n", " print('IP ckpt:', ip_ckpt_path)\n", "\n", " dict = {ip_pipe: init_pipe(input, ip_ckpt_path, vae_model_path, image_encoder_path, device, noise_scheduler, controlnet_path)}\n", " # btn2.update(interactive=True)\n", " return dict\n", " \n", " def get_controlnet_type(input): \n", " return {control_type: controlnet_dict.get(input, None)}\n", " \n", " # lambda x: gr.Checkbox.update(value=x), inputs=control_cbox, outputs=control_cbox\n", " control_cbox.change(lambda x: gr.Checkbox.update(value=x), inputs=[control_cbox], outputs=[control_cbox]).then(lambda x: gr.Image.update(visible=x), inputs=control_cbox, outputs=img_control).then(lambda x: gr.Radio.update(visible=x), \n", " inputs=control_cbox, outputs=radio_control).then(lambda x: {img2: x}, inputs=img_control, outputs=img2)\n", "\n", " radio_control.change(get_controlnet_type ,inputs=radio_control, outputs=control_type).then(lambda: gr.Button.update(interactive=True), inputs=[], outputs=btn2)\n", "\n", " drop_nsampler.change(lambda x: noise_sampler_dict[x] 
"    drop_nsampler.change(lambda x: noise_sampler_dict[x], inputs=[drop_nsampler], outputs=[noise_scheduler]).then(\n", "        lambda: gr.Button.update(interactive=True), inputs=[], outputs=btn_models).then(\n", "        lambda: gr.Button.update(interactive=False), inputs=[], outputs=btn2)\n", "\n", "    drop_base_model.change(lambda: gr.Button.update(interactive=True), inputs=[], outputs=btn_models)\n", "    drop_ip_ckpt.change(lambda: gr.Button.update(interactive=True), inputs=[], outputs=btn_models)\n", "\n", "    btn_models.click(lambda x: noise_sampler_dict[x], inputs=[drop_nsampler], outputs=[noise_scheduler]).then(\n", "        lambda: gr.Button.update(interactive=False), inputs=[], outputs=btn2).then(\n", "        lambda: gr.Button.update(interactive=False), inputs=[], outputs=btn_models).then(\n", "        get_ip_pipe, inputs=[drop_base_model, drop_ip_ckpt, vae_model_path, image_encoder_path, device, noise_scheduler, control_type], outputs=[ip_pipe, output_img],\n", "        show_progress=True).then(lambda: gr.Button.update(interactive=True), inputs=[], outputs=btn2)\n", "\n", "    # sync the ControlNet image into its State, then generate\n", "    btn2.click(lambda x: {img2: x}, inputs=img_control, outputs=img2).then(\n", "        fn=generate_image,\n", "        inputs=[ip_pipe, txt1, txt2,\n", "                img1, img2,\n", "                slider1, txt3, txt4,\n", "                slider2, txt5, slider3, slider4],\n", "        outputs=[output_img],\n", "    )\n", "\n", "demo.queue()\n", "demo.launch(share=True)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "ms_env", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }