diff --git a/annotator/__pycache__/annotator_path.cpython-38.pyc b/annotator/__pycache__/annotator_path.cpython-38.pyc
index d1441b5b495e4e2a7660e4c6b17ece64d1724915..b0c2d14afd26ae97bd062d1b3bce56d8b2addb7f 100644
Binary files a/annotator/__pycache__/annotator_path.cpython-38.pyc and b/annotator/__pycache__/annotator_path.cpython-38.pyc differ
diff --git a/annotator/__pycache__/util.cpython-38.pyc b/annotator/__pycache__/util.cpython-38.pyc
index 7de71069561a570720b8cd8e5d2b7130425007be..9c33de462a49b6f0d09b3199bf6e67666ba8beca 100644
Binary files a/annotator/__pycache__/util.cpython-38.pyc and b/annotator/__pycache__/util.cpython-38.pyc differ
diff --git a/annotator/canny/__pycache__/__init__.cpython-38.pyc b/annotator/canny/__pycache__/__init__.cpython-38.pyc
index fc27fbbe6d27de8e29749d730c469785ef239751..23820d06ff94a4c358cb8c766edddfdddebafff4 100644
Binary files a/annotator/canny/__pycache__/__init__.cpython-38.pyc and b/annotator/canny/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/hed/__pycache__/__init__.cpython-38.pyc b/annotator/hed/__pycache__/__init__.cpython-38.pyc
index 9adcd081b6eab9ca59db501dc030229b888265b9..06e96097569b2a13f32c8a1e3887884a1afa31d9 100644
Binary files a/annotator/hed/__pycache__/__init__.cpython-38.pyc and b/annotator/hed/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/leres/__pycache__/__init__.cpython-38.pyc b/annotator/leres/__pycache__/__init__.cpython-38.pyc
index 4a7fc8c9f6abe7fa36211d9fa156b8a19c771511..b5cec01fcfc2f1df3d65d39bdbd4659e0614804e 100644
Binary files a/annotator/leres/__pycache__/__init__.cpython-38.pyc and b/annotator/leres/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/Resnet.cpython-38.pyc b/annotator/leres/leres/__pycache__/Resnet.cpython-38.pyc
index 5bc68561febb3bfca6b04792c448bfe1c67ee4a6..9374e50289fa91f24634cbad97ec5219f2aa27e6 100644
Binary files a/annotator/leres/leres/__pycache__/Resnet.cpython-38.pyc and b/annotator/leres/leres/__pycache__/Resnet.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/Resnext_torch.cpython-38.pyc b/annotator/leres/leres/__pycache__/Resnext_torch.cpython-38.pyc
index 0cfea532fefca0f97a294b22c5c103a5a2d3b102..53324dad32ba8e3a904a65b2f2a7145722e10b2b 100644
Binary files a/annotator/leres/leres/__pycache__/Resnext_torch.cpython-38.pyc and b/annotator/leres/leres/__pycache__/Resnext_torch.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/depthmap.cpython-38.pyc b/annotator/leres/leres/__pycache__/depthmap.cpython-38.pyc
index 6efe0ca7dad9ef05f0c2475c53988d839b08c4aa..8752b2505093dc3420af8d57c1bd2ca21f8b4b91 100644
Binary files a/annotator/leres/leres/__pycache__/depthmap.cpython-38.pyc and b/annotator/leres/leres/__pycache__/depthmap.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-38.pyc b/annotator/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-38.pyc
index f84e5fcbaae273779e27ef710304334959f89863..d3eee3d5a632a7cd8207f3c71ab95020ab86913f 100644
Binary files a/annotator/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-38.pyc and b/annotator/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/net_tools.cpython-38.pyc b/annotator/leres/leres/__pycache__/net_tools.cpython-38.pyc
index 439e01689db31a1c3efe5beb478ee348085d33be..36ca9cbe6e354e01c89f7c2be683f622642db155 100644
Binary files a/annotator/leres/leres/__pycache__/net_tools.cpython-38.pyc and b/annotator/leres/leres/__pycache__/net_tools.cpython-38.pyc differ
diff --git a/annotator/leres/leres/__pycache__/network_auxi.cpython-38.pyc b/annotator/leres/leres/__pycache__/network_auxi.cpython-38.pyc
index 18325511369f1142ffa0740206b0a1c0938e757e..65f490e5eca70785d06905875d4029a7225a6d3a 100644
Binary files a/annotator/leres/leres/__pycache__/network_auxi.cpython-38.pyc and b/annotator/leres/leres/__pycache__/network_auxi.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/models/__pycache__/__init__.cpython-38.pyc b/annotator/leres/pix2pix/models/__pycache__/__init__.cpython-38.pyc
index 99134849434c5f7546b4a3d23c79e8b5376950b8..d66ede8307a9401af150ba43b80731e4186c57e4 100644
Binary files a/annotator/leres/pix2pix/models/__pycache__/__init__.cpython-38.pyc and b/annotator/leres/pix2pix/models/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/models/__pycache__/base_model.cpython-38.pyc b/annotator/leres/pix2pix/models/__pycache__/base_model.cpython-38.pyc
index 419485277f97ead6c7eb896126fe494e19211e9a..4f580a4f5df0f0ef31e0a6de1ddb0558157daa3e 100644
Binary files a/annotator/leres/pix2pix/models/__pycache__/base_model.cpython-38.pyc and b/annotator/leres/pix2pix/models/__pycache__/base_model.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/models/__pycache__/networks.cpython-38.pyc b/annotator/leres/pix2pix/models/__pycache__/networks.cpython-38.pyc
index eb20b9e80f348c93bd63a8dc7139073a7b2cd8e5..386d2cfa8aa96ef531f178d6156a1a769984b358 100644
Binary files a/annotator/leres/pix2pix/models/__pycache__/networks.cpython-38.pyc and b/annotator/leres/pix2pix/models/__pycache__/networks.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-38.pyc b/annotator/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-38.pyc
index 0ff04d831f071badb47d9f9fdb71f8b64fc8851c..d5606023ef4267a3681fcdcdc2383ce77bf229a6 100644
Binary files a/annotator/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-38.pyc and b/annotator/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc b/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc
index c558da54603e51dcb91044694b532199285f17c8..fa7869857829a66f810fe72fafb3cf532fce4ed1 100644
Binary files a/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc and b/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc b/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc
index fb6c3d92159d8a3db212a0461545472c7ab4ebe5..a27af0a1edfcd0f7bcb5a5e8ed898f572814b237 100644
Binary files a/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc and b/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc b/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc
index cb4aaaf7a67697e816b49ea09b63586bc1911d5c..53f13d9b9bbf8856dd9cd1669b79ff2ff6b92952 100644
Binary files a/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc and b/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/util/__pycache__/__init__.cpython-38.pyc b/annotator/leres/pix2pix/util/__pycache__/__init__.cpython-38.pyc
index 612e1a540b90be961449f320ab768ad61c73555c..da3163fbb2adaf463d4aeff7ff134f72f2fa43c6 100644
Binary files a/annotator/leres/pix2pix/util/__pycache__/__init__.cpython-38.pyc and b/annotator/leres/pix2pix/util/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/leres/pix2pix/util/__pycache__/util.cpython-38.pyc b/annotator/leres/pix2pix/util/__pycache__/util.cpython-38.pyc
index f9515a1c8e81389df185a4f7e86ed25b35be61b2..1b34f0f25f81f5f945a8aa6512b2f5cada9b79e9 100644
Binary files a/annotator/leres/pix2pix/util/__pycache__/util.cpython-38.pyc and b/annotator/leres/pix2pix/util/__pycache__/util.cpython-38.pyc differ
diff --git a/annotator/lineart/__pycache__/__init__.cpython-38.pyc b/annotator/lineart/__pycache__/__init__.cpython-38.pyc
index d1b7047cffe8560fd6ece6318567d6187dbdc107..cbaa4d47c75da87667dc3c6dfc63b4f4da804fa9 100644
Binary files a/annotator/lineart/__pycache__/__init__.cpython-38.pyc and b/annotator/lineart/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/lineart_anime/__pycache__/__init__.cpython-38.pyc b/annotator/lineart_anime/__pycache__/__init__.cpython-38.pyc
index 235383b547c83288ef94fd93b2e2a67bba3a0035..e0abb3449200e9f2450b6791d62f291ec0b9b1a1 100644
Binary files a/annotator/lineart_anime/__pycache__/__init__.cpython-38.pyc and b/annotator/lineart_anime/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/manga_line/__pycache__/__init__.cpython-38.pyc b/annotator/manga_line/__pycache__/__init__.cpython-38.pyc
index 647532eae30232f15e0e1ff8c7566e3baf8108a5..b64a9ea69844c0f4a1991ce948f3c88eca51ed15 100644
Binary files a/annotator/manga_line/__pycache__/__init__.cpython-38.pyc and b/annotator/manga_line/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/__pycache__/__init__.cpython-38.pyc b/annotator/midas/__pycache__/__init__.cpython-38.pyc
index 1a78a122a9dc7c7336a83cfbaebe38324b537f00..a856613c9d00d867114cc56e0ca22abafd637eef 100644
Binary files a/annotator/midas/__pycache__/__init__.cpython-38.pyc and b/annotator/midas/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/__pycache__/api.cpython-38.pyc b/annotator/midas/__pycache__/api.cpython-38.pyc
index b2b90eba199bea1bf99701591dfab78eada12016..5203ed1d29fff4140d9b55d5c2caf5b92936b30c 100644
Binary files a/annotator/midas/__pycache__/api.cpython-38.pyc and b/annotator/midas/__pycache__/api.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc b/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc
index 88ca7c8b9cbbdf2654ae235f3ed4ad790a6f08bc..13f3dbcc8fb34b5ffdcc5570391637b056768ed2 100644
Binary files a/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc and b/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc b/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc
index 236af7e76e820fcc7d15635a8a739420e8d562ab..64f8d8880549da2da0377593fddc426efc81d998 100644
Binary files a/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc and b/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc b/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc
index fb79c1f1ed1869da226d0f8a839b0c3ff0e08414..38f0f02fcb47aa229acff0d27beef13129567b38 100644
Binary files a/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc and b/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc b/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc
index 1c1a0af87bdd792f6cb03a265fa9e363457bc0a6..6a270369dec9803ce9f4fe5a1f203f57b35f1737 100644
Binary files a/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc and b/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc b/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc
index 0b139fbcdcc2786507c393648a120447d1078f2e..bc249e125d63b47219f225574705d3af6922e89a 100644
Binary files a/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc and b/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc
index e1a677fc5a16aa46c09760f10d727905c5906f2b..edcaa5ab325ed6f4e232f589d94767833b6c93e2 100644
Binary files a/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc and b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc b/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc
index 67788856bba05771671dde53791d8df3fedf4351..ece7471c38c15208b6a872e9e1099e487b3eb694 100644
Binary files a/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc and b/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/vit.cpython-38.pyc b/annotator/midas/midas/__pycache__/vit.cpython-38.pyc
index 89518da178b00c9c2f37343cff2bfc1f92af2ee9..123f852dc073736ff940504855363da170e6acf0 100644
Binary files a/annotator/midas/midas/__pycache__/vit.cpython-38.pyc and b/annotator/midas/midas/__pycache__/vit.cpython-38.pyc differ
diff --git a/annotator/pidinet/__pycache__/__init__.cpython-38.pyc b/annotator/pidinet/__pycache__/__init__.cpython-38.pyc
index 3fa82ae59ed9604e4f7bc1f6256dbd9040f073b0..43687e2368bb1f15b12aba21c92a753a4b005a37 100644
Binary files a/annotator/pidinet/__pycache__/__init__.cpython-38.pyc and b/annotator/pidinet/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/pidinet/__pycache__/model.cpython-38.pyc b/annotator/pidinet/__pycache__/model.cpython-38.pyc
index 7f8bd847b4fad2c9839e1e5959bcfc85bc4876da..b7c35dd472b57ee31eab7223081a579602a8cd83 100644
Binary files a/annotator/pidinet/__pycache__/model.cpython-38.pyc and b/annotator/pidinet/__pycache__/model.cpython-38.pyc differ
diff --git a/annotator/zoe/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/__pycache__/__init__.cpython-38.pyc
index 81144df495a79aff5084834168b8e246622cf7bb..f5b8fafbf38508cd44bc04ea2674071c86703692 100644
Binary files a/annotator/zoe/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc
index f08c6d4c976345738891772be21135716817cabb..a61b7861284bfe3be0c1f43cc873baad6b025bb1 100644
Binary files a/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc b/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc
index 858751e616384ed7d3d0ebb87c03a755c51f19c8..f9832a9d0345d1be336ec579334899eaf6ae4672 100644
Binary files a/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc and b/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc b/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc
index 20da2c48b3520731584967b1d52267303e0c0812..f63682d4f3cb2814e6cd650399d75033e53fc08b 100644
Binary files a/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc and b/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc
index e171ed0c5163cbed968e0ec055eb73fa20800667..73e4ca1aab002602ff5657ef0332380505291ef1 100644
Binary files a/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc b/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc
index 6176fb965624472b6f75106ef081a0f62b40e0e4..2ef2c9fd7f187c05a3cd6b54dda0c09522d48771 100644
Binary files a/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc and b/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc b/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc
index 8f1e95bd88560a72f7175760a20236b5ef5307f0..bf3bf8b0f65c65d475e3e765929dae92ac3d497d 100644
Binary files a/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc and b/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc b/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc
index 5d3fc3050ba1f82c53cb85fcde29132bc5347168..746956e865f1930d560f8af6e7728a930a94680b 100644
Binary files a/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc and b/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc b/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc
index 865c37c2e3ab813f59aa9047f473f8218e1d3efd..314c87ea7efff7776c42a558210f587e8ae13acf 100644
Binary files a/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc and b/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc
index 74ad5fc4d66198dd66efc83800be5d4f6652a1d5..db04385fd58697ff2f1aef244d490b0ecb5b594c 100644
Binary files a/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc b/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc
index 5dce8f64be9acf664d588c9cfac15d34e17b5770..77de2a9c6b6248ab885418b64eca9d1e15ea7ec7 100644
Binary files a/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc and b/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/utils/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/zoedepth/utils/__pycache__/__init__.cpython-38.pyc
index f64a243ccdce7213de08ca1c859d3cad911cf7a5..00bfe2d2630eac562c16972e8f1326bc58664288 100644
Binary files a/annotator/zoe/zoedepth/utils/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/zoedepth/utils/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/utils/__pycache__/arg_utils.cpython-38.pyc b/annotator/zoe/zoedepth/utils/__pycache__/arg_utils.cpython-38.pyc
index a5ab0b6e7775642225678ecf62d80e810a5323bd..ac335e6536a7193a6797b2a5fd576e90932d0d78 100644
Binary files a/annotator/zoe/zoedepth/utils/__pycache__/arg_utils.cpython-38.pyc and b/annotator/zoe/zoedepth/utils/__pycache__/arg_utils.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/utils/__pycache__/config.cpython-38.pyc b/annotator/zoe/zoedepth/utils/__pycache__/config.cpython-38.pyc
index d01ae4f5bb18efe49d653a6abb496d0b58ee0aed..d58256f70b55d1dac7565b7a78a02df84b241664 100644
Binary files a/annotator/zoe/zoedepth/utils/__pycache__/config.cpython-38.pyc and b/annotator/zoe/zoedepth/utils/__pycache__/config.cpython-38.pyc differ
diff --git a/annotator/zoe/zoedepth/utils/easydict/__pycache__/__init__.cpython-38.pyc b/annotator/zoe/zoedepth/utils/easydict/__pycache__/__init__.cpython-38.pyc
index 660045e10b0da84321b9b98b547db5d5c3597532..4313f5905e268fc4605e39285f1d22f8afab0caf 100644
Binary files a/annotator/zoe/zoedepth/utils/easydict/__pycache__/__init__.cpython-38.pyc and b/annotator/zoe/zoedepth/utils/easydict/__pycache__/__init__.cpython-38.pyc differ
diff --git a/app.py b/app.py
index 316a16a08b8e8668cee07426affb730f8942a5fe..3cd22af2d83ac2f5bacfb04b10bee8354fe3c10e 100644
--- a/app.py
+++ b/app.py
@@ -1,319 +1,18 @@
 import gradio as gr
-import cv2
-import os
-import torch
-import argparse
-import os
-import sys
-import yaml
-import datetime
-sys.path.append(os.path.dirname(os.getcwd()))
-from pipelines.sd_controlnet_rave import RAVE
-from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet
-import shutil
-import subprocess
-import utils.constants as const
-import utils.video_grid_utils as vgu
-import warnings
-warnings.filterwarnings("ignore")
-import pprint
-import glob
+from transformers import pipeline
 
+pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
 
-def init_device():
-    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
-    device = torch.device(device_name)
-    return device
+def predict(input_img):
+    predictions = pipeline(input_img)
+    return input_img, {p["label"]: p["score"] for p in predictions}
 
-def init_paths(input_ns):
-    if input_ns.save_folder == None or input_ns.save_folder == '':
-        input_ns.save_folder = input_ns.video_name
-    else:
-        input_ns.save_folder = os.path.join(input_ns.save_folder, input_ns.video_name)
-    save_dir = os.path.join(const.OUTPUT_PATH, input_ns.save_folder)
-    os.makedirs(save_dir, exist_ok=True)
-    save_idx = max([int(x[-5:]) for x in os.listdir(save_dir)])+1 if os.listdir(save_dir) != [] else 0
-    input_ns.save_path = os.path.join(save_dir, f'{input_ns.positive_prompts}-{str(save_idx).zfill(5)}')
-
-
-    if '-' in input_ns.preprocess_name:
-        input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[i] for i in input_ns.preprocess_name.split('-')]
-    else:
-        input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name]
-    input_ns.hf_path = "runwayml/stable-diffusion-v1-5"
-
-    input_ns.inverse_path = os.path.join(const.GENERATED_DATA_PATH, 'inverses', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}')
-    input_ns.control_path = os.path.join(const.GENERATED_DATA_PATH, 'controls', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}')
-    os.makedirs(input_ns.control_path, exist_ok=True)
-    os.makedirs(input_ns.inverse_path, exist_ok=True)
-    os.makedirs(input_ns.save_path, exist_ok=True)
-    return input_ns
-
-def install_civitai_model(model_id):
-    full_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, '*')
-    if len(glob.glob(full_path)) > 0:
-        full_path = glob.glob(full_path)[0]
-        return full_path
-    install_path = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors')
-    install_path_model = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors', model_id)
-    diffusers_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id)
-    convert_py_path = os.path.join(const.CWD, 'CIVIT_AI', 'convert.py')
-    os.makedirs(install_path, exist_ok=True)
-    os.makedirs(diffusers_path, exist_ok=True)
-    subprocess.run(f'wget https://civitai.com/api/download/models/{model_id} --content-disposition --directory {install_path_model}'.split())
-    model_name = glob.glob(os.path.join(install_path, model_id, '*'))[0]
-    model_name2 = os.path.basename(glob.glob(os.path.join(install_path, model_id, '*'))[0]).replace('.safetensors', '')
-    diffusers_path_model_name = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, model_name2)
-    print(model_name)
-    subprocess.run(f'python {convert_py_path} --checkpoint_path {model_name} --dump_path {diffusers_path_model_name} --from_safetensors'.split())
-    subprocess.run(f'rm -rf {install_path}'.split())
-    return diffusers_path_model_name
-
-def run(*args):
-    list_of_inputs = [x for x in args]
-    input_ns = argparse.Namespace(**{})
-    input_ns.video_path = list_of_inputs[0] # video_path
-    input_ns.video_name = os.path.basename(input_ns.video_path).replace('.mp4', '').replace('.gif', '')
-    input_ns.preprocess_name = list_of_inputs[1]
-
-    input_ns.batch_size = list_of_inputs[2]
-    input_ns.batch_size_vae = list_of_inputs[3]
-
-    input_ns.cond_step_start = list_of_inputs[4]
-    input_ns.controlnet_conditioning_scale = list_of_inputs[5]
-    input_ns.controlnet_guidance_end = list_of_inputs[6]
-    input_ns.controlnet_guidance_start = list_of_inputs[7]
-
-    input_ns.give_control_inversion = list_of_inputs[8]
-
-    input_ns.grid_size = list_of_inputs[9]
-    input_ns.sample_size = list_of_inputs[10]
-    input_ns.pad = list_of_inputs[11]
-    input_ns.guidance_scale = list_of_inputs[12]
-    input_ns.inversion_prompt = list_of_inputs[13]
-
-    input_ns.is_ddim_inversion = list_of_inputs[14]
-    input_ns.is_shuffle = list_of_inputs[15]
-
-    input_ns.negative_prompts = list_of_inputs[16]
-    input_ns.num_inference_steps = list_of_inputs[17]
-    input_ns.num_inversion_step = list_of_inputs[18]
-    input_ns.positive_prompts = list_of_inputs[19]
-    input_ns.save_folder = list_of_inputs[20]
-
-    input_ns.seed = list_of_inputs[21]
-    input_ns.model_id = const.MODEL_IDS[list_of_inputs[22]]
-    # input_ns.width = list_of_inputs[23]
-    # input_ns.height = list_of_inputs[24]
-    # input_ns.original_size = list_of_inputs[25]
-    diffusers_model_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models')
-    os.makedirs(diffusers_model_path, exist_ok=True)
-    if 'model_id' not in list(input_ns.__dict__.keys()):
-        input_ns.model_id = "None"
-
-    if str(input_ns.model_id) != 'None':
-        input_ns.model_id = install_civitai_model(input_ns.model_id)
-
-
-    device = init_device()
-    input_ns = init_paths(input_ns)
-
-    input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad)
-
-    print(input_ns.video_path)
-    input_ns.sample_size = len(input_ns.image_pil_list)
-    print(f'Frame count: {len(input_ns.image_pil_list)}')
-
-    controlnet_class = RAVE_MultiControlNet if '-' in str(input_ns.controlnet_conditioning_scale) else RAVE
-
-
-    CN = controlnet_class(device)
-
-    CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id)
-
-    input_dict = vars(input_ns)
-    pp = pprint.PrettyPrinter(indent=4)
-    pp.pprint(input_dict)
-    yaml_dict = {k:v for k,v in input_dict.items() if k != 'image_pil_list'}
-
-    start_time = datetime.datetime.now()
-    if '-' in str(input_ns.controlnet_conditioning_scale):
-        res_vid, control_vid_1, control_vid_2 = CN(input_dict)
-    else:
-        res_vid, control_vid = CN(input_dict)
-    end_time = datetime.datetime.now()
-    save_name = f"{'-'.join(input_ns.positive_prompts.split())}_cstart-{input_ns.controlnet_guidance_start}_gs-{input_ns.guidance_scale}_pre-{'-'.join((input_ns.preprocess_name.replace('-','+').split('_')))}_cscale-{input_ns.controlnet_conditioning_scale}_grid-{input_ns.grid_size}_pad-{input_ns.pad}_model-{os.path.basename(input_ns.model_id)}"
-    res_vid[0].save(os.path.join(input_ns.save_path, f'{save_name}.gif'), save_all=True, append_images=res_vid[1:], loop=10000)
-    control_vid[0].save(os.path.join(input_ns.save_path, f'control_{save_name}.gif'), save_all=True, append_images=control_vid[1:], optimize=False, loop=10000)
-
-    yaml_dict['total_time'] = (end_time - start_time).total_seconds()
-    yaml_dict['total_number_of_frames'] = len(res_vid)
-    yaml_dict['sec_per_frame'] = yaml_dict['total_time']/yaml_dict['total_number_of_frames']
-    with open(os.path.join(input_ns.save_path, 'config.yaml'), 'w') as yaml_file:
-        yaml.dump(yaml_dict, yaml_file)
-
-    return os.path.join(input_ns.save_path, f'{save_name}.gif'), os.path.join(input_ns.save_path, f'control_{save_name}.gif')
-
-
-def output_video_fn(video_path):
-    fold_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos")
-    video_path = os.path.join(fold_path, os.path.basename(video_path).replace('input', 'output'))
-    return video_path
-
-block = gr.Blocks().queue()
-with block:
-    with gr.Row():
-        gr.Markdown('## RAVE')
-    with gr.Row():
-        with gr.Column():
-            with gr.Row():
-                input_path = gr.File(label='Upload Input Video', file_types=['.mp4'], scale=1)
-
-                inputs = gr.Video(label='Input Video',
-                                  format='mp4',
-                                  visible=True,
-                                  interactive=False,
-                                  scale=5)
-            input_path.upload(lambda x:x, inputs=[input_path], outputs=[inputs])
-
-            with gr.Row():
-                positive_prompts = gr.Textbox(label='Positive prompts')
-                negative_prompts = gr.Textbox(label='Negative prompts')
-            with gr.Row():
-                preprocess_name = gr.Dropdown(const.PREPROCESSOR_DICT.keys(),
-                                              label='Control type',
-                                              value='depth_zoe')
-                guidance_scale = gr.Slider(label='Guidance scale',
-                                           minimum=0,
-                                           maximum=40,
-                                           step=0.1,
-                                           value=7.5)
-
-            with gr.Row():
-                inversion_prompt = gr.Textbox(label='Inversion prompt')
-                seed = gr.Slider(label='Seed',
-                                 minimum=0,
-                                 maximum=2147483647,
-                                 step=1,
-                                 value=0,
-                                 randomize=True)
-
-            with gr.Row():
-                model_id = gr.Dropdown(const.MODEL_IDS,
-                                       label='Model id',
-                                       value='SD 1.5')
-                save_folder = gr.Textbox(label='Save folder')
-
-            run_button = gr.Button(value='Run All')
-            with gr.Accordion('Configuration',
-                              open=False):
-                with gr.Row():
-                    batch_size = gr.Slider(label='Batch size',
-                                           minimum=1,
-                                           maximum=36,
-                                           value=4,
-                                           step=1)
-                    batch_size_vae = gr.Slider(label='Batch size of VAE',
-                                               minimum=1,
-                                               maximum=36,
-                                               value=1,
-                                               step=1)
-
-                with gr.Row():
-                    is_ddim_inversion = gr.Checkbox(
-                        label='Use DDIM Inversion',
-                        value=True)
-                    is_shuffle = gr.Checkbox(
-                        label='Shuffle',
-                        value=True)
-
-                with gr.Row():
-                    num_inference_steps = gr.Slider(label='Number of inference steps',
-                                                    minimum=1,
-                                                    maximum=100,
-                                                    value=20,
-                                                    step=1)
-                    num_inversion_step = gr.Slider(label='Number of inversion steps',
-                                                   minimum=1,
-                                                   maximum=100,
-                                                   value=20,
-                                                   step=1)
-                    cond_step_start = gr.Slider(label='Conditioning step start',
-                                                minimum=0,
-                                                maximum=1.0,
-                                                value=0.0,
-                                                step=0.1)
-
-                with gr.Row():
-                    controlnet_conditioning_scale = gr.Slider(label='ControlNet conditioning scale',
-                                                              minimum=0.0,
-                                                              maximum=1.0,
-                                                              value=1.0,
-                                                              step=0.01)
-                    controlnet_guidance_end = gr.Slider(label='ControlNet guidance end',
-                                                        minimum=0.0,
-                                                        maximum=1.0,
-                                                        value=1.0,
-                                                        step=0.01)
-                    controlnet_guidance_start = gr.Slider(label='ControlNet guidance start',
-                                                          minimum=0.0,
-                                                          maximum=1.0,
-                                                          value=0.0,
-                                                          step=0.01)
-                    give_control_inversion = gr.Checkbox(
-                        label='Give control during inversion',
-                        value=True)
-
-                with gr.Row():
-                    grid_size = gr.Slider(label='Grid size',
-                                          minimum=1,
-                                          maximum=10,
-                                          value=3,
-                                          step=1)
-                    sample_size = gr.Slider(label='Sample size',
-                                            minimum=-1,
-                                            maximum=100,
-                                            value=-1,
-                                            step=1)
-                    pad = gr.Slider(label='Pad',
-                                    minimum=1,
-                                    maximum=10,
-                                    value=1,
-                                    step=1)
-
-
-
-        with gr.Column():
-            with gr.Row():
-                result_video = gr.Image(label='Edited Video',
-                                        interactive=False)
-                control_video = gr.Image(label='Control Video',
-                                         interactive=False)
-
-            with gr.Row():
-                example_input = gr.Video(label='Input Example',
-                                         format='mp4',
-                                         visible=True,
-                                         interactive=False)
-                example_output = gr.Video(label='Output Example',
-                                          format='mp4',
-                                          visible=True,
-                                          interactive=False)
-            # input(os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos", "exp_input_1.mp4"))
-            gr.Markdown("## Video Examples")
-            gr.Examples(
-                examples=[os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos", "exp_input_1.mp4")],
-                inputs=example_input,
-                outputs=example_output,
-                fn=output_video_fn,
-                cache_examples=True,)
-
-    inputs = [input_path, preprocess_name, batch_size, batch_size_vae, cond_step_start, controlnet_conditioning_scale, controlnet_guidance_end, controlnet_guidance_start, give_control_inversion, grid_size, sample_size, pad, guidance_scale, inversion_prompt, is_ddim_inversion, is_shuffle, negative_prompts, num_inference_steps, num_inversion_step, positive_prompts, save_folder, seed, model_id]
-
-    run_button.click(fn=run,
-                     inputs=inputs,
-                     outputs=[result_video, control_video])
+gradio_app = gr.Interface(
+    predict,
+    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+    title="Hot Dog? Or Not?",
+)
 
 if __name__ == "__main__":
-
-    block.launch()
\ No newline at end of file
+    gradio_app.launch()
\ No newline at end of file
diff --git a/app_.py b/app_.py
new file mode 100644
index 0000000000000000000000000000000000000000..84aff79882f45fd916046fabcb6564ac7735998d
--- /dev/null
+++ b/app_.py
@@ -0,0 +1,319 @@
+import gradio as gr
+import cv2
+import os
+import torch
+import argparse
+import os
+import sys
+import yaml
+import datetime
+sys.path.append(os.path.dirname(os.getcwd()))
+from pipelines.sd_controlnet_rave import RAVE
+from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet
+import shutil
+import subprocess
+import utils.constants as const
+import utils.video_grid_utils as vgu
+import warnings
+warnings.filterwarnings("ignore")
+import pprint
+import glob
+
+
+def init_device():
+    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device = torch.device(device_name)
+    return device
+
+def init_paths(input_ns):
+    if input_ns.save_folder == None or input_ns.save_folder == '':
+        input_ns.save_folder = input_ns.video_name
+    else:
+        input_ns.save_folder = os.path.join(input_ns.save_folder, input_ns.video_name)
+    save_dir = os.path.join(const.OUTPUT_PATH, input_ns.save_folder)
+    os.makedirs(save_dir, exist_ok=True)
+    save_idx = max([int(x[-5:]) for x in os.listdir(save_dir)])+1 if os.listdir(save_dir) != [] else 0
+    input_ns.save_path = os.path.join(save_dir, f'{input_ns.positive_prompts}-{str(save_idx).zfill(5)}')
+
+
+    if '-' in input_ns.preprocess_name:
+        input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[i] for i in input_ns.preprocess_name.split('-')]
+    else:
+        input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name]
+    input_ns.hf_path = "runwayml/stable-diffusion-v1-5"
+
+    input_ns.inverse_path = os.path.join(const.GENERATED_DATA_PATH, 'inverses', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}')
+    input_ns.control_path = os.path.join(const.GENERATED_DATA_PATH, 'controls', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}')
+    os.makedirs(input_ns.control_path, exist_ok=True)
+    os.makedirs(input_ns.inverse_path, exist_ok=True)
+    os.makedirs(input_ns.save_path, exist_ok=True)
+    return input_ns
+
+def install_civitai_model(model_id):
+    full_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, '*')
+    if len(glob.glob(full_path)) > 0:
+        full_path = glob.glob(full_path)[0]
+        return full_path
+    install_path = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors')
+    install_path_model = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors', model_id)
+    diffusers_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id)
+    convert_py_path = os.path.join(const.CWD, 'CIVIT_AI', 'convert.py')
+    os.makedirs(install_path, exist_ok=True)
+    os.makedirs(diffusers_path, exist_ok=True)
+    subprocess.run(f'wget https://civitai.com/api/download/models/{model_id} --content-disposition --directory {install_path_model}'.split())
+    model_name = glob.glob(os.path.join(install_path, model_id, '*'))[0]
+    model_name2 = os.path.basename(glob.glob(os.path.join(install_path, model_id, '*'))[0]).replace('.safetensors', '')
+    diffusers_path_model_name = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, model_name2)
+    print(model_name)
+    subprocess.run(f'python {convert_py_path} --checkpoint_path {model_name} --dump_path {diffusers_path_model_name} --from_safetensors'.split())
+    subprocess.run(f'rm -rf {install_path}'.split())
+    return diffusers_path_model_name
+
+def run(*args):
+    list_of_inputs = [x for x in args]
+    input_ns = argparse.Namespace(**{})
+    input_ns.video_path = list_of_inputs[0] # video_path
+    input_ns.video_name = os.path.basename(input_ns.video_path).replace('.mp4', '').replace('.gif', '')
+    input_ns.preprocess_name = list_of_inputs[1]
+
+    input_ns.batch_size = list_of_inputs[2]
+    input_ns.batch_size_vae = list_of_inputs[3]
+
+    input_ns.cond_step_start = list_of_inputs[4]
+    input_ns.controlnet_conditioning_scale = list_of_inputs[5]
+    input_ns.controlnet_guidance_end = list_of_inputs[6]
+    input_ns.controlnet_guidance_start = list_of_inputs[7]
+
+    input_ns.give_control_inversion = list_of_inputs[8]
+
+    input_ns.grid_size = list_of_inputs[9]
+    input_ns.sample_size = list_of_inputs[10]
+    input_ns.pad = list_of_inputs[11]
+    input_ns.guidance_scale = list_of_inputs[12]
+    input_ns.inversion_prompt = list_of_inputs[13]
+
+    input_ns.is_ddim_inversion = list_of_inputs[14]
+    input_ns.is_shuffle = list_of_inputs[15]
+
+    input_ns.negative_prompts = list_of_inputs[16]
+    input_ns.num_inference_steps = list_of_inputs[17]
+    input_ns.num_inversion_step = list_of_inputs[18]
+    input_ns.positive_prompts = list_of_inputs[19]
+    input_ns.save_folder = list_of_inputs[20]
+
+    input_ns.seed = list_of_inputs[21]
+    input_ns.model_id = const.MODEL_IDS[list_of_inputs[22]]
+    # input_ns.width = list_of_inputs[23]
+    # input_ns.height = list_of_inputs[24]
+    # input_ns.original_size = list_of_inputs[25]
+    diffusers_model_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models')
+    os.makedirs(diffusers_model_path, exist_ok=True)
+    if 'model_id' not in list(input_ns.__dict__.keys()):
+        input_ns.model_id = "None"
+
+    if str(input_ns.model_id) != 'None':
+        input_ns.model_id = install_civitai_model(input_ns.model_id)
+
+
+    device = init_device()
+    input_ns = init_paths(input_ns)
+
+    input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad)
+
+    print(input_ns.video_path)
+    input_ns.sample_size = len(input_ns.image_pil_list)
+    print(f'Frame count: {len(input_ns.image_pil_list)}')
+
+    controlnet_class = RAVE_MultiControlNet if '-' in str(input_ns.controlnet_conditioning_scale) else RAVE
+
+
+    CN = controlnet_class(device)
+
+    CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id)
+
+    input_dict = vars(input_ns)
+    pp = pprint.PrettyPrinter(indent=4)
+    pp.pprint(input_dict)
+    yaml_dict = {k:v for k,v in input_dict.items() if k != 'image_pil_list'}
+
+    start_time = datetime.datetime.now()
+    if '-' in str(input_ns.controlnet_conditioning_scale):
+        res_vid, control_vid_1, control_vid_2 = CN(input_dict)
+    else:
+        res_vid, control_vid = CN(input_dict)
+    end_time = datetime.datetime.now()
+    save_name = f"{'-'.join(input_ns.positive_prompts.split())}_cstart-{input_ns.controlnet_guidance_start}_gs-{input_ns.guidance_scale}_pre-{'-'.join((input_ns.preprocess_name.replace('-','+').split('_')))}_cscale-{input_ns.controlnet_conditioning_scale}_grid-{input_ns.grid_size}_pad-{input_ns.pad}_model-{os.path.basename(input_ns.model_id)}"
+    res_vid[0].save(os.path.join(input_ns.save_path, f'{save_name}.gif'), save_all=True, append_images=res_vid[1:], loop=10000)
+    control_vid[0].save(os.path.join(input_ns.save_path, f'control_{save_name}.gif'), save_all=True, append_images=control_vid[1:], optimize=False, loop=10000)
+
+    yaml_dict['total_time'] = (end_time - start_time).total_seconds()
+    yaml_dict['total_number_of_frames'] = len(res_vid)
+    yaml_dict['sec_per_frame'] = yaml_dict['total_time']/yaml_dict['total_number_of_frames']
+    with open(os.path.join(input_ns.save_path, 'config.yaml'), 'w') as yaml_file:
+        yaml.dump(yaml_dict, yaml_file)
+
+    return os.path.join(input_ns.save_path, f'{save_name}.gif'), os.path.join(input_ns.save_path, f'control_{save_name}.gif')
+
+
+def output_video_fn(video_path):
+    fold_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos")
+    video_path = os.path.join(fold_path, os.path.basename(video_path).replace('input', 'output'))
+    return video_path
+
+block = gr.Blocks().queue()
+with block:
+    with gr.Row():
+        gr.Markdown('## RAVE')
+    with gr.Row():
+        with gr.Column():
+            with gr.Row():
+                input_path = gr.File(label='Upload Input Video', file_types=['.mp4'], scale=1)
+
+                inputs = gr.Video(label='Input Video',
+                                  format='mp4',
+                                  visible=True,
+                                  interactive=False,
+                                  scale=5)
+            input_path.upload(lambda x:x, inputs=[input_path], outputs=[inputs])
+
+            with gr.Row():
+                positive_prompts = gr.Textbox(label='Positive prompts')
+                negative_prompts = gr.Textbox(label='Negative prompts')
+            with gr.Row():
+                preprocess_name = gr.Dropdown(const.PREPROCESSOR_DICT.keys(),
+                                              label='Control type',
+                                              value='depth_zoe')
+                guidance_scale = gr.Slider(label='Guidance scale',
+                                           minimum=0,
+                                           maximum=40,
+                                           step=0.1,
+                                           value=7.5)
+
+            with gr.Row():
+                inversion_prompt = gr.Textbox(label='Inversion prompt')
+                seed = gr.Slider(label='Seed',
+                                 minimum=0,
+                                 maximum=2147483647,
+                                 step=1,
+                                 value=0,
+                                 randomize=True)
+
+            with gr.Row():
+                model_id = gr.Dropdown(const.MODEL_IDS,
+                                       label='Model id',
+                                       value='SD 1.5')
+                save_folder = gr.Textbox(label='Save folder')
+
+            run_button = gr.Button(value='Run All')
+            with gr.Accordion('Configuration',
+                              open=False):
+                with gr.Row():
+                    batch_size = gr.Slider(label='Batch size',
+                                           minimum=1,
+                                           maximum=36,
+                                           value=4,
+                                           step=1)
+                    batch_size_vae = gr.Slider(label='Batch size of VAE',
+                                               minimum=1,
+                                               maximum=36,
+                                               value=1,
+                                               step=1)
+
+                with gr.Row():
+                    is_ddim_inversion = gr.Checkbox(
+                        label='Use DDIM Inversion',
+                        value=True)
+                    is_shuffle = gr.Checkbox(
+                        label='Shuffle',
+                        value=True)
+
+                with gr.Row():
+                    num_inference_steps = gr.Slider(label='Number of inference steps',
+                                                    minimum=1,
+                                                    maximum=100,
+                                                    value=20,
+                                                    step=1)
+                    num_inversion_step = gr.Slider(label='Number of inversion steps',
+                                                   minimum=1,
+                                                   maximum=100,
+                                                   value=20,
+                                                   step=1)
+                    cond_step_start = gr.Slider(label='Conditioning step start',
+                                                minimum=0,
+                                                maximum=1.0,
+                                                value=0.0,
+                                                step=0.1)
+
+                with gr.Row():
+                    controlnet_conditioning_scale = gr.Slider(label='ControlNet conditioning scale',
+                                                              minimum=0.0,
+                                                              maximum=1.0,
+                                                              value=1.0,
+                                                              step=0.01)
+                    controlnet_guidance_end = gr.Slider(label='ControlNet guidance end',
+                                                        minimum=0.0,
+                                                        maximum=1.0,
+                                                        value=1.0,
+                                                        step=0.01)
+                    controlnet_guidance_start = gr.Slider(label='ControlNet guidance start',
+                                                          minimum=0.0,
+                                                          maximum=1.0,
+                                                          value=0.0,
+                                                          step=0.01)
+                    give_control_inversion = gr.Checkbox(
+                        label='Give control during inversion',
+                        value=True)
+
+                with gr.Row():
+                    grid_size = gr.Slider(label='Grid size',
+                                          minimum=1,
+                                          maximum=10,
+                                          value=3,
+                                          step=1)
+                    sample_size = gr.Slider(label='Sample size',
+                                            minimum=-1,
+                                            maximum=100,
+                                            value=-1,
+                                            step=1)
+                    pad = gr.Slider(label='Pad',
+                                    minimum=1,
+                                    maximum=10,
+                                    value=1,
+                                    step=1)
+
+
+
+        with gr.Column():
+            with gr.Row():
+                result_video = gr.Image(label='Edited Video',
+                                        interactive=False)
+                control_video = gr.Image(label='Control Video',
+                                         interactive=False)
+
+            with gr.Row():
+                example_input = gr.Video(label='Input Example',
+                                         format='mp4',
+                                         visible=True,
+                                         interactive=False)
+                example_output = gr.Video(label='Output Example',
+                                          format='mp4',
+                                          visible=True,
+                                          interactive=False)
+            # input(os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos", "exp_input_1.mp4"))
+            gr.Markdown("## Video Examples")
+            gr.Examples(
+                examples=[os.path.join(os.path.dirname(os.path.abspath(__file__)), "example_videos", "exp_input_1.mp4")],
+                inputs=example_input,
+                outputs=example_output,
+                fn=output_video_fn,
+                cache_examples=True,)
+
+    inputs = [input_path, preprocess_name, batch_size, batch_size_vae, cond_step_start, controlnet_conditioning_scale, controlnet_guidance_end, controlnet_guidance_start, give_control_inversion, grid_size, sample_size, pad, guidance_scale, inversion_prompt, is_ddim_inversion, is_shuffle, negative_prompts, num_inference_steps, num_inversion_step, positive_prompts, save_folder, seed, model_id]
+
+    run_button.click(fn=run,
+                     inputs=inputs,
+                     outputs=[result_video, control_video])
+
+if __name__ == "__main__":
+
+    block.launch(share=True)
\ No newline at end of file
diff --git a/gradio_cached_examples/59/Output Example/994f36ecf77e57c9b298/exp_output_1.mp4 b/gradio_cached_examples/59/Output Example/994f36ecf77e57c9b298/exp_output_1.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..c54ca9104699be6d7ac1ccd29a3ee5b1e4813376
Binary files /dev/null and b/gradio_cached_examples/59/Output Example/994f36ecf77e57c9b298/exp_output_1.mp4 differ
diff --git a/gradio_cached_examples/59/log.csv b/gradio_cached_examples/59/log.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ba13f3bfa531dc5d368ab0d34e8728c5a4d7a50a
--- /dev/null
+++ b/gradio_cached_examples/59/log.csv
@@ -0,0 +1,2 @@
+Output Example,flag,username,timestamp
+"{""video"":{""path"":""gradio_cached_examples/59/Output Example/994f36ecf77e57c9b298/exp_output_1.mp4"",""url"":null,""size"":null,""orig_name"":""exp_output_1.mp4"",""mime_type"":null},""subtitles"":null}",,,2023-12-17 12:08:44.852661
diff --git a/pipelines/__pycache__/sd_controlnet_rave.cpython-38.pyc b/pipelines/__pycache__/sd_controlnet_rave.cpython-38.pyc
index beeb9a82bdcfc55e50a85d45371ca34b0bddac11..f69a67754e39098e98f97d84df5d7bc3e7c2ceb1 100644
Binary files a/pipelines/__pycache__/sd_controlnet_rave.cpython-38.pyc and b/pipelines/__pycache__/sd_controlnet_rave.cpython-38.pyc differ
diff --git a/pipelines/__pycache__/sd_multicontrolnet_rave.cpython-38.pyc b/pipelines/__pycache__/sd_multicontrolnet_rave.cpython-38.pyc
index 4d4ff993bac21aafeb5b10c5403eae094b66133b..e2a18af0f8f0dcc03ad7fcdf6a18e71f7a77b0fb 100644
Binary files a/pipelines/__pycache__/sd_multicontrolnet_rave.cpython-38.pyc and b/pipelines/__pycache__/sd_multicontrolnet_rave.cpython-38.pyc differ
diff --git a/utils/__pycache__/constants.cpython-38.pyc b/utils/__pycache__/constants.cpython-38.pyc
index 648a729788771e687ff7bd7d5c607674c7afa3f0..1227ec94c82419025d41dba5dd58a427d2b702e1 100644
Binary files a/utils/__pycache__/constants.cpython-38.pyc and b/utils/__pycache__/constants.cpython-38.pyc differ
diff --git a/utils/__pycache__/feature_utils.cpython-38.pyc b/utils/__pycache__/feature_utils.cpython-38.pyc
index dd06d8e54bc79b60ae91f8adf6d7d4ddbef7f975..883b8cc3f33f49cf044a41ea1247ff45fe427ed5 100644
Binary files a/utils/__pycache__/feature_utils.cpython-38.pyc and b/utils/__pycache__/feature_utils.cpython-38.pyc differ
diff --git a/utils/__pycache__/image_process_utils.cpython-38.pyc b/utils/__pycache__/image_process_utils.cpython-38.pyc
index 13f0009166b577750d837e3c874a2880a25ce0cb..db31cd03d77279478dfd054939d4dfb7e5c2b2c0 100644
Binary files a/utils/__pycache__/image_process_utils.cpython-38.pyc and b/utils/__pycache__/image_process_utils.cpython-38.pyc differ
diff --git a/utils/__pycache__/preprocesser_utils.cpython-38.pyc b/utils/__pycache__/preprocesser_utils.cpython-38.pyc
index 266369d5ca9c4a0de1ef56738547aa46ca9fcfd6..906d9e8b41aa182b049da1876638c7699c93a347 100644
Binary files a/utils/__pycache__/preprocesser_utils.cpython-38.pyc and b/utils/__pycache__/preprocesser_utils.cpython-38.pyc differ
diff --git a/utils/__pycache__/video_grid_utils.cpython-38.pyc b/utils/__pycache__/video_grid_utils.cpython-38.pyc
index 8f928127272541396147fe9ba3af965186f1aa66..2784e63ed9b7c509b0ef0cddd247ef57c831c77e 100644
Binary files a/utils/__pycache__/video_grid_utils.cpython-38.pyc and b/utils/__pycache__/video_grid_utils.cpython-38.pyc differ