End of training
- README.md +6 -6
- checkpoint-800000/optimizer.bin +3 -0
- checkpoint-800000/random_states_0.pkl +3 -0
- checkpoint-800000/scaler.pt +3 -0
- checkpoint-800000/scheduler.bin +3 -0
- checkpoint-800000/unet/config.json +67 -0
- checkpoint-800000/unet/diffusion_pytorch_model.safetensors +3 -0
- model_index.json +1 -1
- safety_checker/config.json +2 -4
- text_encoder/config.json +2 -2
- unet/config.json +2 -1
- unet/diffusion_pytorch_model.safetensors +1 -1
- vae/config.json +1 -1
- val_imgs_grid.png +2 -2
README.md
CHANGED
@@ -1,7 +1,7 @@
 
 ---
 license: creativeml-openrail-m
-base_model:
+base_model: runwayml/stable-diffusion-v1-5
 datasets:
 - iamkaikai/amazing_logos_v4
 tags:
@@ -14,7 +14,7 @@ inference: true
 
 # Text-to-image finetuning - iamkaikai/amazing-logos-v4
 
-This pipeline was finetuned from **
+This pipeline was finetuned from **runwayml/stable-diffusion-v1-5** on the **iamkaikai/amazing_logos_v4** dataset. Below are some example images generated with the finetuned pipeline using the following prompts: ['Simple elegant logo for Grupo Altair Publicidad, Circle Lines Venezuela, Publishing, successful vibe, minimalist, thought-provoking, abstract, recognizable, relatable, sharp, vector art, even edges']:
 
 ![val_imgs_grid](./val_imgs_grid.png)
 
@@ -28,7 +28,7 @@ from diffusers import DiffusionPipeline
 import torch
 
 pipeline = DiffusionPipeline.from_pretrained("iamkaikai/amazing-logos-v4", torch_dtype=torch.float16)
-prompt = "Simple elegant logo for
+prompt = "Simple elegant logo for Grupo Altair Publicidad, Circle Lines Venezuela, Publishing, successful vibe, minimalist, thought-provoking, abstract, recognizable, relatable, sharp, vector art, even edges"
 image = pipeline(prompt).images[0]
 image.save("my_image.png")
 ```
@@ -37,12 +37,12 @@ image.save("my_image.png")
 
 These are the key hyperparameters used during training:
 
-* Epochs:
-* Learning rate:
+* Epochs: 3
+* Learning rate: 5e-07
 * Batch size: 1
 * Gradient accumulation steps: 1
 * Image resolution: 512
 * Mixed-precision: fp16
 
 
-More information on all the CLI arguments and the environment are available on your [`wandb` run page](https://wandb.ai/iam-kai-kai/text2image-fine-tune/runs/
+More information on all the CLI arguments and the environment are available on your [`wandb` run page](https://wandb.ai/iam-kai-kai/text2image-fine-tune/runs/838t61tw).
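For convenience, here is a minimal, runnable version of the inference snippet from the updated README. It is a sketch under the assumption that a CUDA GPU is available; the `.to("cuda")` move and the seeded `generator` are additions for reproducibility and are not part of the committed README.

```python
import torch
from diffusers import DiffusionPipeline

# Load the finetuned pipeline in half precision, as in the README snippet.
pipeline = DiffusionPipeline.from_pretrained(
    "iamkaikai/amazing-logos-v4", torch_dtype=torch.float16
)
pipeline = pipeline.to("cuda")  # assumption: fp16 inference on a CUDA GPU

prompt = (
    "Simple elegant logo for Grupo Altair Publicidad, Circle Lines Venezuela, "
    "Publishing, successful vibe, minimalist, thought-provoking, abstract, "
    "recognizable, relatable, sharp, vector art, even edges"
)

# Fixed seed so repeated runs produce the same image (not in the original snippet).
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image.save("my_image.png")
```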
checkpoint-800000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e588735149719cdcb256d6c001af304a09f0c582eb164d3d62e768d1b149efb
+size 6876749715
checkpoint-800000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fae66cc33e0f48ca8c3439a2fa9bb969c3aedfe78f755b7b5ac7a3d0fa397bde
+size 14727
checkpoint-800000/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53a0c13267e785e7fd862fd97ecfab782ec3081e706dfae7328f38f21c40e120
+size 557
checkpoint-800000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afce1e9791af1576ac13ac3f488490ea8dc42400935515f7f69e69883a742a8f
+size 563
checkpoint-800000/unet/config.json
ADDED
@@ -0,0 +1,67 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.21.0.dev0",
+  "_name_or_path": "/amazing-logos-v4/checkpoint-400000",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-800000/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:474276c5d9c9bff575b0326ed1343f35a4b4d0f4c17be7f409f9875405a48252
+size 3438167536
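The files above make up a full intermediate training checkpoint: `optimizer.bin`, `random_states_0.pkl`, `scaler.pt`, and `scheduler.bin` hold the Accelerate training state, while `checkpoint-800000/unet` holds the UNet weights at step 800000. The sketch below is a hedged example, assuming the checkpoint folder stays in the `iamkaikai/amazing-logos-v4` repo with the layout shown in this commit, of loading that intermediate UNet and sampling from it; the prompt is an illustrative placeholder.

```python
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel

# Assumption: the checkpoint-800000/unet folder added in this commit remains
# available in the iamkaikai/amazing-logos-v4 repository.
unet = UNet2DConditionModel.from_pretrained(
    "iamkaikai/amazing-logos-v4",
    subfolder="checkpoint-800000/unet",
    torch_dtype=torch.float16,
)

# Reuse the rest of the pipeline (VAE, text encoder, scheduler, safety checker)
# from the main revision, swapping in the step-800000 UNet.
pipeline = DiffusionPipeline.from_pretrained(
    "iamkaikai/amazing-logos-v4", unet=unet, torch_dtype=torch.float16
).to("cuda")

# Illustrative prompt in the style of the dataset captions.
image = pipeline(
    "Simple elegant logo for a publishing company, minimalist, vector art, even edges"
).images[0]
image.save("checkpoint_800000_sample.png")
```

If training were to continue instead, this is the kind of state folder the diffusers text-to-image example script can typically pick up again via its `--resume_from_checkpoint` argument.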
model_index.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "StableDiffusionPipeline",
   "_diffusers_version": "0.21.0.dev0",
-  "_name_or_path": "
+  "_name_or_path": "runwayml/stable-diffusion-v1-5",
   "feature_extractor": [
     "transformers",
     "CLIPImageProcessor"
safety_checker/config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/root/.cache/huggingface/hub/models--
+  "_name_or_path": "/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/1d0c4ebf6ff58a5caecab40fa1406526bca4b5b9/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -8,16 +8,14 @@
   "model_type": "clip",
   "projection_dim": 768,
   "text_config": {
-    "bos_token_id": 0,
     "dropout": 0.0,
-    "eos_token_id": 2,
     "hidden_size": 768,
     "intermediate_size": 3072,
     "model_type": "clip_text_model",
     "num_attention_heads": 12
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.34.0.dev0",
   "vision_config": {
     "dropout": 0.0,
     "hidden_size": 1024,
text_encoder/config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "runwayml/stable-diffusion-v1-5",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float16",
-  "transformers_version": "4.
+  "transformers_version": "4.34.0.dev0",
   "vocab_size": 49408
 }
unet/config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.21.0.dev0",
-  "_name_or_path": "
+  "_name_or_path": "/amazing-logos-v4/checkpoint-800000",
   "act_fn": "silu",
   "addition_embed_type": null,
   "addition_embed_type_num_heads": 64,
@@ -28,6 +28,7 @@
     "DownBlock2D"
   ],
   "downsample_padding": 1,
+  "dropout": 0.0,
   "dual_cross_attention": false,
   "encoder_hid_dim": null,
   "encoder_hid_dim_type": null,
unet/diffusion_pytorch_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1422cbf1c22a4f50116d73dcec8e2f90096d50900c2693b50b862067716a311a
 size 3438167536
vae/config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.21.0.dev0",
-  "_name_or_path": "
+  "_name_or_path": "runwayml/stable-diffusion-v1-5",
   "act_fn": "silu",
   "block_out_channels": [
     128,
val_imgs_grid.png
CHANGED