Dataset schema (one record per source-file chunk):
  text               string (length 7 to 328k)
  id                 string (length 14 to 166)
  metadata           dict
  __index_level_0__  int64 (0 to 459)
#![allow(unused)] use candle::{DType, IndexOp, Layout, Module, Result, Shape, Tensor, D}; use candle_nn::{conv1d, Conv1d, Conv1dConfig, ConvTranspose1d, VarBuilder}; // Encodec Model // https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)] pub enum NormType { WeightNorm, TimeGroupNorm, None, } #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)] pub enum PadMode { Constant, Reflect, Replicate, } #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub target_bandwidths: Vec<f64>, pub sampling_rate: usize, pub audio_channels: usize, pub normalize: bool, pub chunk_length_s: Option<usize>, pub overlap: Option<usize>, pub hidden_size: usize, pub num_filters: usize, pub num_residual_layers: usize, pub upsampling_ratios: Vec<usize>, pub norm_type: NormType, pub kernel_size: usize, pub last_kernel_size: usize, pub residual_kernel_size: usize, pub dilation_growth_rate: usize, pub use_causal_conv: bool, pub pad_mode: PadMode, pub compress: usize, pub num_lstm_layers: usize, pub trim_right_ratio: f64, pub codebook_size: usize, pub codebook_dim: Option<usize>, pub use_conv_shortcut: bool, } impl Default for Config { fn default() -> Self { Self { target_bandwidths: vec![1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate: 24_000, audio_channels: 1, normalize: false, chunk_length_s: None, overlap: None, hidden_size: 128, num_filters: 32, num_residual_layers: 1, upsampling_ratios: vec![8, 5, 4, 2], norm_type: NormType::WeightNorm, kernel_size: 7, last_kernel_size: 7, residual_kernel_size: 3, dilation_growth_rate: 2, use_causal_conv: true, // This should be PadMode::Reflect which is currently unsupported in candle. pad_mode: PadMode::Replicate, compress: 2, num_lstm_layers: 2, trim_right_ratio: 1.0, codebook_size: 1024, codebook_dim: None, use_conv_shortcut: true, } } } impl Config { fn codebook_dim(&self) -> usize { self.codebook_dim.unwrap_or(self.hidden_size) } fn frame_rate(&self) -> usize { let hop_length: usize = self.upsampling_ratios.iter().product(); (self.sampling_rate + hop_length - 1) / hop_length } fn num_quantizers(&self) -> usize { let num = 1000f64 * self .target_bandwidths .last() .expect("empty target_bandwidths"); (num as usize) / (self.frame_rate() * 10) } } fn get_extra_padding_for_conv1d( xs: &Tensor, k_size: usize, stride: usize, padding_total: usize, ) -> Result<usize> { let len = xs.dim(D::Minus1)?; let n_frames = (len + padding_total).saturating_sub(k_size) as f64 / stride as f64 + 1.0; let ideal_len = ((n_frames.ceil() as usize - 1) * stride + k_size).saturating_sub(padding_total); Ok(ideal_len.saturating_sub(len)) } fn pad1d(xs: &Tensor, pad_l: usize, pad_r: usize, mode: PadMode) -> Result<Tensor> { match mode { PadMode::Constant => xs.pad_with_zeros(D::Minus1, pad_l, pad_r), PadMode::Reflect => candle::bail!("pad-mode 'reflect' is not supported"), PadMode::Replicate => xs.pad_with_same(D::Minus1, pad_l, pad_r), } } // Applies weight norm for inference by recomputing the weight tensor. This // does not apply to training. 
// https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html pub fn conv1d_weight_norm( in_c: usize, out_c: usize, kernel_size: usize, config: candle_nn::Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight_g = vb.get((out_c, 1, 1), "weight_g")?; let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?; let bias = vb.get(out_c, "bias")?; Ok(Conv1d::new(weight, Some(bias), config)) } fn conv_transpose1d_weight_norm( in_c: usize, out_c: usize, kernel_size: usize, bias: bool, config: candle_nn::ConvTranspose1dConfig, vb: VarBuilder, ) -> Result<ConvTranspose1d> { let weight_g = vb.get((in_c, 1, 1), "weight_g")?; let weight_v = vb.get((in_c, out_c, kernel_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?; let bias = if bias { Some(vb.get(out_c, "bias")?) } else { None }; Ok(ConvTranspose1d::new(weight, bias, config)) } struct CodebookEncode; impl candle::CustomOp2 for CodebookEncode { fn name(&self) -> &'static str { "cb" } fn cpu_fwd( &self, lhs_storage: &candle::CpuStorage, lhs_layout: &Layout, rhs_storage: &candle::CpuStorage, rhs_layout: &Layout, ) -> Result<(candle::CpuStorage, Shape)> { use rayon::prelude::*; let (lhs_dim1, lhs_dim2) = lhs_layout.shape().dims2()?; let (rhs_dim1, rhs_dim2) = rhs_layout.shape().dims2()?; if lhs_dim2 != rhs_dim2 { candle::bail!("CodebookEncode, mismatch on last dim, {lhs_layout:?} {rhs_layout:?}"); } if lhs_dim2 == 0 { candle::bail!("CodebookEncode, empty last dim {lhs_layout:?}") } let lhs = match lhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, lhs has to be contiguous, got {lhs_layout:?}"), Some((o1, o2)) => { let slice = lhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let rhs = match rhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, rhs has to be contiguous, got {rhs_layout:?}"), Some((o1, o2)) => { let slice = rhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let dst = (0..lhs_dim1) .into_par_iter() .map(|idx1| { let mut where_min = 0; let mut min_dist = f32::INFINITY; let lhs = &lhs[idx1 * lhs_dim2..(idx1 + 1) * lhs_dim2]; for idx2 in 0..rhs_dim1 { let rhs = &rhs[idx2 * rhs_dim2..(idx2 + 1) * rhs_dim2]; let mut dist = 0f32; for (a, b) in lhs.iter().zip(rhs.iter()) { dist += (a - b) * (a - b) } if dist < min_dist { min_dist = dist; where_min = idx2; } } where_min as u32 }) .collect(); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (lhs_dim1,).into())) } } // https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L340 #[derive(Clone, Debug)] pub struct EuclideanCodebook { inited: Tensor, cluster_size: Tensor, embed: candle_nn::Embedding, embed_avg: Tensor, c2: Tensor, } impl EuclideanCodebook { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let inited = vb.get(1, "inited")?; let cluster_size = vb.get(cfg.codebook_size, "cluster_size")?; let e_shape = (cfg.codebook_size, cfg.codebook_dim()); let embed = vb.get(e_shape, "embed")?; let c2 = ((&embed * &embed)?.sum(D::Minus1)? 
/ 2.0)?; let embed_avg = vb.get(e_shape, "embed_avg")?; Ok(Self { inited, cluster_size, embed: candle_nn::Embedding::new(embed, cfg.codebook_dim()), embed_avg, c2, }) } pub fn encode_slow(&self, xs: &Tensor) -> Result<Tensor> { let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let dot_prod = xs.matmul(&self.embed.embeddings().t()?)?; let codes = self.c2.broadcast_sub(&dot_prod)?.argmin(D::Minus1)?; codes.reshape(target_shape) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let codes = Tensor::apply_op2(&xs, self.embed.embeddings(), CodebookEncode)?; codes.reshape(target_shape) } pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> { let quantize = self.embed.forward(embed_ind)?; Ok(quantize) } } #[derive(Clone, Debug)] pub struct VectorQuantization { codebook: EuclideanCodebook, } impl VectorQuantization { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let codebook = EuclideanCodebook::new(cfg, vb.pp("codebook"))?; Ok(Self { codebook }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.transpose(1, 2)?; self.codebook.encode_slow(&xs) } pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> { let quantize = self.codebook.decode(embed_ind)?; let quantize = quantize.transpose(1, 2)?; Ok(quantize) } } #[derive(Clone, Debug)] pub struct ResidualVectorQuantizer { layers: Vec<VectorQuantization>, dtype: DType, } impl ResidualVectorQuantizer { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = &vb.pp("layers"); let layers = (0..cfg.num_quantizers()) .map(|i| VectorQuantization::new(cfg, vb.pp(i))) .collect::<Result<Vec<_>>>()?; Ok(Self { layers, dtype: vb.dtype(), }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut codes = Vec::with_capacity(self.layers.len()); let mut residual = xs.clone(); for layer in self.layers.iter() { let indices = layer.encode(&residual)?; let quantized = layer.decode(&indices)?; residual = (residual - quantized)?; codes.push(indices) } Tensor::stack(&codes, 0) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { let mut quantized_out = Tensor::zeros((), self.dtype, codes.device())?; let ncodes = codes.dim(0)?; if ncodes > self.layers.len() { candle::bail!( "codes shape {:?} does not match the number of quantization layers {}", codes.shape(), self.layers.len() ) } for (i, layer) in self.layers.iter().take(ncodes).enumerate() { let quantized = layer.decode(&codes.i(i)?)?; quantized_out = quantized.broadcast_add(&quantized_out)?; } Ok(quantized_out) } } // https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L226 #[derive(Clone, Debug)] pub struct EncodecLSTM { layers: Vec<candle_nn::LSTM>, } impl EncodecLSTM { pub fn new(dim: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = &vb.pp("lstm"); let mut layers = vec![]; for layer_idx in 0..cfg.num_lstm_layers { let config = candle_nn::LSTMConfig { layer_idx, ..Default::default() }; let lstm = candle_nn::lstm(dim, dim, config, vb.clone())?; layers.push(lstm) } Ok(Self { layers }) } } impl Module for EncodecLSTM { fn forward(&self, xs: &Tensor) -> Result<Tensor> { use candle_nn::RNN; // This is different from the Python transformers version as candle LSTM is batch first. 
let xs = xs.t()?; let residual = &xs; let mut xs = xs.clone(); for layer in self.layers.iter() { let states = layer.seq(&xs)?; xs = layer.states_to_tensor(&states)?; } let xs = (xs + residual)?.t()?; Ok(xs) } } #[derive(Clone, Debug)] pub struct EncodecConvTranspose1d { conv: ConvTranspose1d, } impl EncodecConvTranspose1d { fn new( in_c: usize, out_c: usize, k: usize, stride: usize, _cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let cfg = candle_nn::ConvTranspose1dConfig { stride, ..Default::default() }; let conv = conv_transpose1d_weight_norm(in_c, out_c, k, true, cfg, vb.pp("conv"))?; Ok(Self { conv }) } } impl Module for EncodecConvTranspose1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.conv) } } #[derive(Clone, Debug)] pub struct EncodecConv1d { causal: bool, conv: Conv1d, norm: Option<candle_nn::GroupNorm>, pad_mode: PadMode, } impl EncodecConv1d { pub fn new( in_c: usize, out_c: usize, kernel_size: usize, stride: usize, dilation: usize, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let conv = match cfg.norm_type { NormType::WeightNorm => conv1d_weight_norm( in_c, out_c, kernel_size, candle_nn::Conv1dConfig { stride, dilation, ..Default::default() }, vb.pp("conv"), )?, NormType::None | NormType::TimeGroupNorm => conv1d( in_c, out_c, kernel_size, candle_nn::Conv1dConfig { padding: 0, stride, groups: 1, dilation: 1, }, vb.pp("conv"), )?, }; let norm = match cfg.norm_type { NormType::None | NormType::WeightNorm => None, NormType::TimeGroupNorm => { let gn = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?; Some(gn) } }; Ok(Self { causal: cfg.use_causal_conv, conv, norm, pad_mode: cfg.pad_mode, }) } } impl Module for EncodecConv1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _t, _c) = xs.dims3()?; let k_size = self.conv.weight().dim(D::Minus1)?; let conv_cfg = self.conv.config(); // Effective kernel size with dilations. let k_size = (k_size - 1) * conv_cfg.dilation + 1; let padding_total = k_size - conv_cfg.stride; let extra_padding = get_extra_padding_for_conv1d(xs, k_size, conv_cfg.stride, padding_total)?; let xs = if self.causal { pad1d(xs, padding_total, extra_padding, self.pad_mode)? } else { let padding_right = padding_total / 2; let padding_left = padding_total - padding_right; pad1d( xs, padding_left, padding_right + extra_padding, self.pad_mode, )? }; let xs = self.conv.forward(&xs)?; match &self.norm { None => Ok(xs), Some(norm) => xs.apply(norm), } } } #[derive(Clone, Debug)] pub struct EncodecResnetBlock { block_conv1: EncodecConv1d, block_conv2: EncodecConv1d, shortcut: Option<EncodecConv1d>, } impl EncodecResnetBlock { pub fn new( dim: usize, (dilation1, dilation2): (usize, usize), cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let h = dim / cfg.compress; let mut layer = Layer::new(vb.pp("block")); // TODO: Apply dilations! 
layer.inc(); let block_conv1 = EncodecConv1d::new( dim, h, cfg.residual_kernel_size, 1, dilation1, cfg, layer.next(), )?; layer.inc(); let block_conv2 = EncodecConv1d::new(h, dim, 1, 1, dilation2, cfg, layer.next())?; let shortcut = if cfg.use_conv_shortcut { let conv = EncodecConv1d::new(dim, dim, 1, 1, 1, cfg, vb.pp("shortcut"))?; Some(conv) } else { None }; Ok(Self { block_conv1, block_conv2, shortcut, }) } } impl Module for EncodecResnetBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs.clone(); let xs = xs.elu(1.)?; let xs = self.block_conv1.forward(&xs)?; let xs = xs.elu(1.)?; let xs = self.block_conv2.forward(&xs)?; let xs = match &self.shortcut { None => (xs + residual)?, Some(shortcut) => xs.add(&shortcut.forward(&residual)?)?, }; Ok(xs) } } struct Layer<'a> { vb: VarBuilder<'a>, cnt: usize, } impl<'a> Layer<'a> { fn new(vb: VarBuilder<'a>) -> Self { Self { vb, cnt: 0 } } fn inc(&mut self) { self.cnt += 1; } fn next(&mut self) -> VarBuilder { let vb = self.vb.pp(&self.cnt.to_string()); self.cnt += 1; vb } } #[derive(Clone, Debug)] pub struct Encoder { init_conv: EncodecConv1d, sampling_layers: Vec<(Vec<EncodecResnetBlock>, EncodecConv1d)>, final_lstm: EncodecLSTM, final_conv: EncodecConv1d, } impl Encoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let mut layer = Layer::new(vb.pp("layers")); let init_conv = EncodecConv1d::new( cfg.audio_channels, cfg.num_filters, cfg.kernel_size, 1, 1, cfg, layer.next(), )?; let mut sampling_layers = vec![]; let mut scaling = 1; for &ratio in cfg.upsampling_ratios.iter().rev() { let current_scale = scaling * cfg.num_filters; let mut resnets = vec![]; for j in 0..(cfg.num_residual_layers as u32) { let resnet = EncodecResnetBlock::new( current_scale, (cfg.dilation_growth_rate.pow(j), 1), cfg, layer.next(), )?; resnets.push(resnet) } layer.inc(); // ELU let conv1d = EncodecConv1d::new( current_scale, current_scale * 2, ratio * 2, ratio, 1, cfg, layer.next(), )?; sampling_layers.push((resnets, conv1d)); scaling *= 2; } let final_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?; layer.inc(); // ELU let final_conv = EncodecConv1d::new( cfg.num_filters * scaling, cfg.hidden_size, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; Ok(Self { init_conv, sampling_layers, final_conv, final_lstm, }) } } impl Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.init_conv)?; for (resnets, conv) in self.sampling_layers.iter() { for resnet in resnets.iter() { xs = xs.apply(resnet)?; } xs = xs.elu(1.0)?.apply(conv)?; } xs.apply(&self.final_lstm)? .elu(1.0)? 
.apply(&self.final_conv) } } #[derive(Clone, Debug)] pub struct Decoder { init_conv: EncodecConv1d, init_lstm: EncodecLSTM, sampling_layers: Vec<(EncodecConvTranspose1d, Vec<EncodecResnetBlock>)>, final_conv: EncodecConv1d, } impl Decoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let mut layer = Layer::new(vb.pp("layers")); let mut scaling = usize::pow(2, cfg.upsampling_ratios.len() as u32); let init_conv = EncodecConv1d::new( cfg.hidden_size, cfg.num_filters * scaling, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; let init_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?; let mut sampling_layers = vec![]; for &ratio in cfg.upsampling_ratios.iter() { let current_scale = scaling * cfg.num_filters; layer.inc(); // ELU let conv1d = EncodecConvTranspose1d::new( current_scale, current_scale / 2, ratio * 2, ratio, cfg, layer.next(), )?; let mut resnets = vec![]; for j in 0..(cfg.num_residual_layers as u32) { let resnet = EncodecResnetBlock::new( current_scale / 2, (cfg.dilation_growth_rate.pow(j), 1), cfg, layer.next(), )?; resnets.push(resnet) } sampling_layers.push((conv1d, resnets)); scaling /= 2; } layer.inc(); // ELU let final_conv = EncodecConv1d::new( cfg.num_filters, cfg.audio_channels, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; Ok(Self { init_conv, init_lstm, sampling_layers, final_conv, }) } } impl Module for Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.init_conv)?.apply(&self.init_lstm)?; for (conv, resnets) in self.sampling_layers.iter() { xs = xs.elu(1.)?.apply(conv)?; for resnet in resnets.iter() { xs = xs.apply(resnet)? } } xs.elu(1.)?.apply(&self.final_conv) } } #[derive(Debug)] pub struct Model { encoder: Encoder, decoder: Decoder, quantizer: ResidualVectorQuantizer, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let decoder = Decoder::new(cfg, vb.pp("decoder"))?; let quantizer = ResidualVectorQuantizer::new(cfg, vb.pp("quantizer"))?; Ok(Self { encoder, decoder, quantizer, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; let codes = self.quantizer.encode(&xs)?; codes.transpose(0, 1) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { let (_b_sz, _codebooks, _seqlen) = codes.dims3()?; let codes = codes.transpose(0, 1)?; let embeddings = self.quantizer.decode(&codes)?; let outputs = self.decoder.forward(&embeddings)?; Ok(outputs) } }
candle/candle-transformers/src/models/encodec.rs/0
{ "file_path": "candle/candle-transformers/src/models/encodec.rs", "repo_id": "candle", "token_count": 12642 }
40
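The record above carries the candle EnCodec port. Purely as an orientation aid (not part of the dataset record), here is a standalone Rust sketch with no candle dependency that reproduces the arithmetic of `Config::frame_rate` and `Config::num_quantizers` for the default values shown in that record; everything outside those two formulas is illustrative naming of ours.

fn main() {
    // Default EnCodec configuration values from the record above.
    let sampling_rate = 24_000usize;
    let upsampling_ratios = [8usize, 5, 4, 2];
    let target_bandwidths = [1.5f64, 3.0, 6.0, 12.0, 24.0];

    // hop_length is the product of the upsampling ratios: 8 * 5 * 4 * 2 = 320.
    let hop_length: usize = upsampling_ratios.iter().product();
    // frame_rate() is a ceiling division: ceil(24_000 / 320) = 75 frames per second.
    let frame_rate = (sampling_rate + hop_length - 1) / hop_length;
    // num_quantizers() uses the largest target bandwidth (24 kbps here):
    // (1000 * 24.0) as usize / (75 * 10) = 32 residual quantizer layers.
    let num = 1000f64 * target_bandwidths.last().expect("empty target_bandwidths");
    let num_quantizers = (num as usize) / (frame_rate * 10);

    assert_eq!(hop_length, 320);
    assert_eq!(frame_rate, 75);
    assert_eq!(num_quantizers, 32);
    println!("hop_length={hop_length} frame_rate={frame_rate} num_quantizers={num_quantizers}");
}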
use candle::DType;
use serde::Deserialize;

pub const DTYPE: DType = DType::F32;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
    Absolute,
    Alibi,
}

// https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub hidden_act: candle_nn::Activation,
    pub max_position_embeddings: usize,
    pub initializer_range: f64,
    pub layer_norm_eps: f64,
    pub rms_norm_eps: f64,
    pub use_cache: bool,
    pub tie_word_embeddings: bool,
    pub rope_theta: f64,
    pub qk_layernorm: bool,
    pub partial_rotary_factor: f64,
}

impl Config {
    pub fn base_8b() -> Self {
        // https://huggingface.co/adept/persimmon-8b-base/blob/main/config.json
        Self {
            hidden_act: candle_nn::Activation::Relu,
            hidden_size: 4096,
            initializer_range: 0.02,
            intermediate_size: 16384,
            layer_norm_eps: 1e-05,
            max_position_embeddings: 16384,
            num_attention_heads: 64,
            num_hidden_layers: 36,
            num_key_value_heads: 64,
            qk_layernorm: true,
            rms_norm_eps: 1e-06,
            rope_theta: 25000.0,
            tie_word_embeddings: false,
            use_cache: true,
            vocab_size: 262144,
            partial_rotary_factor: 0.5,
        }
    }
}
candle/candle-transformers/src/models/persimmon.rs/0
{ "file_path": "candle/candle-transformers/src/models/persimmon.rs", "repo_id": "candle", "token_count": 814 }
41
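Again purely as an illustration (these quantities are not stored in the persimmon `Config` above, and the variable names are ours): the per-head dimension and the rotary dimension implied by `base_8b()` can be derived as below, assuming the usual convention that `partial_rotary_factor` scales the per-head dimension.

fn main() {
    // Values from Config::base_8b() in the record above.
    let hidden_size = 4096usize;
    let num_attention_heads = 64usize;
    let partial_rotary_factor = 0.5f64;

    // Per-head dimension: 4096 / 64 = 64.
    let head_dim = hidden_size / num_attention_heads;
    // Assuming partial rotary embeddings cover a fraction of each head: 64 * 0.5 = 32 dims.
    let rotary_dim = (head_dim as f64 * partial_rotary_factor) as usize;

    assert_eq!(head_dim, 64);
    assert_eq!(rotary_dim, 32);
    println!("head_dim={head_dim} rotary_dim={rotary_dim}");
}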
//! ResNet implementation. //! //! See "Deep Residual Learning for Image Recognition" He et al. 2015 //! <https://arxiv.org/abs/1512.03385> use candle::{Result, D}; use candle_nn::{batch_norm, Conv2d, Func, VarBuilder}; fn conv2d( c_in: usize, c_out: usize, ksize: usize, padding: usize, stride: usize, vb: VarBuilder, ) -> Result<Conv2d> { let conv2d_cfg = candle_nn::Conv2dConfig { stride, padding, ..Default::default() }; candle_nn::conv2d_no_bias(c_in, c_out, ksize, conv2d_cfg, vb) } fn downsample(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> { if stride != 1 || c_in != c_out { let conv = conv2d(c_in, c_out, 1, 0, stride, vb.pp(0))?; let bn = batch_norm(c_out, 1e-5, vb.pp(1))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } else { Ok(Func::new(|xs| Ok(xs.clone()))) } } fn basic_block(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> { let conv1 = conv2d(c_in, c_out, 3, 1, stride, vb.pp("conv1"))?; let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?; let conv2 = conv2d(c_out, c_out, 3, 1, 1, vb.pp("conv2"))?; let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?; let downsample = downsample(c_in, c_out, stride, vb.pp("downsample"))?; Ok(Func::new(move |xs| { let ys = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .apply(&conv2)? .apply_t(&bn2, false)?; (xs.apply(&downsample)? + ys)?.relu() })) } fn basic_layer( c_in: usize, c_out: usize, stride: usize, cnt: usize, vb: VarBuilder, ) -> Result<Func> { let mut layers = Vec::with_capacity(cnt); for index in 0..cnt { let l_in = if index == 0 { c_in } else { c_out }; let stride = if index == 0 { stride } else { 1 }; layers.push(basic_block(l_in, c_out, stride, vb.pp(index))?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } fn resnet( nclasses: Option<usize>, c1: usize, c2: usize, c3: usize, c4: usize, vb: VarBuilder, ) -> Result<Func> { let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?; let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?; let layer1 = basic_layer(64, 64, 1, c1, vb.pp("layer1"))?; let layer2 = basic_layer(64, 128, 2, c2, vb.pp("layer2"))?; let layer3 = basic_layer(128, 256, 2, c3, vb.pp("layer3"))?; let layer4 = basic_layer(256, 512, 2, c4, vb.pp("layer4"))?; let fc = match nclasses { None => None, Some(nclasses) => { let linear = candle_nn::linear(512, nclasses, vb.pp("fc"))?; Some(linear) } }; Ok(Func::new(move |xs| { let xs = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .pad_with_same(D::Minus1, 1, 1)? .pad_with_same(D::Minus2, 1, 1)? .max_pool2d_with_stride(3, 2)? .apply(&layer1)? .apply(&layer2)? .apply(&layer3)? .apply(&layer4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &fc { None => Ok(xs), Some(fc) => xs.apply(fc), } })) } /// Creates a ResNet-18 model. pub fn resnet18(num_classes: usize, vb: VarBuilder) -> Result<Func> { resnet(Some(num_classes), 2, 2, 2, 2, vb) } pub fn resnet18_no_final_layer(vb: VarBuilder) -> Result<Func> { resnet(None, 2, 2, 2, 2, vb) } /// Creates a ResNet-34 model. pub fn resnet34(num_classes: usize, vb: VarBuilder) -> Result<Func> { resnet(Some(num_classes), 3, 4, 6, 3, vb) } pub fn resnet34_no_final_layer(vb: VarBuilder) -> Result<Func> { resnet(None, 3, 4, 6, 3, vb) } // Bottleneck versions for ResNet 50, 101, and 152. 
fn bottleneck_block( c_in: usize, c_out: usize, stride: usize, e: usize, vb: VarBuilder, ) -> Result<Func> { let e_dim = e * c_out; let conv1 = conv2d(c_in, c_out, 1, 0, 1, vb.pp("conv1"))?; let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?; let conv2 = conv2d(c_out, c_out, 3, 1, stride, vb.pp("conv2"))?; let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?; let conv3 = conv2d(c_out, e_dim, 1, 0, 1, vb.pp("conv3"))?; let bn3 = batch_norm(e_dim, 1e-5, vb.pp("bn3"))?; let downsample = downsample(c_in, e_dim, stride, vb.pp("downsample"))?; Ok(Func::new(move |xs| { let ys = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .apply(&conv2)? .apply_t(&bn2, false)? .relu()? .apply(&conv3)? .apply_t(&bn3, false)?; (xs.apply(&downsample)? + ys)?.relu() })) } fn bottleneck_layer( c_in: usize, c_out: usize, stride: usize, cnt: usize, vb: VarBuilder, ) -> Result<Func> { let mut layers = Vec::with_capacity(cnt); for index in 0..cnt { let l_in = if index == 0 { c_in } else { 4 * c_out }; let stride = if index == 0 { stride } else { 1 }; layers.push(bottleneck_block(l_in, c_out, stride, 4, vb.pp(index))?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } fn bottleneck_resnet( nclasses: Option<usize>, c1: usize, c2: usize, c3: usize, c4: usize, vb: VarBuilder, ) -> Result<Func> { let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?; let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?; let layer1 = bottleneck_layer(64, 64, 1, c1, vb.pp("layer1"))?; let layer2 = bottleneck_layer(4 * 64, 128, 2, c2, vb.pp("layer2"))?; let layer3 = bottleneck_layer(4 * 128, 256, 2, c3, vb.pp("layer3"))?; let layer4 = bottleneck_layer(4 * 256, 512, 2, c4, vb.pp("layer4"))?; let fc = match nclasses { None => None, Some(nclasses) => { let linear = candle_nn::linear(4 * 512, nclasses, vb.pp("fc"))?; Some(linear) } }; Ok(Func::new(move |xs| { let xs = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .pad_with_same(D::Minus1, 1, 1)? .pad_with_same(D::Minus2, 1, 1)? .max_pool2d_with_stride(3, 2)? .apply(&layer1)? .apply(&layer2)? .apply(&layer3)? .apply(&layer4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &fc { None => Ok(xs), Some(fc) => xs.apply(fc), } })) } pub fn resnet50(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 4, 6, 3, vb) } pub fn resnet50_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 4, 6, 3, vb) } pub fn resnet101(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 4, 23, 3, vb) } pub fn resnet101_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 4, 23, 3, vb) } pub fn resnet152(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 8, 36, 3, vb) } pub fn resnet152_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 8, 36, 3, vb) }
candle/candle-transformers/src/models/resnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/resnet.rs", "repo_id": "candle", "token_count": 3959 }
42
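A minimal usage sketch for the ResNet builders in the record above, assuming the candle crates are available under the names used inside that repo (`candle`, `candle_nn`, `candle_transformers`) and that `VarBuilder::zeros` can stand in for real pretrained weights; an all-zero model is only useful for wiring and shape checks, not for actual classification.

use candle::{DType, Device, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::resnet;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Zero-initialized weights: enough to exercise the graph, not to classify anything.
    let vb = VarBuilder::zeros(DType::F32, &device);
    // ResNet-18 with a 1000-class head; resnet50 would use the bottleneck blocks instead.
    let model = resnet::resnet18(1000, vb)?;
    // NCHW input: a batch of one 224x224 RGB image.
    let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
    let logits = image.apply(&model)?;
    println!("logits shape: {:?}", logits.shape()); // expected to be (1, 1000)
    Ok(())
}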
//! Ancestral sampling with Euler method steps. //! //! Reference implementation in Rust: //! //! https://github.com/pykeio/diffusers/blob/250b9ad1898af41e76a74c0d8d4292652823338a/src/schedulers/euler_ancestral_discrete.rs //! //! Based on the original [`k-diffusion` implementation by Katherine Crowson][kd]. /// /// [kd]: https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L72 use super::{ schedulers::{ betas_for_alpha_bar, BetaSchedule, PredictionType, Scheduler, SchedulerConfig, TimestepSpacing, }, utils::interp, }; use candle::{bail, Error, Result, Tensor}; /// The configuration for the EulerAncestral Discrete scheduler. #[derive(Debug, Clone, Copy)] pub struct EulerAncestralDiscreteSchedulerConfig { /// The value of beta at the beginning of training.n pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// Adjust the indexes of the inference schedule by this value. pub steps_offset: usize, /// prediction type of the scheduler function, one of `epsilon` (predicting /// the noise of the diffusion process), `sample` (directly predicting the noisy sample`) /// or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) pub prediction_type: PredictionType, /// number of diffusion steps used to train the model pub train_timesteps: usize, /// time step spacing for the diffusion process pub timestep_spacing: TimestepSpacing, } impl Default for EulerAncestralDiscreteSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085f64, beta_end: 0.012f64, beta_schedule: BetaSchedule::ScaledLinear, steps_offset: 1, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, timestep_spacing: TimestepSpacing::Leading, } } } impl SchedulerConfig for EulerAncestralDiscreteSchedulerConfig { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> { Ok(Box::new(EulerAncestralDiscreteScheduler::new( inference_steps, *self, )?)) } } /// The EulerAncestral Discrete scheduler. #[derive(Debug, Clone)] pub struct EulerAncestralDiscreteScheduler { timesteps: Vec<usize>, sigmas: Vec<f64>, init_noise_sigma: f64, pub config: EulerAncestralDiscreteSchedulerConfig, } // clip_sample: False, set_alpha_to_one: False impl EulerAncestralDiscreteScheduler { /// Creates a new EulerAncestral Discrete scheduler given the number of steps to be /// used for inference as well as the number of steps that was used /// during training. pub fn new( inference_steps: usize, config: EulerAncestralDiscreteSchedulerConfig, ) -> Result<Self> { let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = match config.timestep_spacing { TimestepSpacing::Leading => (0..(inference_steps)) .map(|s| s * step_ratio + config.steps_offset) .rev() .collect(), TimestepSpacing::Trailing => std::iter::successors(Some(config.train_timesteps), |n| { if *n > step_ratio { Some(n - step_ratio) } else { None } }) .map(|n| n - 1) .collect(), TimestepSpacing::Linspace => { super::utils::linspace(0.0, (config.train_timesteps - 1) as f64, inference_steps)? .to_vec1::<f64>()? .iter() .map(|&f| f as usize) .rev() .collect() } }; let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)? 
} BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } let sigmas: Vec<f64> = alphas_cumprod .iter() .map(|&f| ((1. - f) / f).sqrt()) .collect(); let sigmas_xa: Vec<_> = (0..sigmas.len()).map(|i| i as f64).collect(); let mut sigmas_int = interp( &timesteps.iter().map(|&t| t as f64).collect::<Vec<_>>(), &sigmas_xa, &sigmas, ); sigmas_int.push(0.0); // standard deviation of the initial noise distribution // f64 does not implement Ord such that there is no `max`, so we need to use this workaround let init_noise_sigma = *sigmas_int .iter() .chain(std::iter::once(&0.0)) .reduce(|a, b| if a > b { a } else { b }) .expect("init_noise_sigma could not be reduced from sigmas - this should never happen"); Ok(Self { sigmas: sigmas_int, timesteps, init_noise_sigma, config, }) } } impl Scheduler for EulerAncestralDiscreteScheduler { fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. /// /// Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm fn scale_model_input(&self, sample: Tensor, timestep: usize) -> Result<Tensor> { let step_index = match self.timesteps.iter().position(|&t| t == timestep) { Some(i) => i, None => bail!("timestep out of this schedulers bounds: {timestep}"), }; let sigma = self .sigmas .get(step_index) .expect("step_index out of sigma bounds - this shouldn't happen"); sample / ((sigma.powi(2) + 1.).sqrt()) } /// Performs a backward step during inference. fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let step_index = self .timesteps .iter() .position(|&p| p == timestep) .ok_or_else(|| Error::Msg("timestep out of this schedulers bounds".to_string()))?; let sigma_from = &self.sigmas[step_index]; let sigma_to = &self.sigmas[step_index + 1]; // 1. compute predicted original sample (x_0) from sigma-scaled predicted noise let pred_original_sample = match self.config.prediction_type { PredictionType::Epsilon => (sample - (model_output * *sigma_from))?, PredictionType::VPrediction => { ((model_output * (-sigma_from / (sigma_from.powi(2) + 1.0).sqrt()))? + (sample / (sigma_from.powi(2) + 1.0))?)? } PredictionType::Sample => bail!("prediction_type not implemented yet: sample"), }; let sigma_up = (sigma_to.powi(2) * (sigma_from.powi(2) - sigma_to.powi(2)) / sigma_from.powi(2)) .sqrt(); let sigma_down = (sigma_to.powi(2) - sigma_up.powi(2)).sqrt(); // 2. convert to a ODE derivative let derivative = ((sample - pred_original_sample)? / *sigma_from)?; let dt = sigma_down - *sigma_from; let prev_sample = (sample + derivative * dt)?; let noise = prev_sample.randn_like(0.0, 1.0)?; prev_sample + noise * sigma_up } fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> { let step_index = self .timesteps .iter() .position(|&p| p == timestep) .ok_or_else(|| Error::Msg("timestep out of this schedulers bounds".to_string()))?; let sigma = self .sigmas .get(step_index) .expect("step_index out of sigma bounds - this shouldn't happen"); original + (noise * *sigma)? 
} fn init_noise_sigma(&self) -> f64 { match self.config.timestep_spacing { TimestepSpacing::Trailing | TimestepSpacing::Linspace => self.init_noise_sigma, TimestepSpacing::Leading => (self.init_noise_sigma.powi(2) + 1.0).sqrt(), } } }
candle/candle-transformers/src/models/stable_diffusion/euler_ancestral_discrete.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/euler_ancestral_discrete.rs", "repo_id": "candle", "token_count": 4176 }
43
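The scheduler record above derives its noise levels from a beta schedule; the following standalone sketch (plain Rust, no candle, helper names are ours) walks the same scaled-linear beta -> cumulative-alpha -> sigma pipeline that `EulerAncestralDiscreteScheduler::new` performs before interpolating the sigmas onto the inference timesteps.

fn main() {
    // Defaults from EulerAncestralDiscreteSchedulerConfig in the record above.
    let (beta_start, beta_end, train_timesteps) = (0.00085f64, 0.012f64, 1000usize);

    // "Scaled linear" schedule: linspace between the square roots, then square.
    let betas: Vec<f64> = (0..train_timesteps)
        .map(|i| {
            let t = i as f64 / (train_timesteps - 1) as f64;
            let b = beta_start.sqrt() + t * (beta_end.sqrt() - beta_start.sqrt());
            b * b
        })
        .collect();

    // alphas_cumprod is the running product of (1 - beta); sigma = sqrt((1 - acp) / acp).
    let mut acp = 1.0f64;
    let sigmas: Vec<f64> = betas
        .iter()
        .map(|&beta| {
            acp *= 1.0 - beta;
            ((1.0 - acp) / acp).sqrt()
        })
        .collect();

    // The initial noise sigma is the largest value, i.e. the last entry of this increasing sequence.
    println!("sigma_first={:.4} sigma_last={:.2}", sigmas[0], sigmas[train_timesteps - 1]);
}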
use super::Config; use crate::models::with_tracing::{linear, linear_no_bias, Linear}; use candle::{Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, Conv1d, Conv1dConfig, Embedding, LayerNorm, Module, VarBuilder}; fn conv1d( in_channels: usize, out_channels: usize, kernel_size: usize, config: Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight = vb.get((out_channels, in_channels, kernel_size), "weight")?; let bias = vb.get(out_channels, "bias")?; Ok(Conv1d::new(weight, Some(bias), config)) } fn layer_norm(size: usize, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, 1e-5)) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L62 #[derive(Debug, Clone)] struct MultiHeadAttention { query: Linear, key: Linear, value: Linear, out: Linear, n_head: usize, span: tracing::Span, softmax_span: tracing::Span, matmul_span: tracing::Span, kv_cache: Option<(Tensor, Tensor)>, } impl MultiHeadAttention { fn load(n_state: usize, n_head: usize, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "multi-head-attn"); let softmax_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-softmax"); let matmul_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-matmul"); let query = linear(n_state, n_state, vb.pp("q_proj"))?; let value = linear(n_state, n_state, vb.pp("v_proj"))?; let key = linear_no_bias(n_state, n_state, vb.pp("k_proj"))?; let out = linear(n_state, n_state, vb.pp("out_proj"))?; Ok(Self { query, key, value, out, n_head, span, softmax_span, matmul_span, kv_cache: None, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let q = self.query.forward(x)?; let (k, v) = match xa { None => { let k = self.key.forward(x)?; let v = self.value.forward(x)?; (k, v) } Some(x) => { if flush_cache { self.kv_cache = None; } if let Some((k, v)) = &self.kv_cache { (k.clone(), v.clone()) } else { let k = self.key.forward(x)?; let v = self.value.forward(x)?; self.kv_cache = Some((k.clone(), v.clone())); (k, v) } } }; let wv = self.qkv_attention(&q, &k, &v, mask)?; let out = self.out.forward(&wv)?; Ok(out) } fn reshape_head(&self, x: &Tensor) -> Result<Tensor> { let (n_batch, n_ctx, n_state) = x.dims3()?; let target_dims = &[n_batch, n_ctx, self.n_head, n_state / self.n_head]; x.reshape(target_dims)?.transpose(1, 2) } fn qkv_attention( &self, q: &Tensor, k: &Tensor, v: &Tensor, mask: Option<&Tensor>, ) -> Result<Tensor> { let (_, n_ctx, n_state) = q.dims3()?; let scale = ((n_state / self.n_head) as f64).powf(-0.25); let q = (self.reshape_head(q)? * scale)?; let k = (self.reshape_head(k)?.transpose(2, 3)? * scale)?; let v = self.reshape_head(v)?.contiguous()?; let mut qk = { let _enter = self.matmul_span.enter(); q.matmul(&k)? }; if let Some(mask) = mask { let mask = mask.i((0..n_ctx, 0..n_ctx))?; qk = qk.broadcast_add(&mask)? } let w = { let _enter = self.softmax_span.enter(); candle_nn::ops::softmax_last_dim(&qk)? }; let wv = { let _enter = self.matmul_span.enter(); w.matmul(&v)? } .transpose(1, 2)? 
.flatten_from(2)?; Ok(wv) } fn reset_kv_cache(&mut self) { self.kv_cache = None; } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L111 #[derive(Debug, Clone)] struct ResidualAttentionBlock { attn: MultiHeadAttention, attn_ln: LayerNorm, cross_attn: Option<(MultiHeadAttention, LayerNorm)>, mlp_linear1: Linear, mlp_linear2: Linear, mlp_ln: LayerNorm, span: tracing::Span, } impl ResidualAttentionBlock { fn load(n_state: usize, n_head: usize, ca: bool, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "residual-attn"); let attn = MultiHeadAttention::load(n_state, n_head, vb.pp("self_attn"))?; let attn_ln = layer_norm(n_state, vb.pp("self_attn_layer_norm"))?; let cross_attn = if ca { let cross_attn = MultiHeadAttention::load(n_state, n_head, vb.pp("encoder_attn"))?; let cross_attn_ln = layer_norm(n_state, vb.pp("encoder_attn_layer_norm"))?; Some((cross_attn, cross_attn_ln)) } else { None }; let n_mlp = n_state * 4; let mlp_linear1 = linear(n_state, n_mlp, vb.pp("fc1"))?; let mlp_linear2 = linear(n_mlp, n_state, vb.pp("fc2"))?; let mlp_ln = layer_norm(n_state, vb.pp("final_layer_norm"))?; Ok(Self { attn, attn_ln, cross_attn, mlp_linear1, mlp_linear2, mlp_ln, span, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_kv_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let attn = self .attn .forward(&self.attn_ln.forward(x)?, None, mask, flush_kv_cache)?; let mut x = (x + attn)?; if let Some((attn, ln)) = &mut self.cross_attn { x = (&x + attn.forward(&ln.forward(&x)?, xa, None, flush_kv_cache)?)?; } let mlp = self.mlp_linear2.forward( &self .mlp_linear1 .forward(&self.mlp_ln.forward(&x)?)? .gelu()?, )?; x + mlp } fn reset_kv_cache(&mut self) { self.attn.reset_kv_cache(); if let Some((attn, _)) = &mut self.cross_attn { attn.reset_kv_cache(); } } } fn sinusoids(length: usize, channels: usize, device: &Device) -> Result<Tensor> { let max_timescale = 10000f32; let log_timescale_increment = max_timescale.ln() / (channels / 2 - 1) as f32; let inv_timescales: Vec<_> = (0..channels / 2) .map(|i| (i as f32 * (-log_timescale_increment)).exp()) .collect(); let inv_timescales = Tensor::new(inv_timescales.as_slice(), device)?.unsqueeze(0)?; let arange = Tensor::arange(0, length as u32, device)? .to_dtype(candle::DType::F32)? .unsqueeze(1)?; let sh = (length, channels / 2); let scaled_time = (arange.broadcast_as(sh)? 
* inv_timescales.broadcast_as(sh)?)?; let sincos = Tensor::cat(&[scaled_time.sin()?, scaled_time.cos()?], 1)?; Ok(sincos) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L143 #[derive(Debug, Clone)] pub struct AudioEncoder { conv1: Conv1d, conv2: Conv1d, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln_post: LayerNorm, span: tracing::Span, conv1_span: tracing::Span, conv2_span: tracing::Span, } impl AudioEncoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "audio-encoder"); let conv1_span = tracing::span!(tracing::Level::TRACE, "conv1"); let conv2_span = tracing::span!(tracing::Level::TRACE, "conv2"); let n_state = cfg.d_model; let n_head = cfg.encoder_attention_heads; let n_ctx = cfg.max_source_positions; let cfg1 = Conv1dConfig { padding: 1, stride: 1, groups: 1, dilation: 1, }; let cfg2 = Conv1dConfig { padding: 1, stride: 2, groups: 1, dilation: 1, }; let conv1 = conv1d(cfg.num_mel_bins, n_state, 3, cfg1, vb.pp("conv1"))?; let conv2 = conv1d(n_state, n_state, 3, cfg2, vb.pp("conv2"))?; let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?; let blocks = (0..cfg.encoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(&format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?; Ok(Self { conv1, conv2, positional_embedding, blocks, ln_post, conv1_span, conv2_span, span, }) } pub fn forward(&mut self, x: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let x = { let _enter = self.conv1_span.enter(); self.conv1.forward(x)?.gelu()? }; let x = { let _enter = self.conv2_span.enter(); self.conv2.forward(&x)?.gelu()? }; let x = x.transpose(1, 2)?; let (_bsize, seq_len, _hidden) = x.dims3()?; let positional_embedding = self.positional_embedding.narrow(0, 0, seq_len)?; let mut x = x.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, None, None, flush_kv_cache)? 
} let x = self.ln_post.forward(&x)?; Ok(x) } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L176 #[derive(Debug, Clone)] pub struct TextDecoder { token_embedding: Embedding, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln: LayerNorm, mask: Tensor, span: tracing::Span, span_final: tracing::Span, } impl TextDecoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "text-decoder"); let span_final = tracing::span!(tracing::Level::TRACE, "text-decoder-final"); let n_state = cfg.d_model; let n_head = cfg.decoder_attention_heads; let n_ctx = cfg.max_target_positions; let token_embedding = embedding(cfg.vocab_size, n_state, vb.pp("embed_tokens"))?; let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?; let blocks = (0..cfg.decoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(&format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln = layer_norm(n_state, vb.pp("layer_norm"))?; let mask: Vec<_> = (0..n_ctx) .flat_map(|i| (0..n_ctx).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (n_ctx, n_ctx), vb.device())?; Ok(Self { token_embedding, positional_embedding, blocks, ln, mask, span, span_final, }) } pub fn forward(&mut self, x: &Tensor, xa: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let last = x.dim(D::Minus1)?; let token_embedding = self.token_embedding.forward(x)?; let positional_embedding = self.positional_embedding.narrow(0, 0, last)?; let mut x = token_embedding.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, Some(xa), Some(&self.mask), flush_kv_cache)?; } self.ln.forward(&x) } pub fn final_linear(&self, x: &Tensor) -> Result<Tensor> { let b_size = x.dim(0)?; let w = self.token_embedding.embeddings().broadcast_left(b_size)?; let logits = { let _enter = self.span_final.enter(); x.matmul(&w.t()?)? }; Ok(logits) } pub fn reset_kv_cache(&mut self) { for block in self.blocks.iter_mut() { block.reset_kv_cache(); } } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L221 #[derive(Debug, Clone)] pub struct Whisper { pub encoder: AudioEncoder, pub decoder: TextDecoder, pub config: Config, } impl Whisper { pub fn load(vb: &VarBuilder, config: Config) -> Result<Self> { let encoder = AudioEncoder::load(vb.pp("model.encoder"), &config)?; let decoder = TextDecoder::load(vb.pp("model.decoder"), &config)?; Ok(Self { encoder, decoder, config, }) } pub fn reset_kv_cache(&mut self) { self.encoder .blocks .iter_mut() .for_each(|b| b.reset_kv_cache()); self.decoder.reset_kv_cache(); } }
candle/candle-transformers/src/models/whisper/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/model.rs", "repo_id": "candle", "token_count": 7050 }
44
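To make the positional-embedding math in the Whisper encoder above easier to follow, here is a plain-Rust sketch of the `sinusoids` helper (no tensor library; the 1500 x 384 sizes are only illustrative values for a small model).

fn sinusoids(length: usize, channels: usize) -> Vec<Vec<f32>> {
    let half = channels / 2;
    let log_timescale_increment = (10000f32).ln() / (half - 1) as f32;
    // inv_timescales[i] = exp(-i * increment), exactly as in the record above.
    let inv_timescales: Vec<f32> = (0..half)
        .map(|i| (i as f32 * (-log_timescale_increment)).exp())
        .collect();
    (0..length)
        .map(|pos| {
            let scaled: Vec<f32> = inv_timescales.iter().map(|&s| pos as f32 * s).collect();
            // First half sin, second half cos, matching Tensor::cat(&[sin, cos], 1).
            scaled
                .iter()
                .map(|t| t.sin())
                .chain(scaled.iter().map(|t| t.cos()))
                .collect()
        })
        .collect()
}

fn main() {
    let pe = sinusoids(1500, 384);
    println!("rows={} cols={}", pe.len(), pe[0].len());
}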
use candle::{Result, Tensor};

pub fn apply_repeat_penalty(logits: &Tensor, penalty: f32, context: &[u32]) -> Result<Tensor> {
    let device = logits.device();
    let mut logits = logits.to_vec1::<f32>()?;
    let context: std::collections::HashSet<_> = context.iter().collect();
    for (token_id, logit) in logits.iter_mut().enumerate() {
        if context.contains(&(token_id as u32)) {
            if *logit >= 0. {
                *logit /= penalty
            } else {
                *logit *= penalty
            }
        }
    }
    let logits_len = logits.len();
    Tensor::from_vec(logits, logits_len, device)
}
candle/candle-transformers/src/utils.rs/0
{ "file_path": "candle/candle-transformers/src/utils.rs", "repo_id": "candle", "token_count": 299 }
45
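One last illustrative sketch (standalone Rust; the slice-based signature is ours, not the crate's): the same repeat-penalty rule as `apply_repeat_penalty` above, applied to a plain `Vec<f32>` so the positive and negative cases are easy to verify by hand.

use std::collections::HashSet;

fn apply_repeat_penalty(logits: &mut [f32], penalty: f32, context: &[u32]) {
    let context: HashSet<u32> = context.iter().copied().collect();
    for (token_id, logit) in logits.iter_mut().enumerate() {
        if context.contains(&(token_id as u32)) {
            // Positive logits are divided, negative ones multiplied: both become less likely.
            if *logit >= 0.0 {
                *logit /= penalty;
            } else {
                *logit *= penalty;
            }
        }
    }
}

fn main() {
    let mut logits = vec![2.0f32, -2.0, 0.5];
    // Tokens 0 and 1 already appeared in the context; token 2 did not.
    apply_repeat_penalty(&mut logits, 1.3, &[0, 1]);
    assert_eq!(logits, vec![2.0 / 1.3, -2.0 * 1.3, 0.5]);
    println!("{logits:?}");
}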
//load Candle Bert Module wasm module let init, ModelEncoder; async function fetchArrayBuffer(url) { const cacheName = "t5-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Encoder { static instance = {}; static async getInstance(weightsURL, tokenizerURL, configURL, modelID) { if (modelID.includes("quantized")) { ({ default: init, ModelEncoder } = await import( "./build/m-quantized.js" )); } else { ({ default: init, ModelEncoder } = await import("./build/m.js")); } if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new ModelEncoder( weightsArrayU8, tokenizerArrayU8, configArrayU8 ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, } = event.data; try { self.postMessage({ status: "ready", message: "Starting T5 Encoder" }); const model = await Encoder.getInstance( weightsURL, tokenizerURL, configURL, modelID ); self.postMessage({ status: "encoding", message: "Encoding Sentences", }); const output = model.decode({ sentences: sentences, normalize_embeddings: normalize_embeddings || true, }); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js", "repo_id": "candle", "token_count": 873 }
46
use candle_wasm_example_whisper::worker::{Decoder as D, ModelData};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct Decoder {
    decoder: D,
}

#[wasm_bindgen]
impl Decoder {
    #[wasm_bindgen(constructor)]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        weights: Vec<u8>,
        tokenizer: Vec<u8>,
        mel_filters: Vec<u8>,
        config: Vec<u8>,
        quantized: bool,
        is_multilingual: bool,
        timestamps: bool,
        task: Option<String>,
        language: Option<String>,
    ) -> Result<Decoder, JsError> {
        let decoder = D::load(ModelData {
            tokenizer,
            mel_filters,
            config,
            quantized,
            weights,
            is_multilingual,
            timestamps,
            task,
            language,
        });
        match decoder {
            Ok(decoder) => Ok(Self { decoder }),
            Err(e) => Err(JsError::new(&e.to_string())),
        }
    }

    #[wasm_bindgen]
    pub fn decode(&mut self, wav_input: Vec<u8>) -> Result<String, JsError> {
        let segments = self
            .decoder
            .convert_and_run(&wav_input)
            .map_err(|e| JsError::new(&e.to_string()))?;
        let json = serde_json::to_string(&segments)?;
        Ok(json)
    }
}

fn main() {}
candle/candle-wasm-examples/whisper/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/m.rs", "repo_id": "candle", "token_count": 694 }
47
mod app;
pub mod coco_classes;
pub mod model;
pub mod worker;

pub use app::App;
pub use worker::Worker;
candle/candle-wasm-examples/yolo/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/lib.rs", "repo_id": "candle", "token_count": 37 }
48
declare module "*.ttf" {
	const value: ArrayBuffer;
	export default value;
}
chat-ui/src/ambient.d.ts/0
{ "file_path": "chat-ui/src/ambient.d.ts", "repo_id": "chat-ui", "token_count": 26 }
49
<script lang="ts"> import { createEventDispatcher, onDestroy, onMount } from "svelte"; import { cubicOut } from "svelte/easing"; import { fade } from "svelte/transition"; import Portal from "./Portal.svelte"; import { browser } from "$app/environment"; export let width = "max-w-sm"; let backdropEl: HTMLDivElement; let modalEl: HTMLDivElement; const dispatch = createEventDispatcher<{ close: void }>(); function handleKeydown(event: KeyboardEvent) { // close on ESC if (event.key === "Escape") { event.preventDefault(); dispatch("close"); } } function handleBackdropClick(event: MouseEvent) { if (window?.getSelection()?.toString()) { return; } if (event.target === backdropEl) { dispatch("close"); } } onMount(() => { document.getElementById("app")?.setAttribute("inert", "true"); modalEl.focus(); }); onDestroy(() => { if (!browser) return; // remove inert attribute if this is the last modal if (document.querySelectorAll('[role="dialog"]:not(#app *)').length === 1) { document.getElementById("app")?.removeAttribute("inert"); } }); </script> <Portal> <!-- svelte-ignore a11y-no-noninteractive-element-interactions --> <div role="presentation" tabindex="-1" bind:this={backdropEl} on:click|stopPropagation={handleBackdropClick} transition:fade|global={{ easing: cubicOut, duration: 300 }} class="fixed inset-0 z-40 flex items-center justify-center bg-black/80 p-8 backdrop-blur-sm dark:bg-black/50" > <div role="dialog" tabindex="-1" bind:this={modalEl} on:keydown={handleKeydown} class="max-h-[90dvh] overflow-y-auto overflow-x-hidden rounded-2xl bg-white shadow-2xl outline-none sm:-mt-10 {width}" > <slot /> </div> </div> </Portal>
chat-ui/src/lib/components/Modal.svelte/0
{ "file_path": "chat-ui/src/lib/components/Modal.svelte", "repo_id": "chat-ui", "token_count": 675 }
50
<script lang="ts"> import { webSearchParameters } from "$lib/stores/webSearchParameters"; import CarbonInformation from "~icons/carbon/information"; import Switch from "./Switch.svelte"; const toggle = () => ($webSearchParameters.useSearch = !$webSearchParameters.useSearch); </script> <div class="flex h-8 cursor-pointer select-none items-center gap-2 rounded-lg border bg-white p-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900" on:click={toggle} on:keydown={toggle} aria-checked={$webSearchParameters.useSearch} aria-label="web search toggle" role="switch" tabindex="0" > <Switch name="useSearch" bind:checked={$webSearchParameters.useSearch} on:click on:keydown /> <div class="whitespace-nowrap text-sm text-gray-800 dark:text-gray-200">Search web</div> <div class="group relative w-max"> <CarbonInformation class="text-xs text-gray-500" /> <div class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800" > <p class="max-w-sm text-sm text-gray-800 dark:text-gray-200"> When enabled, the model will try to complement its answer with information queried from the web. </p> </div> </div> </div>
chat-ui/src/lib/components/WebSearchToggle.svelte/0
{ "file_path": "chat-ui/src/lib/components/WebSearchToggle.svelte", "repo_id": "chat-ui", "token_count": 447 }
51
import { collections } from "$lib/server/database";

export async function acquireLock(key = "migrations") {
	try {
		const insert = await collections.semaphores.insertOne({
			key,
			createdAt: new Date(),
			updatedAt: new Date(),
		});

		return !!insert.acknowledged; // true if the document was inserted
	} catch (e) {
		// unique index violation, so there must already be a lock
		return false;
	}
}

export async function releaseLock(key = "migrations") {
	await collections.semaphores.deleteOne({
		key,
	});
}

export async function isDBLocked(key = "migrations"): Promise<boolean> {
	const res = await collections.semaphores.countDocuments({
		key,
	});
	return res > 0;
}

export async function refreshLock(key = "migrations") {
	await collections.semaphores.updateOne(
		{
			key,
		},
		{
			$set: {
				updatedAt: new Date(),
			},
		}
	);
}
chat-ui/src/lib/migrations/lock.ts/0
{ "file_path": "chat-ui/src/lib/migrations/lock.ts", "repo_id": "chat-ui", "token_count": 306 }
52
import { HF_ACCESS_TOKEN, HF_TOKEN } from "$env/static/private"; import { buildPrompt } from "$lib/buildPrompt"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; export const endpointLlamacppParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("llamacpp"), url: z.string().url().default("http://127.0.0.1:8080"), accessToken: z .string() .min(1) .default(HF_TOKEN ?? HF_ACCESS_TOKEN), }); export function endpointLlamacpp( input: z.input<typeof endpointLlamacppParametersSchema> ): Endpoint { const { url, model } = endpointLlamacppParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const r = await fetch(`${url}/completion`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ prompt, stream: true, temperature: model.parameters.temperature, top_p: model.parameters.top_p, top_k: model.parameters.top_k, stop: model.parameters.stop, repeat_penalty: model.parameters.repetition_penalty, n_predict: model.parameters.max_new_tokens, cache_prompt: true, }), }); if (!r.ok) { throw new Error(`Failed to generate text: ${await r.text()}`); } const encoder = new TextDecoderStream(); const reader = r.body?.pipeThrough(encoder).getReader(); return (async function* () { let stop = false; let generatedText = ""; let tokenId = 0; let accumulatedData = ""; // Buffer to accumulate data chunks while (!stop) { // Read the stream and log the outputs to console const out = (await reader?.read()) ?? { done: false, value: undefined }; // If it's done, we cancel if (out.done) { reader?.cancel(); return; } if (!out.value) { return; } // Accumulate the data chunk accumulatedData += out.value; // Process each complete JSON object in the accumulated data while (accumulatedData.includes("\n")) { // Assuming each JSON object ends with a newline const endIndex = accumulatedData.indexOf("\n"); let jsonString = accumulatedData.substring(0, endIndex).trim(); // Remove the processed part from the buffer accumulatedData = accumulatedData.substring(endIndex + 1); if (jsonString.startsWith("data: ")) { jsonString = jsonString.slice(6); let data = null; try { data = JSON.parse(jsonString); } catch (e) { console.error("Failed to parse JSON", e); console.error("Problematic JSON string:", jsonString); continue; // Skip this iteration and try the next chunk } // Handle the parsed data if (data.content || data.stop) { generatedText += data.content; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: data.content ?? "", logprob: 0, special: false, }, generated_text: data.stop ? generatedText : null, details: null, }; if (data.stop) { stop = true; output.token.special = true; reader?.cancel(); } yield output; } } } } })(); }; } export default endpointLlamacpp;
chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts", "repo_id": "chat-ui", "token_count": 1417 }
53
import type { Message } from "$lib/types/Message"; import { format } from "date-fns"; import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint"; export async function generateQuery(messages: Message[]) { const currentDate = format(new Date(), "MMMM d, yyyy"); const userMessages = messages.filter(({ from }) => from === "user"); const previousUserMessages = userMessages.slice(0, -1); const lastMessage = userMessages.slice(-1)[0]; const convQuery: Array<Omit<Message, "id">> = [ { from: "user", content: `Previous Questions: - Who is the president of France? Current Question: What about Mexico? `, }, { from: "assistant", content: "President of Mexico", }, { from: "user", content: `Previous questions: - When is the next formula 1 grand prix? Current Question: Where is it being hosted?`, }, { from: "assistant", content: "location of next formula 1 grand prix", }, { from: "user", content: "Current Question: What type of printhead does the Epson F2270 DTG printer use?", }, { from: "assistant", content: "Epson F2270 DTG printer printhead", }, { from: "user", content: "What were the news yesterday?" }, { from: "assistant", content: `news ${format(new Date(Date.now() - 864e5), "MMMM d, yyyy")}`, }, { from: "user", content: "What is the current weather in Paris?" }, { from: "assistant", content: `weather in Paris ${currentDate}` }, { from: "user", content: (previousUserMessages.length > 0 ? `Previous questions: \n${previousUserMessages .map(({ content }) => `- ${content}`) .join("\n")}` : "") + "\n\nCurrent Question: " + lastMessage.content, }, ]; const webQuery = await generateFromDefaultEndpoint({ messages: convQuery, preprompt: `You are tasked with generating web search queries. Give me an appropriate query to answer my question for google search. Answer with only the query. Today is ${currentDate}`, }); return webQuery.trim(); }
chat-ui/src/lib/server/websearch/generateQuery.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/generateQuery.ts", "repo_id": "chat-ui", "token_count": 723 }
54
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; export interface Assistant extends Timestamps { _id: ObjectId; createdById: User["_id"] | string; // user id or session createdByName?: User["username"]; avatar?: string; name: string; description?: string; modelId: string; exampleInputs: string[]; preprompt: string; userCount?: number; featured?: boolean; rag?: { allowAllDomains: boolean; allowedDomains: string[]; allowedLinks: string[]; }; searchTokens: string[]; }
chat-ui/src/lib/types/Assistant.ts/0
{ "file_path": "chat-ui/src/lib/types/Assistant.ts", "repo_id": "chat-ui", "token_count": 190 }
55
/* eslint-disable no-shadow */ export enum UrlDependency { ConversationList = "conversation:list", Conversation = "conversation", }
chat-ui/src/lib/types/UrlDependency.ts/0
{ "file_path": "chat-ui/src/lib/types/UrlDependency.ts", "repo_id": "chat-ui", "token_count": 47 }
56
import * as fs from "fs"; import { setGlobalDispatcher, Agent } from "undici"; /** * Load client certificates for mutual TLS authentication. This function must be called before any HTTP requests are made. * This is a global setting that affects all HTTP requests made by the application using the native fetch API. * * @param clientCertPath Path to client certificate * @param clientKeyPath Path to client key * @param caCertPath Path to CA certificate [optional] * @param clientKeyPassword Password for client key [optional] * @param rejectUnauthorized Reject unauthorized certificates. * Only use for testing/development, not recommended in production environments [optional] * * @returns void * * @example * ```typescript * loadClientCertificates("cert.pem", "key.pem", "ca.pem", "password", false); * ``` * * @see * [Undici Agent](https://undici.nodejs.org/#/docs/api/Agent) * @see * [Undici Dispatcher](https://undici.nodejs.org/#/docs/api/Dispatcher) * @see * [NodeJS Native Fetch API](https://nodejs.org/docs/latest-v19.x/api/globals.html#fetch) */ export function loadClientCertificates( clientCertPath: string, clientKeyPath: string, caCertPath?: string, clientKeyPassword?: string, rejectUnauthorized?: boolean ): void { const clientCert = fs.readFileSync(clientCertPath); const clientKey = fs.readFileSync(clientKeyPath); const caCert = caCertPath ? fs.readFileSync(caCertPath) : undefined; const agent = new Agent({ connect: { cert: clientCert, key: clientKey, ca: caCert, passphrase: clientKeyPassword, rejectUnauthorized, }, }); setGlobalDispatcher(agent); }
chat-ui/src/lib/utils/loadClientCerts.ts/0
{ "file_path": "chat-ui/src/lib/utils/loadClientCerts.ts", "repo_id": "chat-ui", "token_count": 551 }
57
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; export function buildSubtree( conv: Pick<Conversation, "messages" | "rootMessageId">, id: Message["id"] ): Message[] { if (!conv.rootMessageId) { if (conv.messages.length === 0) return []; // legacy conversation slice up to id const index = conv.messages.findIndex((m) => m.id === id); if (index === -1) throw new Error("Message not found"); return conv.messages.slice(0, index + 1); } else { // find the message with the right id then create the ancestor tree const message = conv.messages.find((m) => m.id === id); if (!message) throw new Error("Message not found"); return [ ...(message.ancestors?.map((ancestorId) => { const ancestor = conv.messages.find((m) => m.id === ancestorId); if (!ancestor) throw new Error("Ancestor not found"); return ancestor; }) ?? []), message, ]; } }
chat-ui/src/lib/utils/tree/buildSubtree.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/buildSubtree.ts", "repo_id": "chat-ui", "token_count": 329 }
58
import { base } from "$app/paths"; import { collections } from "$lib/server/database.js"; import { redirect } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; export const load = async ({ params }) => { try { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { throw redirect(302, `${base}`); } return { assistant: JSON.parse(JSON.stringify(assistant)) }; } catch { throw redirect(302, `${base}`); } };
chat-ui/src/routes/assistant/[assistantId]/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.server.ts", "repo_id": "chat-ui", "token_count": 178 }
59
import { redirect } from "@sveltejs/kit"; import { getOIDCAuthorizationUrl } from "$lib/server/auth"; import { base } from "$app/paths"; import { ALTERNATIVE_REDIRECT_URLS } from "$env/static/private"; export const actions = { async default({ url, locals, request }) { const referer = request.headers.get("referer"); let redirectURI = `${(referer ? new URL(referer) : url).origin}${base}/login/callback`; // TODO: Handle errors if provider is not responding if (url.searchParams.has("callback")) { const callback = url.searchParams.get("callback") || redirectURI; if (ALTERNATIVE_REDIRECT_URLS.includes(callback)) { redirectURI = callback; } } const authorizationUrl = await getOIDCAuthorizationUrl( { redirectURI }, { sessionId: locals.sessionId } ); throw redirect(303, authorizationUrl); }, };
chat-ui/src/routes/login/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/login/+page.server.ts", "repo_id": "chat-ui", "token_count": 286 }
60
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ parent, params }) { const data = await parent(); const model = data.models.find((m: { id: string }) => m.id === params.model); if (!model || model.unlisted) { throw redirect(302, `${base}/settings`); } return data; }
chat-ui/src/routes/settings/(nav)/[...model]/+page.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/[...model]/+page.ts", "repo_id": "chat-ui", "token_count": 111 }
61
# This is the list of HuggingFace Datasets authors for copyright purposes. # # This does not necessarily list everyone who has contributed code, since in # some cases, their employer may be the copyright holder. To see the full list # of contributors, see the revision history in source control. Google Inc. HuggingFace Inc.
datasets/AUTHORS/0
{ "file_path": "datasets/AUTHORS", "repo_id": "datasets", "token_count": 78 }
62
# Create an image dataset There are two methods for creating and sharing an image dataset. This guide will show you how to: * Create an image dataset with `ImageFolder` and some metadata. This is a no-code solution for quickly creating an image dataset with several thousand images. * Create an image dataset by writing a loading script. This method is a bit more involved, but you have greater flexibility over how a dataset is defined, downloaded, and generated which can be useful for more complex or large scale image datasets. <Tip> You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub. </Tip> ## ImageFolder The `ImageFolder` is a dataset builder designed to quickly load an image dataset with several thousand images without requiring you to write any code. <Tip> 💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `ImageFolder` creates dataset splits based on your dataset repository structure. </Tip> `ImageFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like: ``` folder/train/dog/golden_retriever.png folder/train/dog/german_shepherd.png folder/train/dog/chihuahua.png folder/train/cat/maine_coon.png folder/train/cat/bengal.png folder/train/cat/birman.png ``` Then users can load your dataset by specifying `imagefolder` in [`load_dataset`] and the directory in `data_dir`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder") ``` You can also use `imagefolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure: ``` folder/train/dog/golden_retriever.png folder/train/cat/maine_coon.png folder/test/dog/german_shepherd.png folder/test/cat/bengal.png ``` <Tip warning={true}> If all image files are contained in a single directory or if they are not on the same level of directory structure, `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly. </Tip> If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`. 
``` folder/train/metadata.csv folder/train/0001.png folder/train/0002.png folder/train/0003.png ``` You can also zip your images: ``` folder/metadata.csv folder/train.zip folder/test.zip folder/valid.zip ``` Your `metadata.csv` file must have a `file_name` column which links image files with their metadata: ```csv file_name,additional_feature 0001.png,This is a first value of a text feature you added to your images 0002.png,This is a second value of a text feature you added to your images 0003.png,This is a third value of a text feature you added to your images ``` or using `metadata.jsonl`: ```jsonl {"file_name": "0001.png", "additional_feature": "This is a first value of a text feature you added to your images"} {"file_name": "0002.png", "additional_feature": "This is a second value of a text feature you added to your images"} {"file_name": "0003.png", "additional_feature": "This is a third value of a text feature you added to your images"} ``` <Tip> If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`. </Tip> ### Image captioning Image captioning datasets have text describing an image. An example `metadata.csv` may look like: ```csv file_name,text 0001.png,This is a golden retriever playing with a ball 0002.png,A german shepherd 0003.png,One chihuahua ``` Load the dataset with `ImageFolder`, and it will create a `text` column for the image captions: ```py >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset[0]["text"] "This is a golden retriever playing with a ball" ``` ### Object detection Object detection datasets have bounding boxes and categories identifying objects in an image. An example `metadata.jsonl` may look like: ```jsonl {"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}} {"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "categories": [1]}} {"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "categories": [2, 2]}} ``` Load the dataset with `ImageFolder`, and it will create a `objects` column with the bounding boxes and the categories: ```py >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset[0]["objects"] {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]} ``` ### Upload dataset to the Hub Once you've created a dataset, you can share it to the Hub with the [`~datasets.DatasetDict.push_to_hub`] method. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details). Upload your dataset with [`~datasets.DatasetDict.push_to_hub`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset.push_to_hub("stevhliu/my-image-captioning-dataset") ``` ## WebDataset The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets. Indeed you can group your images in TAR archives (e.g. 1GB of images per TAR archive) and have thousands of TAR archives: ``` folder/train/00000.tar folder/train/00001.tar folder/train/00002.tar ... 
``` In the archives, each example is made of files sharing the same prefix: ``` e39871fd9fd74f55.jpg e39871fd9fd74f55.json f18b91585c4d3f3e.jpg f18b91585c4d3f3e.json ede6e66b2fb59aab.jpg ede6e66b2fb59aab.json ed600d57fcee4f94.jpg ed600d57fcee4f94.json ... ``` You can put your images labels/captions/bounding boxes using JSON or text files for example. For more details on the WebDataset format and the python library, please check the [WebDataset documentation](https://webdataset.github.io/webdataset). Load your WebDataset and it will create on column per file suffix (here "jpg" and "json"): ```python >>> from datasets import load_dataset >>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", split="train") >>> dataset[0]["json"] {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]} ``` ## Loading script Write a dataset loading script to share a dataset. It defines a dataset's splits and configurations, and handles downloading and generating a dataset. The script is located in the same folder or repository as the dataset and should have the same name. ``` my_dataset/ ├── README.md ├── my_dataset.py └── data/ # optional, may contain your images or TAR archives ``` This structure allows your dataset to be loaded in one line: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("path/to/my_dataset") ``` This guide will show you how to create a dataset loading script for image datasets, which is a bit different from <a class="underline decoration-green-400 decoration-2 font-semibold" href="./dataset_script">creating a loading script for text datasets</a>. You'll learn how to: * Create a dataset builder class. * Create dataset configurations. * Add dataset metadata. * Download and define the dataset splits. * Generate the dataset. * Generate the dataset metadata (optional). * Upload the dataset to the Hub. The best way to learn is to open up an existing image dataset loading script, like [Food-101](https://huggingface.co/datasets/food101/blob/main/food101.py), and follow along! <Tip> To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point! </Tip> ### Create a dataset builder class [`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset: * `info` stores information about your dataset like its description, license, and features. * `split_generators` downloads the dataset and defines its splits. * `generate_examples` generates the images and labels for each split. Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections: ```py class Food101(datasets.GeneratorBasedBuilder): """Food-101 Images dataset""" def _info(self): def _split_generators(self, dl_manager): def _generate_examples(self, images, metadata_path): ``` #### Multiple configurations In some cases, a dataset may have more than one configuration. For example, if you check out the [Imagenette dataset](https://huggingface.co/datasets/frgfm/imagenette), you'll notice there are three subsets. To create different configurations, use the [`BuilderConfig`] class to create a subclass for your dataset. 
Provide the links to download the images and labels in `data_url` and `metadata_urls`: ```py class Food101Config(datasets.BuilderConfig): """Builder Config for Food-101""" def __init__(self, data_url, metadata_urls, **kwargs): """BuilderConfig for Food-101. Args: data_url: `string`, url to download the zip file from. metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs **kwargs: keyword arguments forwarded to super. """ super(Food101Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs) self.data_url = data_url self.metadata_urls = metadata_urls ``` Now you can define your subsets at the top of [`GeneratorBasedBuilder`]. Imagine you want to create two subsets in the Food-101 dataset based on whether it is a breakfast or dinner food. 1. Define your subsets with `Food101Config` in a list in `BUILDER_CONFIGS`. 2. For each configuration, provide a name, description, and where to download the images and labels from. ```py class Food101(datasets.GeneratorBasedBuilder): """Food-101 Images dataset""" BUILDER_CONFIGS = [ Food101Config( name="breakfast", description="Food types commonly eaten during breakfast.", data_url="https://link-to-breakfast-foods.zip", metadata_urls={ "train": "https://link-to-breakfast-foods-train.txt", "validation": "https://link-to-breakfast-foods-validation.txt" }, , Food101Config( name="dinner", description="Food types commonly eaten during dinner.", data_url="https://link-to-dinner-foods.zip", metadata_urls={ "train": "https://link-to-dinner-foods-train.txt", "validation": "https://link-to-dinner-foods-validation.txt" }, )... ] ``` Now if users want to load the `breakfast` configuration, they can use the configuration name: ```py >>> from datasets import load_dataset >>> ds = load_dataset("food101", "breakfast", split="train") ``` ### Add dataset metadata Adding information about your dataset is useful for users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder("food101") >>> ds_builder.info ``` There is a lot of information you can specify about your dataset, but some important ones to include are: 1. `description` provides a concise description of the dataset. 2. `features` specify the dataset column types. Since you're creating an image loading script, you'll need to include the [`Image`] feature. 3. `supervised_keys` specify the input feature and label. 4. `homepage` provides a link to the dataset homepage. 5. `citation` is a BibTeX citation of the dataset. 6. `license` states the dataset's license. <Tip> You'll notice a lot of the dataset information is defined earlier in the loading script which makes it easier to read. There are also other [`~Datasets.Features`] you can input, so be sure to check out the full list for more details. </Tip> ```py def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "image": datasets.Image(), "label": datasets.ClassLabel(names=_NAMES), } ), supervised_keys=("image", "label"), homepage=_HOMEPAGE, citation=_CITATION, license=_LICENSE, task_templates=[ImageClassification(image_column="image", label_column="label")], ) ``` ### Download and define the dataset splits Now that you've added some information about your dataset, the next step is to download the dataset and generate the splits. 1. 
Use the [`DownloadManager.download`] method to download the dataset and any other metadata you'd like to associate with it. This method accepts: * a name to a file inside a Hub dataset repository (in other words, the `data/` folder) * a URL to a file hosted somewhere else * a list or dictionary of file names or URLs In the Food-101 loading script, you'll notice again the URLs are defined earlier in the script. 2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the images and labels in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `SPLIT.Validation`. In the `gen_kwargs` parameter, specify the file paths to the `images` to iterate over and load. If necessary, you can use [`DownloadManager.iter_archive`] to iterate over images in TAR archives. You can also specify the associated labels in the `metadata_path`. The `images` and `metadata_path` are actually passed onto the next step where you'll actually generate the dataset. <Tip warning={true}> To stream a TAR archive file, you need to use [`DownloadManager.iter_archive`]! The [`DownloadManager.download_and_extract`] function does not support TAR archives in streaming mode. </Tip> ```py def _split_generators(self, dl_manager): archive_path = dl_manager.download(_BASE_URL) split_metadata_paths = dl_manager.download(_METADATA_URLS) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "images": dl_manager.iter_archive(archive_path), "metadata_path": split_metadata_paths["train"], }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={ "images": dl_manager.iter_archive(archive_path), "metadata_path": split_metadata_paths["test"], }, ), ] ``` ### Generate the dataset The last method in the [`GeneratorBasedBuilder`] class actually generates the images and labels in the dataset. It yields a dataset according to the stucture specified in `features` from the `info` method. As you can see, `generate_examples` accepts the `images` and `metadata_path` from the previous method as arguments. <Tip warning={true}> To stream a TAR archive file, the `metadata_path` needs to be opened and read first. TAR files are accessed and yielded sequentially. This means you need to have the metadata information in hand first so you can yield it with its corresponding image. </Tip> Now you can write a function for opening and loading examples from the dataset: ```py def _generate_examples(self, images, metadata_path): """Generate images and labels for splits.""" with open(metadata_path, encoding="utf-8") as f: files_to_keep = set(f.read().split("\n")) for file_path, file_obj in images: if file_path.startswith(_IMAGES_DIR): if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep: label = file_path.split("/")[2] yield file_path, { "image": {"path": file_path, "bytes": file_obj.read()}, "label": label, } ``` ### Generate the dataset metadata (optional) The dataset metadata can be generated and stored in the dataset card (`README.md` file). Run the following command to generate your dataset metadata in `README.md` and make sure your new loading script works correctly: ```bash datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs ``` If your loading script passed the test, you should now have the `dataset_info` YAML fields in the header of the `README.md` file in your dataset folder. ### Upload the dataset to the Hub Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share). 
Congratulations, you can now load your dataset from the Hub! 🥳 ```py >>> from datasets import load_dataset >>> load_dataset("<username>/my_dataset") ```
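To tie the loading-script walkthrough together, here is a hedged, condensed skeleton that stitches the `_info` / `_split_generators` / `_generate_examples` pieces into one class. The URLs, label names and archive layout are placeholders rather than a real dataset, and the label-extraction index depends entirely on how your archive is organized.

```py
import datasets

_NAMES = ["cat", "dog"]                                      # placeholder class names
_BASE_URL = "https://example.com/images.tar.gz"              # placeholder archive URL
_METADATA_URLS = {"train": "https://example.com/train.txt"}  # placeholder metadata URL
_IMAGES_DIR = "images/"                                      # placeholder prefix inside the archive


class MyImageDataset(datasets.GeneratorBasedBuilder):
    """Minimal image-dataset loading-script skeleton."""

    def _info(self):
        return datasets.DatasetInfo(
            description="A toy image classification dataset.",
            features=datasets.Features(
                {"image": datasets.Image(), "label": datasets.ClassLabel(names=_NAMES)}
            ),
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_BASE_URL)
        metadata_paths = dl_manager.download(_METADATA_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_paths["train"],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        # Read the metadata first: TAR members are yielded sequentially.
        with open(metadata_path, encoding="utf-8") as f:
            files_to_keep = set(f.read().split("\n"))
        for file_path, file_obj in images:
            if file_path.startswith(_IMAGES_DIR) and file_path in files_to_keep:
                # Assumes an "images/<label>/<file>.jpg" layout; adjust the index for your archive.
                label = file_path.split("/")[1]
                yield file_path, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "label": label,
                }
```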
datasets/docs/source/image_dataset.mdx/0
{ "file_path": "datasets/docs/source/image_dataset.mdx", "repo_id": "datasets", "token_count": 5724 }
63
# Table Classes Each `Dataset` object is backed by a PyArrow Table. A Table can be loaded from either the disk (memory mapped) or in memory. Several Table types are available, and they all inherit from [`table.Table`]. ## Table [[autodoc]] datasets.table.Table - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes ## InMemoryTable [[autodoc]] datasets.table.InMemoryTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file - from_buffer - from_pandas - from_arrays - from_pydict - from_batches ## MemoryMappedTable [[autodoc]] datasets.table.MemoryMappedTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file ## ConcatenationTable [[autodoc]] datasets.table.ConcatenationTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_blocks - from_tables ## Utils [[autodoc]] datasets.table.concat_tables [[autodoc]] datasets.table.list_table_cache_files
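A minimal, hedged sketch of how these table classes show up in practice (assuming the `datasets` package is installed; the exact backing table type can vary with how a dataset was created):

```python
from datasets import Dataset
from datasets.table import InMemoryTable, concat_tables

# Build an in-memory Arrow-backed table directly from a Python dict.
table = InMemoryTable.from_pydict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
print(table.num_rows, table.column_names)  # 3 ['id', 'text']

# A Dataset exposes its backing table through `.data`; for a dataset built
# in memory this is typically an InMemoryTable.
ds = Dataset.from_dict({"id": [4, 5], "text": ["d", "e"]})
print(type(ds.data).__name__)

# Tables can be combined with the utility documented above.
combined = concat_tables([table, table])
print(combined.num_rows)  # 6
```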
datasets/docs/source/package_reference/table_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/table_classes.mdx", "repo_id": "datasets", "token_count": 1029 }
64
# Use with Spark This document is a quick introduction to using 🤗 Datasets with Spark, with a particular focus on how to load a Spark DataFrame into a [`Dataset`] object. From there, you have fast access to any element and you can use it as a data loader to train models. ## Load from Spark A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to PyTorch, TensorFlow and JAX tensors. The Arrow table is memory mapped from disk, which can load datasets bigger than your available RAM. You can get a [`Dataset`] from a Spark DataFrame using [`Dataset.from_spark`]: ```py >>> from datasets import Dataset >>> df = spark.createDataFrame( ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], ... columns=["id", "name"], ... ) >>> ds = Dataset.from_spark(df) ``` The Spark workers write the dataset on disk in a cache directory as Arrow files, and the [`Dataset`] is loaded from there. Alternatively, you can skip materialization by using [`IterableDataset.from_spark`], which returns an [`IterableDataset`]: ```py >>> from datasets import IterableDataset >>> df = spark.createDataFrame( ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], ... columns=["id", "name"], ... ) >>> ds = IterableDataset.from_spark(df) >>> print(next(iter(ds))) {"id": 1, "name": "Elia"} ``` ### Caching When using [`Dataset.from_spark`], the resulting [`Dataset`] is cached; if you call [`Dataset.from_spark`] multiple times on the same DataFrame it won't re-run the Spark job that writes the dataset as Arrow files on disk. You can set the cache location by passing `cache_dir=` to [`Dataset.from_spark`]. Make sure to use a disk that is available to both your workers and your current machine (the driver). <Tip warning={true}> In a different session, a Spark DataFrame doesn't have the same [semantic hash](https://spark.apache.org/docs/3.2.0/api/python/reference/api/pyspark.sql.DataFrame.semanticHash.html), and it will rerun a Spark job and store it in a new cache. </Tip> ### Feature types If your dataset is made of images, audio data or N-dimensional arrays, you can specify the `features=` argument in [`Dataset.from_spark`] (or [`IterableDataset.from_spark`]): ```py >>> from datasets import Dataset, Features, Image, Value >>> data = [(0, open("image.png", "rb").read())] >>> df = spark.createDataFrame(data, "idx: int, image: binary") >>> # Also works if you have arrays >>> # data = [(0, np.zeros(shape=(32, 32, 3), dtype=np.int32).tolist())] >>> # df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") >>> features = Features({"idx": Value("int64"), "image": Image()}) >>> dataset = Dataset.from_spark(df, features=features) >>> dataset[0] {'idx': 0, 'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32>} ``` You can check the [`Features`] documentation to know about all the feature types available.
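As a quick end-to-end illustration, here is a hedged sketch that spins up a local Spark session and materializes a DataFrame as a [`Dataset`] (it assumes `pyspark` is installed; the cache path is only an example):

```py
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").appName("to-hf-dataset").getOrCreate()
df = spark.createDataFrame(
    [(1, "Elia"), (2, "Teo"), (3, "Fang")],
    schema="id: int, name: string",
)

# Spark workers write Arrow files into the cache directory, then the Dataset is loaded from there.
ds = Dataset.from_spark(df, cache_dir="/tmp/hf_spark_cache")  # illustrative cache location
print(ds[0])  # e.g. {'id': 1, 'name': 'Elia'}
```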
datasets/docs/source/use_with_spark.mdx/0
{ "file_path": "datasets/docs/source/use_with_spark.mdx", "repo_id": "datasets", "token_count": 962 }
65
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The CodeEval metric estimates the pass@k metric for code synthesis. This is an evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _CITATION = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ _DESCRIPTION = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). """ _KWARGS_DESCRIPTION = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). 
timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ _WARNING = """ ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ """ _LICENSE = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class CodeEval(datasets.Metric): def _info(self): return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string")), "references": datasets.Value("string"), } ), homepage="https://github.com/openai/human-eval", codebase_urls=["https://github.com/openai/human-eval"], reference_urls=["https://github.com/openai/human-eval"], license=_LICENSE, ) def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0): """Returns the scores""" if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows.") with ThreadPoolExecutor(max_workers=num_workers) as executor: futures = [] completion_id = Counter() n_samples = 0 results = defaultdict(list) for task_id, (candidates, test_case) in enumerate(zip(predictions, references)): for candidate in candidates: test_program = candidate + "\n" + test_case args = (test_program, timeout, task_id, completion_id[task_id]) future = executor.submit(check_correctness, *args) futures.append(future) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(futures): result = future.result() results[result["task_id"]].append((result["completion_id"], result)) total, correct = [], [] for result in results.values(): result.sort() passed = [r[1]["passed"] for r in result] total.append(len(passed)) correct.append(sum(passed)) total = np.array(total) correct = np.array(correct) ks = k pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()} return pass_at_k, results def estimate_pass_at_k(num_samples, num_correct, k): """Estimates pass@k of each problem and returns them in an array.""" def estimator(n: int, c: int, k: int) -> float: """Calculates 1 - comb(n - c, k) / comb(n, k).""" if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)) if isinstance(num_samples, int): num_samples_it = itertools.repeat(num_samples, len(num_correct)) else: assert len(num_samples) == len(num_correct) num_samples_it = iter(num_samples) return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
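A short usage sketch that mirrors the docstring example above; candidate programs are executed locally, so the `HF_ALLOW_CODE_EVAL` opt-in described in the warning is required (and the metric is not supported on Windows):

```python
import os

import datasets

os.environ["HF_ALLOW_CODE_EVAL"] = "1"  # explicit opt-in, see the warning above
code_eval = datasets.load_metric("code_eval")

test_cases = ["assert add(2,3)==5"]
candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
print(pass_at_k)  # {'pass@1': 0.5, 'pass@2': 1.0}
```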
datasets/metrics/code_eval/code_eval.py/0
{ "file_path": "datasets/metrics/code_eval/code_eval.py", "repo_id": "datasets", "token_count": 3175 }
66
# Copyright 2022 The HuggingFace Datasets Authors and the current metric script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FrugalScore metric.""" import torch from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments import datasets _CITATION = """\ @article{eddine2021frugalscore, title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation}, author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis}, journal={arXiv preprint arXiv:2110.08559}, year={2021} } """ _DESCRIPTION = """\ FrugalScore is a reference-based metric for NLG models evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance. """ _KWARGS_DESCRIPTION = """ Calculates how good are predictions given some references, using certain scores. Args: predictions (list of str): list of predictions to score. Each predictions should be a string. references (list of str): list of reference for each prediction. Each reference should be a string. batch_size (int): the batch size for predictions. max_length (int): maximum sequence length. device (str): either gpu or cpu Returns: scores (list of int): list of scores. Examples: >>> frugalscore = datasets.load_metric("frugalscore") >>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face']) >>> print([round(s, 3) for s in results["scores"]]) [0.631, 0.645] """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class FRUGALSCORE(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string"), "references": datasets.Value("string"), } ), homepage="https://github.com/moussaKam/FrugalScore", ) def _download_and_prepare(self, dl_manager): if self.config_name == "default": checkpoint = "moussaKam/frugalscore_tiny_bert-base_bert-score" else: checkpoint = self.config_name self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint) self.tokenizer = AutoTokenizer.from_pretrained(checkpoint) def _compute( self, predictions, references, batch_size=32, max_length=128, device=None, ): """Returns the scores""" assert len(predictions) == len( references ), "predictions and references should have the same number of sentences." if device is not None: assert device in ["gpu", "cpu"], "device should be either gpu or cpu." 
else: device = "gpu" if torch.cuda.is_available() else "cpu" training_args = TrainingArguments( "trainer", fp16=(device == "gpu"), per_device_eval_batch_size=batch_size, report_to="all", no_cuda=(device == "cpu"), log_level="warning", ) dataset = {"sentence1": predictions, "sentence2": references} raw_datasets = datasets.Dataset.from_dict(dataset) def tokenize_function(data): return self.tokenizer( data["sentence1"], data["sentence2"], max_length=max_length, truncation=True, padding=True ) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) tokenized_datasets.remove_columns(["sentence1", "sentence2"]) trainer = Trainer(self.model, training_args, tokenizer=self.tokenizer) predictions = trainer.predict(tokenized_datasets) return {"scores": list(predictions.predictions.squeeze(-1))}
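A hedged usage sketch based on the docstring above (requires `torch` and `transformers`; the default distilled checkpoint is downloaded on first use):

```python
import datasets

frugalscore = datasets.load_metric("frugalscore")
results = frugalscore.compute(
    predictions=["hello there", "huggingface"],
    references=["hello world", "hugging face"],
    batch_size=16,
    max_length=64,
    device="cpu",  # or "gpu" when CUDA is available
)
print([round(s, 3) for s in results["scores"]])  # roughly [0.631, 0.645] per the docstring
```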
datasets/metrics/frugalscore/frugalscore.py/0
{ "file_path": "datasets/metrics/frugalscore/frugalscore.py", "repo_id": "datasets", "token_count": 1754 }
67
# Copyright 2022 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mean IoU (Intersection-over-Union) metric.""" from typing import Dict, Optional import numpy as np import datasets _DESCRIPTION = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _KWARGS_DESCRIPTION = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric("mean_iou") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _CITATION = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def intersect_and_union( pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate intersection and Union. Args: pred_label (`ndarray`): Prediction segmentation map of shape (height, width). label (`ndarray`): Ground truth segmentation map of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: area_intersect (`ndarray`): The intersection of prediction and ground truth histogram on all classes. area_union (`ndarray`): The union of prediction and ground truth histogram on all classes. area_pred_label (`ndarray`): The prediction histogram on all classes. area_label (`ndarray`): The ground truth histogram on all classes. """ if label_map is not None: for old_id, new_id in label_map.items(): label[label == old_id] = new_id # turn into Numpy arrays pred_label = np.array(pred_label) label = np.array(label) if reduce_labels: label[label == 0] = 255 label = label - 1 label[label == 254] = 255 mask = label != ignore_index mask = np.not_equal(label, ignore_index) pred_label = pred_label[mask] label = np.array(label)[mask] intersect = pred_label[pred_label == label] area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0] area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0] area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0] area_union = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair. Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: total_area_intersect (`ndarray`): The intersection of prediction and ground truth histogram on all classes. 
total_area_union (`ndarray`): The union of prediction and ground truth histogram on all classes. total_area_pred_label (`ndarray`): The prediction histogram on all classes. total_area_label (`ndarray`): The ground truth histogram on all classes. """ total_area_intersect = np.zeros((num_labels,), dtype=np.float64) total_area_union = np.zeros((num_labels,), dtype=np.float64) total_area_pred_label = np.zeros((num_labels,), dtype=np.float64) total_area_label = np.zeros((num_labels,), dtype=np.float64) for result, gt_seg_map in zip(results, gt_seg_maps): area_intersect, area_union, area_pred_label, area_label = intersect_and_union( result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def mean_iou( results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Mean Intersection and Union (mIoU). Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. 
""" total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels ) # compute metrics metrics = {} all_acc = total_area_intersect.sum() / total_area_label.sum() iou = total_area_intersect / total_area_union acc = total_area_intersect / total_area_label metrics["mean_iou"] = np.nanmean(iou) metrics["mean_accuracy"] = np.nanmean(acc) metrics["overall_accuracy"] = all_acc metrics["per_category_iou"] = iou metrics["per_category_accuracy"] = acc if nan_to_num is not None: metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class MeanIoU(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), } ), reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ], ) def _compute( self, predictions, references, num_labels: int, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): iou_result = mean_iou( results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, ) return iou_result
datasets/metrics/mean_iou/mean_iou.py/0
{ "file_path": "datasets/metrics/mean_iou/mean_iou.py", "repo_id": "datasets", "token_count": 5236 }
68
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ROUGE metric from Google Research github repo.""" # The dependencies in https://github.com/google-research/google-research/blob/master/rouge/requirements.txt import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _CITATION = """\ @inproceedings{lin-2004-rouge, title = "{ROUGE}: A Package for Automatic Evaluation of Summaries", author = "Lin, Chin-Yew", booktitle = "Text Summarization Branches Out", month = jul, year = "2004", address = "Barcelona, Spain", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W04-1013", pages = "74--81", } """ _DESCRIPTION = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ _KWARGS_DESCRIPTION = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring, `"rougeL"`: Longest common subsequence based scoring. `"rougeLSum"`: rougeLsum splits text using `"\n"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results["rouge1"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results["rouge1"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Rouge(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[ "https://en.wikipedia.org/wiki/ROUGE_(metric)", "https://github.com/google-research/google-research/tree/master/rouge", ], ) def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False): if rouge_types is None: rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"] scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer) if use_aggregator: aggregator = scoring.BootstrapAggregator() else: scores = [] for ref, pred in zip(references, predictions): score = scorer.score(ref, pred) if use_aggregator: aggregator.add_scores(score) else: scores.append(score) if use_aggregator: result = aggregator.aggregate() else: result = {} for key in scores[0]: result[key] = [score[key] for score in scores] return result
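A usage sketch taken from the docstring above (requires the `rouge_score` and `nltk` packages):

```python
import datasets

rouge = datasets.load_metric("rouge")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
results = rouge.compute(predictions=predictions, references=references)
print(results["rouge1"].mid.fmeasure)  # 1.0 for identical predictions and references
```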
datasets/metrics/rouge/rouge.py/0
{ "file_path": "datasets/metrics/rouge/rouge.py", "repo_id": "datasets", "token_count": 2100 }
69
""" Official evaluation script for ReCoRD v1.0. (Some functions are adopted from the SQuAD evaluation script.) """ import argparse import json import re import string import sys from collections import Counter def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): scores_for_ground_truths = [] for ground_truth in ground_truths: score = metric_fn(prediction, ground_truth) scores_for_ground_truths.append(score) return max(scores_for_ground_truths) def evaluate(dataset, predictions): f1 = exact_match = total = 0 correct_ids = [] for passage in dataset: for qa in passage["qas"]: total += 1 if qa["id"] not in predictions: message = f'Unanswered question {qa["id"]} will receive score 0.' print(message, file=sys.stderr) continue ground_truths = [x["text"] for x in qa["answers"]] prediction = predictions[qa["id"]] _exact_match = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) if int(_exact_match) == 1: correct_ids.append(qa["id"]) exact_match += _exact_match f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) exact_match = exact_match / total f1 = f1 / total return {"exact_match": exact_match, "f1": f1}, correct_ids if __name__ == "__main__": expected_version = "1.0" parser = argparse.ArgumentParser("Official evaluation script for ReCoRD v1.0.") parser.add_argument("data_file", help="The dataset file in JSON format.") parser.add_argument("pred_file", help="The model prediction file in JSON format.") parser.add_argument("--output_correct_ids", action="store_true", help="Output the correctly answered query IDs.") args = parser.parse_args() with open(args.data_file) as data_file: dataset_json = json.load(data_file) if dataset_json["version"] != expected_version: print( f'Evaluation expects v-{expected_version}, but got dataset with v-{dataset_json["version"]}', file=sys.stderr, ) dataset = dataset_json["data"] with open(args.pred_file) as pred_file: predictions = json.load(pred_file) metrics, correct_ids = evaluate(dataset, predictions) if args.output_correct_ids: print(f"Output {len(correct_ids)} correctly answered question IDs.") with open("correct_ids.json", "w") as f: json.dump(correct_ids, f)
datasets/metrics/super_glue/record_evaluation.py/0
{ "file_path": "datasets/metrics/super_glue/record_evaluation.py", "repo_id": "datasets", "token_count": 1480 }
70
# ruff: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "2.18.1.dev0" from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _arrow_dataset.concatenate_datasets = concatenate_datasets _utils.DownloadConfig = DownloadConfig _utils.DownloadManager = DownloadManager _utils.DownloadMode = DownloadMode _deprecated_download_manager.DownloadConfig = DownloadConfig _deprecated_download_manager.DownloadMode = DownloadMode _deprecated_download_manager.DownloadManager = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
datasets/src/datasets/__init__.py/0
{ "file_path": "datasets/src/datasets/__init__.py", "repo_id": "datasets", "token_count": 772 }
71
from typing import TypeVar from .arrow_dataset import Dataset, _split_by_node_map_style_dataset from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: """ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. To maximize data loading throughput, chunks are made of contiguous data on disk if possible. For iterable datasets: If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. Args: dataset ([`Dataset`] or [`IterableDataset`]): The dataset to split by node. rank (`int`): Rank of the current node. world_size (`int`): Total number of nodes. Returns: [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`. """ if isinstance(dataset, Dataset): return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size) else: return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
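# Illustrative sketch only: `RANK` and `WORLD_SIZE` are assumed to be provided by the
# launcher (e.g. torchrun); the dataset name is an arbitrary example.
def _example_split_by_node():
    import os

    from datasets import load_dataset
    from datasets.distributed import split_dataset_by_node

    rank = int(os.environ.get("RANK", 0))
    world_size = int(os.environ.get("WORLD_SIZE", 1))

    # Works for both map-style and iterable datasets, as described in the docstring above.
    ds = load_dataset("imdb", split="train", streaming=True)
    return split_dataset_by_node(ds, rank=rank, world_size=world_size)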
datasets/src/datasets/distributed.py/0
{ "file_path": "datasets/src/datasets/distributed.py", "repo_id": "datasets", "token_count": 582 }
72
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ruff: noqa from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter logger = logging.get_logger(__name__) _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} def _register_formatter( formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ): """ Register a Formatter object using a name and optional aliases. This function must be used on a Formatter class. """ aliases = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _FORMAT_TYPES[format_type] = formatter_cls for alias in set(aliases + [format_type]): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _FORMAT_TYPES_ALIASES[alias] = format_type def _register_unavailable_formatter( unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ): """ Register an unavailable Formatter object using a name and optional aliases. This function must be used on an Exception object that is raised when trying to get the unavailable formatter. 
""" aliases = aliases if aliases is not None else [] for alias in set(aliases + [format_type]): _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.POLARS_AVAILABLE: from .polars_formatter import PolarsFormatter _register_formatter(PolarsFormatter, "polars", aliases=["pl"]) else: _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.") _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"]) if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: """ Factory function to get a Formatter given its type name and keyword arguments. A formatter is an object that extracts and formats data from pyarrow table. It defines the formatting for rows, colums and batches. If the formatter for a given type name doesn't exist or is not available, an error is raised. """ format_type = get_format_type_from_alias(format_type) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**format_kwargs) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" )
datasets/src/datasets/formatting/__init__.py/0
{ "file_path": "datasets/src/datasets/formatting/__init__.py", "repo_id": "datasets", "token_count": 1932 }
73
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import tqdm as hf_tqdm from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlite3 import sqlalchemy class SqlDatasetReader(AbstractDatasetInputStream): def __init__( self, sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) self.builder = Sql( cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, ) def read(self): download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, ) # Build dataset for splits dataset = self.builder.as_dataset( split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class SqlDatasetWriter: def __init__( self, dataset: Dataset, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.name = name self.con = con self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.to_sql_kwargs = to_sql_kwargs def write(self) -> int: _ = self.to_sql_kwargs.pop("sql", None) _ = self.to_sql_kwargs.pop("con", None) index = self.to_sql_kwargs.pop("index", False) written = self._write(index=index, **self.to_sql_kwargs) return written def _batch_sql(self, args): offset, index, to_sql_kwargs = args to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) df = batch.to_pandas() num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs) return num_rows or len(df) def _write(self, index, **to_sql_kwargs) -> int: """Writes the pyarrow table as SQL to a database. Caller is responsible for opening and closing the SQL connection. """ written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating SQL from Arrow format", ): written += self._batch_sql((offset, index, to_sql_kwargs)) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for num_rows in hf_tqdm( pool.imap( self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating SQL from Arrow format", ): written += num_rows return written
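# Illustrative round-trip sketch only: end users normally reach these classes through
# `Dataset.to_sql` / `Dataset.from_sql`; the sqlite URI and table name are arbitrary and
# sqlalchemy is assumed to be installed.
def _example_sql_roundtrip(db_path="example.db"):
    from datasets import Dataset

    ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
    con = f"sqlite:///{db_path}"

    ds.to_sql("demo", con)  # SqlDatasetWriter under the hood
    reloaded = Dataset.from_sql("SELECT id, text FROM demo", con)  # SqlDatasetReader under the hood
    return reloaded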
datasets/src/datasets/io/sql.py/0
{ "file_path": "datasets/src/datasets/io/sql.py", "repo_id": "datasets", "token_count": 2007 }
74
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=True) class LanguageModeling(TaskTemplate): task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"text": Value("string")}) label_schema: ClassVar[Features] = Features({}) text_column: str = "text" @property def column_mapping(self) -> Dict[str, str]: return {self.text_column: "text"}
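# Illustrative sketch only: shows how the template above maps a custom text column onto
# the canonical "text" column; the dataset content is an arbitrary example.
def _example_language_modeling_template():
    from datasets import Dataset

    template = LanguageModeling(text_column="content")
    assert template.column_mapping == {"content": "text"}

    ds = Dataset.from_dict({"content": ["hello world", "another document"]})
    # Renaming by hand here; `Dataset.prepare_for_task` applies registered templates automatically.
    return ds.rename_columns(template.column_mapping)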
datasets/src/datasets/tasks/language_modeling.py/0
{ "file_path": "datasets/src/datasets/tasks/language_modeling.py", "repo_id": "datasets", "token_count": 195 }
75
import time from functools import partial from huggingface_hub import HfApi, hf_hub_url from huggingface_hub.hf_api import RepoFile from packaging import version from requests import ConnectionError, HTTPError from .. import config from . import logging logger = logging.get_logger(__name__) # Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors if config.HF_HUB_VERSION.release < version.parse("0.20.0").release: def preupload_lfs_files(hf_api: HfApi, **kwargs): max_retries = 5 base_wait_time = 1 max_wait_time = 8 retry = 0 while True: try: hf_api.preupload_lfs_files(**kwargs) except (RuntimeError, HTTPError, ConnectionError) as err: if isinstance(err, RuntimeError): if isinstance(err.__cause__, (HTTPError, ConnectionError)): err = err.__cause__ else: raise err if retry >= max_retries or (err.response and err.response.status_code not in [500, 503]): raise err else: sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff logger.info( f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry}/{max_retries}]" ) time.sleep(sleep_time) retry += 1 else: break else: def preupload_lfs_files(hf_api: HfApi, **kwargs): hf_api.preupload_lfs_files(**kwargs) # `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0` if config.HF_HUB_VERSION.release < version.parse("0.20.0").release: def list_files_info(hf_api: HfApi, **kwargs): yield from hf_api.list_files_info(**kwargs) else: def list_files_info(hf_api: HfApi, **kwargs): kwargs = {**kwargs, "recursive": True} for repo_path in hf_api.list_repo_tree(**kwargs): if isinstance(repo_path, RepoFile): yield repo_path # backward compatibility hf_hub_url = partial(hf_hub_url, repo_type="dataset")
datasets/src/datasets/utils/hub.py/0
{ "file_path": "datasets/src/datasets/utils/hub.py", "repo_id": "datasets", "token_count": 1118 }
76
"""Utility helpers to handle progress bars in `datasets`. Example: 1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`. 2. To disable progress bars, either use `disable_progress_bars()` helper or set the environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1. 3. To re-enable progress bars, use `enable_progress_bars()`. 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`. NOTE: Environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has the priority. Example: ```py from datasets.utils import ( are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm, ) # Disable progress bars globally disable_progress_bars() # Use as normal `tqdm` for _ in tqdm(range(5)): do_something() # Still not showing progress bars, as `disable=False` is overwritten to `True`. for _ in tqdm(range(5), disable=False): do_something() are_progress_bars_disabled() # True # Re-enable progress bars globally enable_progress_bars() # Progress bar will be shown ! for _ in tqdm(range(5)): do_something() ``` """ import warnings from tqdm.auto import tqdm as old_tqdm from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS # `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled` # is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority. # If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user have not set the # environment variable and is free to enable/disable progress bars programmatically. # TL;DR: env variable has priority over code. # # By default, progress bars are enabled. _hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False def disable_progress_bars() -> None: """ Disable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment variable has been set. Use [`~utils.enable_progress_bars`] to re-enable them. """ if HF_DATASETS_DISABLE_PROGRESS_BARS is False: warnings.warn( "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has" " priority." ) return global _hf_datasets_progress_bars_disabled _hf_datasets_progress_bars_disabled = True def enable_progress_bars() -> None: """ Enable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment variable has been set. Use [`~utils.disable_progress_bars`] to disable them. """ if HF_DATASETS_DISABLE_PROGRESS_BARS is True: warnings.warn( "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has" " priority." ) return global _hf_datasets_progress_bars_disabled _hf_datasets_progress_bars_disabled = False def are_progress_bars_disabled() -> bool: """Return whether progress bars are globally disabled or not. Progress bars used in `datasets` can be enable or disabled globally using [`~utils.enable_progress_bars`] and [`~utils.disable_progress_bars`] or by setting `HF_DATASETS_DISABLE_PROGRESS_BAR` as environment variable. """ global _hf_datasets_progress_bars_disabled return _hf_datasets_progress_bars_disabled class tqdm(old_tqdm): """ Class to override `disable` argument in case progress bars are globally disabled. Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324. 
""" def __init__(self, *args, **kwargs): if are_progress_bars_disabled(): kwargs["disable"] = True super().__init__(*args, **kwargs) def __delattr__(self, attr: str) -> None: """Fix for https://github.com/huggingface/datasets/issues/6066""" try: super().__delattr__(attr) except AttributeError: if attr != "_lock": raise # backward compatibility enable_progress_bar = enable_progress_bars disable_progress_bar = disable_progress_bars def is_progress_bar_enabled(): return not are_progress_bars_disabled()
datasets/src/datasets/utils/tqdm.py/0
{ "file_path": "datasets/src/datasets/utils/tqdm.py", "repo_id": "datasets", "token_count": 1662 }
77
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order): expected_row_ids_and_row_dicts = [] for part_id in partition_order: partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect() for row_idx, row in enumerate(partition): expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict())) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def test_repartition_df_if_needed(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(100).repartition(1) spark_builder = Spark(df) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def test_generate_iterable_examples(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(10).repartition(2) partition_order = [1, 0] generate_fn = _generate_iterable_examples(df, partition_order) # Reverse the partitions. expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order) for i, (row_id, row_dict) in enumerate(generate_fn()): expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def test_spark_examples_iterable(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(10).repartition(1) it = SparkExamplesIterable(df) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(it): assert row_id == f"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def test_spark_examples_iterable_shuffle(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(30).repartition(3) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator") as generator_mock: generator_mock.shuffle.side_effect = lambda x: x.reverse() expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0]) shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(shuffled_it): expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def test_spark_examples_iterable_shard(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(20).repartition(4) # Partitions 0 and 2 shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2) assert shard_it_1.n_shards == 2 expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2]) for i, (row_id, row_dict) in enumerate(shard_it_1): expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2) assert shard_it_2.n_shards == 2 expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3]) for i, (row_id, row_dict) in enumerate(shard_it_2): expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def test_repartition_df_if_needed_max_num_df_rows(): spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.range(100).repartition(1) spark_builder = Spark(df) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
datasets/tests/packaged_modules/test_spark.py/0
{ "file_path": "datasets/tests/packaged_modules/test_spark.py", "repo_id": "datasets", "token_count": 2054 }
78
import os from datasets.utils._filelock import FileLock def test_long_path(tmpdir): filename = "a" * 1000 + ".lock" lock1 = FileLock(str(tmpdir / filename)) assert lock1.lock_file.endswith(".lock") assert not lock1.lock_file.endswith(filename) assert len(os.path.basename(lock1.lock_file)) <= 255
datasets/tests/test_filelock.py/0
{ "file_path": "datasets/tests/test_filelock.py", "repo_id": "datasets", "token_count": 120 }
79
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def test_patch_submodule(): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join mock = "__test_patch_submodule_mock__" with patch_submodule(_test_patching, "os.path.join", mock): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os, _PatchedModuleObj) assert isinstance(_test_patching.os.path, _PatchedModuleObj) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path, _PatchedModuleObj) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os, _PatchedModuleObj) assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path, _PatchedModuleObj) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def test_patch_submodule_builtin(): assert _test_patching.open is open mock = "__test_patch_submodule_builtin_mock__" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching, "open", mock): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def test_patch_submodule_missing(): # pandas.read_csv is not present in _test_patching mock = "__test_patch_submodule_missing_mock__" with patch_submodule(_test_patching, "pandas.read_csv", mock): pass def test_patch_submodule_missing_builtin(): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point mock = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching, "len", None) is None with patch_submodule(_test_patching, "len", mock): assert _test_patching.len is mock assert _test_patching.len is len def test_patch_submodule_start_and_stop(): mock = "__test_patch_submodule_start_and_stop_mock__" patch = patch_submodule(_test_patching, "open", mock) 
assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def test_patch_submodule_successive(): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join mock_join = "__test_patch_submodule_successive_join__" mock_dirname = "__test_patch_submodule_successive_dirname__" mock_rename = "__test_patch_submodule_successive_rename__" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching, "os.path.join", mock_join): with patch_submodule(_test_patching, "os.rename", mock_rename): with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching, "os.rename", mock_rename): with patch_submodule(_test_patching, "os.path.join", mock_join): with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def test_patch_submodule_doesnt_exist(): mock = "__test_patch_submodule_doesnt_exist_mock__" with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock): pass with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock): pass
datasets/tests/test_patching.py/0
{ "file_path": "datasets/tests/test_patching.py", "repo_id": "datasets", "token_count": 2274 }
80
# [The Hugging Face Deep Reinforcement Learning Course 🤗 (v2.0)](https://huggingface.co/deep-rl-course/unit0/introduction) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="Thumbnail"/> If you like the course, don't hesitate to **⭐ star this repository. This helps us 🤗**. This repository contains the Deep Reinforcement Learning Course mdx files and notebooks. **The website is here**: https://huggingface.co/deep-rl-course/unit0/introduction?fw=pt - The syllabus 📚: https://simoninithomas.github.io/deep-rl-course - The course 📚: https://huggingface.co/deep-rl-course/unit0/introduction?fw=pt - **Sign up here** ➡️➡️➡️ http://eepurl.com/ic5ZUD ## Citing the project To cite this repository in publications: ```bibtex @misc{deep-rl-course, author = {Simonini, Thomas and Sanseviero, Omar}, title = {The Hugging Face Deep Reinforcement Learning Class}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/deep-rl-class}}, } ```
deep-rl-class/README.md/0
{ "file_path": "deep-rl-class/README.md", "repo_id": "deep-rl-class", "token_count": 388 }
81
# The certification process The certification process is **completely free**: - To get a *certificate of completion*: you need **to pass 80% of the assignments**. - To get a *certificate of excellence*: you need **to pass 100% of the assignments**. There are **no deadlines; the course is self-paced**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/certification.jpg" alt="Course certification" width="100%"/> When we say pass, **we mean that your model must be pushed to the Hub and get a result equal to or above the minimal requirement**. To check your progress and see which units you have passed, go to: https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course Now that you're ready for the certification process, you need to: 1. Go here: https://huggingface.co/spaces/huggingface-projects/Deep-RL-Course-Certification/ 2. Type your *Hugging Face username*, your *first name*, and your *last name* 3. Click on "Generate my certificate". - If you passed 80% of the assignments, **congratulations**, you've just got the certificate of completion. - If you passed 100% of the assignments, **congratulations**, you've just got the certificate of excellence. - If you are below 80%, don't be discouraged! Check which units you need to do again to get your certificate. 4. You can download your certificate in PDF and PNG formats. Don't hesitate to share your certificate on Twitter (tag me @ThomasSimonini and @huggingface) and on LinkedIn.
deep-rl-class/units/en/communication/certification.mdx/0
{ "file_path": "deep-rl-class/units/en/communication/certification.mdx", "repo_id": "deep-rl-class", "token_count": 418 }
82
# Types of tasks [[tasks]] A task is an **instance** of a Reinforcement Learning problem. We can have two types of tasks: **episodic** and **continuing**. ## Episodic task [[episodic-task]] In this case, we have a starting point and an ending point **(a terminal state). This creates an episode**: a list of States, Actions, Rewards, and new States. For instance, think about Super Mario Bros: an episode begins at the launch of a new Mario level and ends **when you’re killed or when you reach the end of the level.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>Beginning of a new episode.</figcaption> </figure> ## Continuing tasks [[continuing-tasks]] These are tasks that continue forever (**no terminal state**). In this case, the agent must **learn how to choose the best actions and simultaneously interact with the environment.** For instance, consider an agent that does automated stock trading. For this task, there is no starting point or terminal state. **The agent keeps running until we decide to stop it.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/stock.jpg" alt="Stock Market" width="100%"> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/tasks.jpg" alt="Tasks recap" width="100%">
deep-rl-class/units/en/unit1/tasks.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/tasks.mdx", "repo_id": "deep-rl-class", "token_count": 436 }
83
# Two types of value-based methods [[two-types-value-based-methods]] In value-based methods, **we learn a value function** that **maps a state to the expected value of being at that state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/vbm-1.jpg" alt="Value Based Methods"/> The value of a state is the **expected discounted return** the agent can get if it **starts at that state and then acts according to our policy.** <Tip> But what does it mean to act according to our policy? After all, we don't have a policy in value-based methods since we train a value function and not a policy. </Tip> Remember that the goal of an **RL agent is to have an optimal policy π\*.** To find the optimal policy, we learned about two different methods: - *Policy-based methods:* **Directly train the policy** to select what action to take given a state (or a probability distribution over actions at that state). In this case, we **don't have a value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-2.jpg" alt="Two RL approaches"/> The policy takes a state as input and outputs what action to take at that state (a deterministic policy is a policy that outputs one action given a state, contrary to a stochastic policy, which outputs a probability distribution over actions). And consequently, **we don't define by hand the behavior of our policy; it's the training that will define it.** - *Value-based methods:* **Indirectly, by training a value function** that outputs the value of a state or a state-action pair. Given this value function, our policy **will take an action.** Since the policy is not trained/learned, **we need to specify its behavior.** For instance, if we want a policy that, given the value function, will take actions that always lead to the biggest reward, **we'll create a Greedy Policy.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-3.jpg" alt="Two RL approaches"/> <figcaption>Given a state, our action-value function (that we train) outputs the value of each action at that state. Then, our pre-defined Greedy Policy selects the action that will yield the highest value given a state or a state-action pair.</figcaption> </figure> Consequently, whatever method you use to solve your problem, **you will have a policy**. In the case of value-based methods, you don't train the policy: your policy **is just a simple pre-specified function** (for instance, the Greedy Policy) that uses the values given by the value function to select its actions. So the difference is: - In policy-based training, **the optimal policy (denoted π\*) is found by training the policy directly.** - In value-based training, **finding an optimal value function (denoted Q\* or V\*, we'll study the difference below) leads to having an optimal policy.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link between value and policy"/> In fact, most of the time, in value-based methods, you'll use **an Epsilon-Greedy Policy** that handles the exploration/exploitation trade-off; we'll talk about this when we talk about Q-Learning in the second part of this unit.
As we mentioned above, we have two types of value-based functions: ## The state-value function [[state-value-function]] We write the state value function under a policy π like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-1.jpg" alt="State value function"/> For each state, the state-value function outputs the expected return if the agent **starts at that state** and then follows the policy forever afterward (for all future timesteps, if you prefer). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-2.jpg" alt="State value function"/> <figcaption>If we take the state with value -7: it's the expected return starting at that state and taking actions according to our policy (greedy policy), so right, right, right, down, down, right, right.</figcaption> </figure> ## The action-value function [[action-value-function]] In the action-value function, for each state and action pair, the action-value function **outputs the expected return** if the agent starts in that state, takes that action, and then follows the policy forever after. The value of taking action \\(a\\) in state \\(s\\) under a policy \\(π\\) is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-1.jpg" alt="Action State value function"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-2.jpg" alt="Action State value function"/> We see that the difference is: - For the state-value function, we calculate **the value of a state \\(S_t\\)** - For the action-value function, we calculate **the value of the state-action pair ( \\(S_t, A_t\\) ) hence the value of taking that action at that state.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-types.jpg" alt="Two types of value function"/> <figcaption> Note: We didn't fill all the state-action pairs for the example of Action-value function</figcaption> </figure> In either case, whichever value function we choose (state-value or action-value function), **the returned value is the expected return.** However, the problem is that **to calculate EACH value of a state or a state-action pair, we need to sum all the rewards an agent can get if it starts at that state.** This can be a computationally expensive process, and that's **where the Bellman equation comes in to help us.**
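To make "acting greedily with respect to a value function" concrete, here is a small illustrative sketch (not part of the course notebooks): assuming we already have an action-value table `Q[state, action]`, the greedy policy simply picks the highest-valued action, and an epsilon-greedy policy occasionally explores instead.

```python
import numpy as np

# A stand-in for a trained action-value function on a small environment:
# 16 states, 4 actions, Q[state, action] = estimated expected return.
n_states, n_actions = 16, 4
Q = np.random.rand(n_states, n_actions)

def greedy_policy(state: int) -> int:
    # Pre-specified policy: take the action with the highest estimated value.
    return int(np.argmax(Q[state]))

def epsilon_greedy_policy(state: int, epsilon: float = 0.1) -> int:
    # With probability epsilon explore a random action, otherwise exploit the estimates.
    if np.random.rand() < epsilon:
        return int(np.random.randint(n_actions))
    return greedy_policy(state)
```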
deep-rl-class/units/en/unit2/two-types-value-based-methods.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/two-types-value-based-methods.mdx", "repo_id": "deep-rl-class", "token_count": 1727 }
84
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/> In the last unit, we learned about Deep Q-Learning. In this value-based deep reinforcement learning algorithm, we **used a deep neural network to approximate the different Q-values for each possible action at a state.** Since the beginning of the course, we have only studied value-based methods, **where we estimate a value function as an intermediate step towards finding an optimal policy.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" /> In value-based methods, the policy **\(π\) only exists because of the action-value estimates, since the policy is just a function** (for instance, a greedy policy) that selects the action with the highest value given a state. With policy-based methods, we want to optimize the policy directly, **without having an intermediate step of learning a value function.** So today, **we'll learn about policy-based methods and study a subset of these methods called policy gradient**. Then we'll implement our first policy gradient algorithm, called Monte Carlo **Reinforce**, from scratch using PyTorch, and test its robustness on the CartPole-v1 and PixelCopter environments. You'll then be able to iterate on and improve this implementation for more advanced environments. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/> </figure> Let's get started!
deep-rl-class/units/en/unit4/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 462 }
85
# Conclusion [[conclusion]] Congrats on finishing this unit and the tutorial. You've just trained your first virtual robots 🥳. **Take time to grasp the material before continuing**. You can also look at the additional reading materials we provided in the *additional reading* section. Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) See you in the next unit! ### Keep learning, stay awesome 🤗
deep-rl-class/units/en/unit6/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 145 }
86
# Conclusion [[Conclusion]] That’s all for today. Congrats on finishing this unit and the tutorial! The best way to learn is to practice and try things out. **Why not improve the implementation to handle frames as input?** See you in the second part of this unit 🔥 ## Keep Learning, Stay awesome 🤗
deep-rl-class/units/en/unit8/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 78 }
87
# Decision Transformers The Decision Transformer model was introduced by ["Decision Transformer: Reinforcement Learning via Sequence Modeling” by Chen L. et al](https://arxiv.org/abs/2106.01345). It abstracts Reinforcement Learning as a conditional-sequence modeling problem. The main idea is that instead of training a policy using RL methods, such as fitting a value function, that will tell us what action to take to maximize the return (cumulative reward), **we use a sequence modeling algorithm (Transformer) that, given a desired return, past states, and actions, will generate future actions to achieve this desired return**. It’s an autoregressive model conditioned on the desired return, past states, and actions to generate future actions that achieve the desired return. This is a complete shift in the Reinforcement Learning paradigm since we use generative trajectory modeling (modeling the joint distribution of the sequence of states, actions, and rewards) to replace conventional RL algorithms. This means that in Decision Transformers, we don’t maximize the return but rather generate a series of future actions that achieve the desired return. The 🤗 Transformers team integrated the Decision Transformer, an Offline Reinforcement Learning method, into the library as well as the Hugging Face Hub. ## Learn about Decision Transformers To learn more about Decision Transformers, you should read the blogpost we wrote about it [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers) ## Train your first Decision Transformers Now that you understand how Decision Transformers work thanks to [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers), you’re ready to learn to train your first Offline Decision Transformer model from scratch to make a half-cheetah run. Start the tutorial here 👉 https://huggingface.co/blog/train-decision-transformers ## Further reading For more information, we recommend that you check out the following resources: - [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) - [Online Decision Transformer](https://arxiv.org/abs/2202.05607) ## Author This section was written by <a href="https://twitter.com/edwardbeeching">Edward Beeching</a>
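To make the sequence-modeling view concrete, here is a minimal, illustrative sketch of running the 🤗 Transformers implementation (a randomly initialized model with default dimensions, so it has no external dependencies; swap in `DecisionTransformerModel.from_pretrained(...)` with one of the Gym checkpoints on the Hub to reproduce the blog post results):

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig()  # default state/action dimensions, for illustration only
model = DecisionTransformerModel(config)
model.eval()

batch, seq = 1, 20
states = torch.randn(batch, seq, config.state_dim)
actions = torch.zeros(batch, seq, config.act_dim)
rewards = torch.zeros(batch, seq, 1)
returns_to_go = torch.full((batch, seq, 1), 3600.0)  # the desired return we condition on
timesteps = torch.arange(seq).unsqueeze(0)
attention_mask = torch.ones(batch, seq, dtype=torch.long)

with torch.no_grad():
    state_preds, action_preds, return_preds = model(
        states=states,
        actions=actions,
        rewards=rewards,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
        return_dict=False,
    )

# The action to execute in the environment is the prediction for the last timestep.
next_action = action_preds[0, -1]
```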
deep-rl-class/units/en/unitbonus3/decision-transformers.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/decision-transformers.mdx", "repo_id": "deep-rl-class", "token_count": 543 }
88
import argparse import sys sys.path.append(".") from base_classes import T2IAdapterBenchmark, T2IAdapterSDXLBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="TencentARC/t2iadapter_canny_sd14v1", choices=["TencentARC/t2iadapter_canny_sd14v1", "TencentARC/t2i-adapter-canny-sdxl-1.0"], ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = ( T2IAdapterBenchmark(args) if args.ckpt == "TencentARC/t2iadapter_canny_sd14v1" else T2IAdapterSDXLBenchmark(args) ) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_t2i_adapter.py/0
{ "file_path": "diffusers/benchmarks/benchmark_t2i_adapter.py", "repo_id": "diffusers", "token_count": 393 }
89
# docstyle-ignore INSTALL_CONTENT = """ # Diffusers installation ! pip install diffusers transformers datasets accelerate # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/diffusers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
diffusers/docs/source/_config.py/0
{ "file_path": "diffusers/docs/source/_config.py", "repo_id": "diffusers", "token_count": 102 }
90
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoencoderKL The variational autoencoder (VAE) model with KL loss was introduced in [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114v11) by Diederik P. Kingma and Max Welling. The model is used in 🤗 Diffusers to encode images into latents and to decode latent representations into images. The abstract from the paper is: *How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions are two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.* ## Loading from the original format By default the [`AutoencoderKL`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded from the original format using [`FromOriginalVAEMixin.from_single_file`] as follows: ```py from diffusers import AutoencoderKL url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be a local file model = AutoencoderKL.from_single_file(url) ``` ## AutoencoderKL [[autodoc]] AutoencoderKL - decode - encode - all ## AutoencoderKLOutput [[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput ## DecoderOutput [[autodoc]] models.autoencoders.vae.DecoderOutput ## FlaxAutoencoderKL [[autodoc]] FlaxAutoencoderKL ## FlaxAutoencoderKLOutput [[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput ## FlaxDecoderOutput [[autodoc]] models.vae_flax.FlaxDecoderOutput
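For illustration, a minimal encode/decode round trip with a pretrained VAE might look as follows (the checkpoint name and the random input are example choices; in practice the input image comes from a preprocessing pipeline and is scaled to [-1, 1]):

```py
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.eval()

image = torch.randn(1, 3, 512, 512)  # dummy image batch in [-1, 1]

with torch.no_grad():
    posterior = vae.encode(image).latent_dist
    latents = posterior.sample() * vae.config.scaling_factor  # latents as used by diffusion pipelines
    reconstruction = vae.decode(latents / vae.config.scaling_factor).sample
```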
diffusers/docs/source/en/api/models/autoencoderkl.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoderkl.md", "repo_id": "diffusers", "token_count": 784 }
91
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # aMUSEd aMUSEd was introduced in [aMUSEd: An Open MUSE Reproduction](https://huggingface.co/papers/2401.01808) by Suraj Patil, William Berman, Robin Rombach, and Patrick von Platen. aMUSEd is a lightweight text-to-image model based on the [MUSE](https://arxiv.org/abs/2301.00704) architecture. aMUSEd is particularly useful in applications that require a lightweight and fast model, such as generating many images quickly at once. aMUSEd is a VQ-VAE token-based transformer that can generate an image in fewer forward passes than many diffusion models. In contrast with MUSE, it uses the smaller CLIP-L/14 text encoder instead of T5-XXL. Due to its small parameter count and few-forward-pass generation process, aMUSEd can generate many images quickly. This benefit is seen particularly at larger batch sizes. The abstract from the paper is: *We present aMUSEd, an open-source, lightweight masked image model (MIM) for text-to-image generation based on MUSE. With 10 percent of MUSE's parameters, aMUSEd is focused on fast image generation. We believe MIM is under-explored compared to latent diffusion, the prevailing approach for text-to-image generation. Compared to latent diffusion, MIM requires fewer inference steps and is more interpretable. Additionally, MIM can be fine-tuned to learn additional styles with only a single image. We hope to encourage further exploration of MIM by demonstrating its effectiveness on large-scale text-to-image generation and releasing reproducible training code. We also release checkpoints for two models which directly produce images at 256x256 and 512x512 resolutions.* | Model | Params | |-------|--------| | [amused-256](https://huggingface.co/amused/amused-256) | 603M | | [amused-512](https://huggingface.co/amused/amused-512) | 608M | ## AmusedPipeline [[autodoc]] AmusedPipeline - __call__ - all - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention [[autodoc]] AmusedImg2ImgPipeline - __call__ - all - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention [[autodoc]] AmusedInpaintPipeline - __call__ - all - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention
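A minimal text-to-image sketch with the 256x256 checkpoint listed above could look like this (assuming a CUDA device and that the fp16 variant is available):

```py
import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")
```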
diffusers/docs/source/en/api/pipelines/amused.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/amused.md", "repo_id": "diffusers", "token_count": 771 }
92
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E The Shap-E model was proposed in [Shap-E: Generating Conditional 3D Implicit Functions](https://huggingface.co/papers/2305.02463) by Alex Nichol and Heewoo Jun from [OpenAI](https://github.com/openai). The abstract from the paper is: *We present Shap-E, a conditional generative model for 3D assets. Unlike recent work on 3D generative models which produce a single output representation, Shap-E directly generates the parameters of implicit functions that can be rendered as both textured meshes and neural radiance fields. We train Shap-E in two stages: first, we train an encoder that deterministically maps 3D assets into the parameters of an implicit function; second, we train a conditional diffusion model on outputs of the encoder. When trained on a large dataset of paired 3D and text data, our resulting models are capable of generating complex and diverse 3D assets in a matter of seconds. When compared to Point-E, an explicit generative model over point clouds, Shap-E converges faster and reaches comparable or better sample quality despite modeling a higher-dimensional, multi-representation output space.* The original codebase can be found at [openai/shap-e](https://github.com/openai/shap-e). <Tip> See the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## ShapEPipeline [[autodoc]] ShapEPipeline - all - __call__ ## ShapEImg2ImgPipeline [[autodoc]] ShapEImg2ImgPipeline - all - __call__ ## ShapEPipelineOutput [[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
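To complement the reference above, here is a minimal text-to-3D sketch with [`ShapEPipeline`]; the prompt and rendering settings are illustrative assumptions, not values prescribed by this page:

```py
# Minimal sketch: generate a 3D asset from text and export the rendered frames as a GIF.
# The prompt, guidance scale, and frame size below are assumptions.
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

# `.images` holds one list of rendered frames per prompt.
frames = pipe(
    "a shark",
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
).images[0]

export_to_gif(frames, "shark_3d.gif")
```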
diffusers/docs/source/en/api/pipelines/shap_e.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/shap_e.md", "repo_id": "diffusers", "token_count": 594 }
93
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to run Stable Diffusion with Core ML [Core ML](https://developer.apple.com/documentation/coreml) is the model format and machine learning library supported by Apple frameworks. If you are interested in running Stable Diffusion models inside your macOS or iOS/iPadOS apps, this guide will show you how to convert existing PyTorch checkpoints into the Core ML format and use them for inference with Python or Swift. Core ML models can leverage all the compute engines available in Apple devices: the CPU, the GPU, and the Apple Neural Engine (or ANE, a tensor-optimized accelerator available in Apple Silicon Macs and modern iPhones/iPads). Depending on the model and the device it's running on, Core ML can mix and match compute engines too, so some portions of the model may run on the CPU while others run on GPU, for example. <Tip> You can also run the `diffusers` Python codebase on Apple Silicon Macs using the `mps` accelerator built into PyTorch. This approach is explained in depth in [the mps guide](mps), but it is not compatible with native apps. </Tip> ## Stable Diffusion Core ML Checkpoints Stable Diffusion weights (or checkpoints) are stored in the PyTorch format, so you need to convert them to the Core ML format before we can use them inside native apps. Thankfully, Apple engineers developed [a conversion tool](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml) based on `diffusers` to convert the PyTorch checkpoints to Core ML. Before you convert a model, though, take a moment to explore the Hugging Face Hub – chances are the model you're interested in is already available in Core ML format: - the [Apple](https://huggingface.co/apple) organization includes Stable Diffusion versions 1.4, 1.5, 2.0 base, and 2.1 base - [coreml community](https://huggingface.co/coreml-community) includes custom finetuned models - use this [filter](https://huggingface.co/models?pipeline_tag=text-to-image&library=coreml&p=2&sort=likes) to return all available Core ML checkpoints If you can't find the model you're interested in, we recommend you follow the instructions for [Converting Models to Core ML](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml) by Apple. ## Selecting the Core ML Variant to Use Stable Diffusion models can be converted to different Core ML variants intended for different purposes: - The type of attention blocks used. The attention operation is used to "pay attention" to the relationship between different areas in the image representations and to understand how the image and text representations are related. Attention is compute- and memory-intensive, so different implementations exist that consider the hardware characteristics of different devices. 
For Core ML Stable Diffusion models, there are two attention variants: * `split_einsum` ([introduced by Apple](https://machinelearning.apple.com/research/neural-engine-transformers)) is optimized for ANE devices, which is available in modern iPhones, iPads and M-series computers. * The "original" attention (the base implementation used in `diffusers`) is only compatible with CPU/GPU and not ANE. It can be *faster* to run your model on CPU + GPU using `original` attention than ANE. See [this performance benchmark](https://huggingface.co/blog/fast-mac-diffusers#performance-benchmarks) as well as some [additional measures provided by the community](https://github.com/huggingface/swift-coreml-diffusers/issues/31) for additional details. - The supported inference framework. * `packages` are suitable for Python inference. This can be used to test converted Core ML models before attempting to integrate them inside native apps, or if you want to explore Core ML performance but don't need to support native apps. For example, an application with a web UI could perfectly use a Python Core ML backend. * `compiled` models are required for Swift code. The `compiled` models in the Hub split the large UNet model weights into several files for compatibility with iOS and iPadOS devices. This corresponds to the [`--chunk-unet` conversion option](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). If you want to support native apps, then you need to select the `compiled` variant. The official Core ML Stable Diffusion [models](https://huggingface.co/apple/coreml-stable-diffusion-v1-4/tree/main) include these variants, but the community ones may vary: ``` coreml-stable-diffusion-v1-4 ├── README.md ├── original │ ├── compiled │ └── packages └── split_einsum ├── compiled └── packages ``` You can download and use the variant you need as shown below. ## Core ML Inference in Python Install the following libraries to run Core ML inference in Python: ```bash pip install huggingface_hub pip install git+https://github.com/apple/ml-stable-diffusion ``` ### Download the Model Checkpoints To run inference in Python, use one of the versions stored in the `packages` folders because the `compiled` ones are only compatible with Swift. You may choose whether you want to use `original` or `split_einsum` attention. This is how you'd download the `original` attention variant from the Hub to a directory called `models`: ```Python from huggingface_hub import snapshot_download from pathlib import Path repo_id = "apple/coreml-stable-diffusion-v1-4" variant = "original/packages" model_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) print(f"Model downloaded at {model_path}") ``` ### Inference[[python-inference]] Once you have downloaded a snapshot of the model, you can test it using Apple's Python script. ```shell python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" -i models/coreml-stable-diffusion-v1-4_original_packages -o </path/to/output/image> --compute-unit CPU_AND_GPU --seed 93 ``` Pass the path of the downloaded checkpoint with `-i` flag to the script. `--compute-unit` indicates the hardware you want to allow for inference. It must be one of the following options: `ALL`, `CPU_AND_GPU`, `CPU_ONLY`, `CPU_AND_NE`. You may also provide an optional output path, and a seed for reproducibility. 
The inference script assumes you're using the original version of the Stable Diffusion model, `CompVis/stable-diffusion-v1-4`. If you use another model, you *have* to specify its Hub id in the inference command line, using the `--model-version` option. This works for models already supported and custom models you trained or fine-tuned yourself. For example, if you want to use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5): ```shell python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" --compute-unit ALL -o output --seed 93 -i models/coreml-stable-diffusion-v1-5_original_packages --model-version runwayml/stable-diffusion-v1-5 ``` ## Core ML inference in Swift Running inference in Swift is slightly faster than in Python because the models are already compiled in the `mlmodelc` format. This is noticeable on app startup when the model is loaded but shouldn’t be noticeable if you run several generations afterward. ### Download To run inference in Swift on your Mac, you need one of the `compiled` checkpoint versions. We recommend you download them locally using Python code similar to the previous example, but with one of the `compiled` variants: ```Python from huggingface_hub import snapshot_download from pathlib import Path repo_id = "apple/coreml-stable-diffusion-v1-4" variant = "original/compiled" model_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) print(f"Model downloaded at {model_path}") ``` ### Inference[[swift-inference]] To run inference, please clone Apple's repo: ```bash git clone https://github.com/apple/ml-stable-diffusion cd ml-stable-diffusion ``` And then use Apple's command line tool, [Swift Package Manager](https://www.swift.org/package-manager/#): ```bash swift run StableDiffusionSample --resource-path models/coreml-stable-diffusion-v1-4_original_compiled --compute-units all "a photo of an astronaut riding a horse on mars" ``` You have to specify in `--resource-path` one of the checkpoints downloaded in the previous step, so please make sure it contains compiled Core ML bundles with the extension `.mlmodelc`. The `--compute-units` has to be one of these values: `all`, `cpuOnly`, `cpuAndGPU`, `cpuAndNeuralEngine`. For more details, please refer to the [instructions in Apple's repo](https://github.com/apple/ml-stable-diffusion). ## Supported Diffusers Features The Core ML models and inference code don't support many of the features, options, and flexibility of 🧨 Diffusers. These are some of the limitations to keep in mind: - Core ML models are only suitable for inference. They can't be used for training or fine-tuning. - Only two schedulers have been ported to Swift, the default one used by Stable Diffusion and `DPMSolverMultistepScheduler`, which we ported to Swift from our `diffusers` implementation. We recommend you use `DPMSolverMultistepScheduler`, since it produces the same quality in about half the steps. - Negative prompts, classifier-free guidance scale, and image-to-image tasks are available in the inference code. Advanced features such as depth guidance, ControlNet, and latent upscalers are not available yet. 
Apple's [conversion and inference repo](https://github.com/apple/ml-stable-diffusion) and our own [swift-coreml-diffusers](https://github.com/huggingface/swift-coreml-diffusers) repos are intended as technology demonstrators to enable other developers to build upon. If you feel strongly about any missing features, please feel free to open a feature request or, better yet, a contribution PR 🙂. ## Native Diffusers Swift app One easy way to run Stable Diffusion on your own Apple hardware is to use [our open-source Swift repo](https://github.com/huggingface/swift-coreml-diffusers), based on `diffusers` and Apple's conversion and inference repo. You can study the code, compile it with [Xcode](https://developer.apple.com/xcode/) and adapt it for your own needs. For your convenience, there's also a [standalone Mac app in the App Store](https://apps.apple.com/app/diffusers/id1666309574), so you can play with it without having to deal with the code or IDE. If you are a developer and have determined that Core ML is the best solution to build your Stable Diffusion app, then you can use the rest of this guide to get started with your project. We can't wait to see what you'll build 🙂.
diffusers/docs/source/en/optimization/coreml.md/0
{ "file_path": "diffusers/docs/source/en/optimization/coreml.md", "repo_id": "diffusers", "token_count": 3088 }
94
# Create a dataset for training There are many datasets on the [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) to train a model on, but if you can't find one you're interested in or want to use your own, you can create a dataset with the 🤗 [Datasets](hf.co/docs/datasets) library. The dataset structure depends on the task you want to train your model on. The most basic dataset structure is a directory of images for tasks like unconditional image generation. Another dataset structure may be a directory of images and a text file containing their corresponding text captions for tasks like text-to-image generation. This guide will show you two ways to create a dataset to finetune on: - provide a folder of images to the `--train_data_dir` argument - upload a dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument <Tip> 💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide. </Tip> ## Provide a dataset as a folder For unconditional generation, you can provide your own dataset as a folder of images. The training script uses the [`ImageFolder`](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) builder from 🤗 Datasets to automatically build a dataset from the folder. Your directory structure should look like: ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` Pass the path to the dataset directory to the `--train_data_dir` argument, and then you can start training: ```bash accelerate launch train_unconditional.py \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` ## Upload your data to the Hub <Tip> 💡 For more details and context about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post. </Tip> Start by creating a dataset with the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images. You can use the `data_dir` or `data_files` parameters to specify the location of the dataset. 
The `data_files` parameter supports mapping specific files to dataset splits like `train` or `test`: ```python from datasets import load_dataset # example 1: local folder dataset = load_dataset("imagefolder", data_dir="path_to_your_folder") # example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="path_to_zip_file") # example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset( "imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip", ) # example 4: providing several splits dataset = load_dataset( "imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]} ) ``` Then use the [`~datasets.Dataset.push_to_hub`] method to upload the dataset to the Hub: ```python # assuming you have run the huggingface-cli login command in a terminal dataset.push_to_hub("name_of_your_dataset") # if you want to push to a private repo, simply pass private=True: dataset.push_to_hub("name_of_your_dataset", private=True) ``` Now the dataset is available for training by passing the dataset name to the `--dataset_name` argument: ```bash accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \ --dataset_name="name_of_your_dataset" \ <other-arguments> ``` ## Next steps Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) arguments of a training script. For your next steps, feel free to try using your dataset to train a model for [unconditional generation](unconditional_training) or [text-to-image generation](text2image)!
diffusers/docs/source/en/training/create_dataset.md/0
{ "file_path": "diffusers/docs/source/en/training/create_dataset.md", "repo_id": "diffusers", "token_count": 1301 }
95
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoPipeline 🤗 Diffusers is able to complete many different tasks, and you can often reuse the same pretrained weights for multiple tasks such as text-to-image, image-to-image, and inpainting. If you're new to the library and diffusion models though, it may be difficult to know which pipeline to use for a task. For example, if you're using the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image, you might not know that you could also use it for image-to-image and inpainting by loading the checkpoint with the [`StableDiffusionImg2ImgPipeline`] and [`StableDiffusionInpaintPipeline`] classes respectively. The `AutoPipeline` class is designed to simplify the variety of pipelines in 🤗 Diffusers. It is a generic, *task-first* pipeline that lets you focus on the task. The `AutoPipeline` automatically detects the correct pipeline class to use, which makes it easier to load a checkpoint for a task without knowing the specific pipeline class name. <Tip> Take a look at the [AutoPipeline](../api/pipelines/auto_pipeline) reference to see which tasks are supported. Currently, it supports text-to-image, image-to-image, and inpainting. </Tip> This tutorial shows you how to use an `AutoPipeline` to automatically infer the pipeline class to load for a specific task, given the pretrained weights. ## Choose an AutoPipeline for your task Start by picking a checkpoint. For example, if you're interested in text-to-image with the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint, use [`AutoPipelineForText2Image`]: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") prompt = "peasant and dragon combat, wood cutting style, viking era, bevel with rune" image = pipeline(prompt, num_inference_steps=25).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png" alt="generated image of peasant fighting dragon in wood cutting style"/> </div> Under the hood, [`AutoPipelineForText2Image`]: 1. automatically detects a `"stable-diffusion"` class from the [`model_index.json`](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json) file 2. loads the corresponding text-to-image [`StableDiffusionPipeline`] based on the `"stable-diffusion"` class name Likewise, for image-to-image, [`AutoPipelineForImage2Image`] detects a `"stable-diffusion"` checkpoint from the `model_index.json` file and it'll load the corresponding [`StableDiffusionImg2ImgPipeline`] behind the scenes. 
You can also pass any additional arguments specific to the pipeline class such as `strength`, which determines the amount of noise or variation added to an input image: ```py from diffusers import AutoPipelineForImage2Image import torch import requests from PIL import Image from io import BytesIO pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") prompt = "a portrait of a dog wearing a pearl earring" url = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0f/1665_Girl_with_a_Pearl_Earring.jpg/800px-1665_Girl_with_a_Pearl_Earring.jpg" response = requests.get(url) image = Image.open(BytesIO(response.content)).convert("RGB") image.thumbnail((768, 768)) image = pipeline(prompt, image, num_inference_steps=200, strength=0.75, guidance_scale=10.5).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png" alt="generated image of a vermeer portrait of a dog wearing a pearl earring"/> </div> And if you want to do inpainting, then [`AutoPipelineForInpainting`] loads the underlying [`StableDiffusionInpaintPipeline`] class in the same way: ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).convert("RGB") mask_image = load_image(mask_url).convert("RGB") prompt = "A majestic tiger sitting on a bench" image = pipeline(prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png" alt="generated image of a tiger sitting on a bench"/> </div> If you try to load an unsupported checkpoint, it'll throw an error: ```py from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( "openai/shap-e-img2img", torch_dtype=torch.float16, use_safetensors=True ) "ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None" ``` ## Use multiple pipelines For some workflows or if you're loading many pipelines, it is more memory-efficient to reuse the same components from a checkpoint instead of reloading them which would unnecessarily consume additional memory. For example, if you're using a checkpoint for text-to-image and you want to use it again for image-to-image, use the [`~AutoPipelineForImage2Image.from_pipe`] method. This method creates a new pipeline from the components of a previously loaded pipeline at no additional memory cost. The [`~AutoPipelineForImage2Image.from_pipe`] method detects the original pipeline class and maps it to the new pipeline class corresponding to the task you want to do. 
For example, if you load a `"stable-diffusion"` class pipeline for text-to-image: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch pipeline_text2img = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) print(type(pipeline_text2img)) "<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'>" ``` Then [`~AutoPipelineForImage2Image.from_pipe`] maps the original `"stable-diffusion"` pipeline class to [`StableDiffusionImg2ImgPipeline`]: ```py pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img) print(type(pipeline_img2img)) "<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline'>" ``` If you passed an optional argument - like disabling the safety checker - to the original pipeline, this argument is also passed on to the new pipeline: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch pipeline_text2img = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, requires_safety_checker=False, ).to("cuda") pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img) print(pipeline_img2img.config.requires_safety_checker) "False" ``` You can overwrite any of the arguments and even configuration from the original pipeline if you want to change the behavior of the new pipeline. For example, to turn the safety checker back on and add the `strength` argument: ```py pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img, requires_safety_checker=True, strength=0.3) print(pipeline_img2img.config.requires_safety_checker) "True" ```
diffusers/docs/source/en/tutorials/autopipeline.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/autopipeline.md", "repo_id": "diffusers", "token_count": 2761 }
96
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Improve generation quality with FreeU [[open-in-colab]] The UNet is responsible for denoising during the reverse diffusion process, and there are two distinct features in its architecture: 1. Backbone features primarily contribute to the denoising process 2. Skip features mainly introduce high-frequency features into the decoder module and can make the network overlook the semantics in the backbone features However, the skip connection can sometimes introduce unnatural image details. [FreeU](https://hf.co/papers/2309.11497) is a technique for improving image quality by rebalancing the contributions from the UNet’s skip connections and backbone feature maps. FreeU is applied during inference and it does not require any additional training. The technique works for different tasks such as text-to-image, image-to-image, and text-to-video. In this guide, you will apply FreeU to the [`StableDiffusionPipeline`], [`StableDiffusionXLPipeline`], and [`TextToVideoSDPipeline`]. You need to install Diffusers from source to run the examples below. ## StableDiffusionPipeline Load the pipeline: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None ).to("cuda") ``` Then enable the FreeU mechanism with the FreeU-specific hyperparameters. These values are scaling factors for the backbone and skip features. ```py pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) ``` The values above are from the official FreeU [code repository](https://github.com/ChenyangSi/FreeU) where you can also find [reference hyperparameters](https://github.com/ChenyangSi/FreeU#range-for-more-parameters) for different models. <Tip> Disable the FreeU mechanism by calling `disable_freeu()` on a pipeline. 
</Tip> And then run inference: ```py prompt = "A squirrel eating a burger" seed = 2023 image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0] image ``` The figure below compares non-FreeU and FreeU results respectively for the same hyperparameters used above (`prompt` and `seed`): ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/freeu/sdv1_5_freeu.jpg) Let's see how Stable Diffusion 2 results are impacted: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None ).to("cuda") prompt = "A squirrel eating a burger" seed = 2023 pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.1, b2=1.2) image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0] image ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/freeu/sdv2_1_freeu.jpg) ## Stable Diffusion XL Finally, let's take a look at how FreeU affects Stable Diffusion XL results: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, ).to("cuda") prompt = "A squirrel eating a burger" seed = 2023 # Comes from # https://wandb.ai/nasirk24/UNET-FreeU-SDXL/reports/FreeU-SDXL-Optimal-Parameters--Vmlldzo1NDg4NTUw pipeline.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2) image = pipeline(prompt, generator=torch.manual_seed(seed)).images[0] image ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/freeu/sdxl_freeu.jpg) ## Text-to-video generation FreeU can also be used to improve video quality: ```python from diffusers import DiffusionPipeline from diffusers.utils import export_to_video import torch model_id = "cerspense/zeroscope_v2_576w" pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") prompt = "an astronaut riding a horse on mars" seed = 2023 # The values come from # https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines pipe.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2) video_frames = pipe(prompt, height=320, width=576, num_frames=30, generator=torch.manual_seed(seed)).frames[0] export_to_video(video_frames, "astronaut_rides_horse.mp4") ``` Thanks to [kadirnar](https://github.com/kadirnar/) for helping to integrate the feature, and to [justindujardin](https://github.com/justindujardin) for the helpful discussions.
diffusers/docs/source/en/using-diffusers/freeu.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/freeu.md", "repo_id": "diffusers", "token_count": 1636 }
97
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Create reproducible pipelines [[open-in-colab]] Reproducibility is important for testing, replicating results, and can even be used to [improve image quality](reusing_seeds). However, the randomness in diffusion models is a desired property because it allows the pipeline to generate different images every time it is run. While you can't expect to get the exact same results across platforms, you can expect results to be reproducible across releases and platforms within a certain tolerance range. Even then, tolerance varies depending on the diffusion pipeline and checkpoint. This is why it's important to understand how to control sources of randomness in diffusion models or use deterministic algorithms. <Tip> 💡 We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html): > Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds. </Tip> ## Control randomness During inference, pipelines rely heavily on random sampling operations which include creating the Gaussian noise tensors to denoise and adding noise to the scheduling step. Take a look at the tensor values in the [`DDIMPipeline`] after two inference steps: ```python from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # load model and scheduler ddim = DDIMPipeline.from_pretrained(model_id, use_safetensors=True) # run pipeline for just two steps and return numpy tensor image = ddim(num_inference_steps=2, output_type="np").images print(np.abs(image).sum()) ``` Running the code above prints one value, but if you run it again you get a different value. What is going on here? Every time the pipeline is run, [`torch.randn`](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create Gaussian noise which is denoised stepwise. This leads to a different result each time it is run, which is great for diffusion pipelines since it generates a different random image each time. But if you need to reliably generate the same image, that'll depend on whether you're running the pipeline on a CPU or GPU. 
### CPU To generate reproducible results on a CPU, you'll need to use a PyTorch [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # load model and scheduler ddim = DDIMPipeline.from_pretrained(model_id, use_safetensors=True) # create a generator for reproducibility generator = torch.Generator(device="cpu").manual_seed(0) # run pipeline for just two steps and return numpy tensor image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` Now when you run the code above, it always prints a value of `1491.1711` no matter what because the `Generator` object with the seed is passed to all the random functions of the pipeline. If you run this code example on your specific hardware and PyTorch version, you should get a similar, if not the same, result. <Tip> 💡 It might be a bit unintuitive at first to pass `Generator` objects to the pipeline instead of just integer values representing the seed, but this is the recommended design when dealing with probabilistic models in PyTorch, as `Generator`s are *random states* that can be passed to multiple pipelines in a sequence. </Tip> ### GPU Writing a reproducible pipeline on a GPU is a bit trickier, and full reproducibility across different hardware is not guaranteed because matrix multiplication - which diffusion pipelines require a lot of - is less deterministic on a GPU than a CPU. For example, if you run the same code example above on a GPU: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # load model and scheduler ddim = DDIMPipeline.from_pretrained(model_id, use_safetensors=True) ddim.to("cuda") # create a generator for reproducibility generator = torch.Generator(device="cuda").manual_seed(0) # run pipeline for just two steps and return numpy tensor image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` The result is not the same even though you're using an identical seed because the GPU uses a different random number generator than the CPU. To circumvent this problem, 🧨 Diffusers has a [`~diffusers.utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, allowing the user to **always** pass a CPU `Generator` even if the pipeline is run on a GPU. You'll see the results are much closer now! ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # load model and scheduler ddim = DDIMPipeline.from_pretrained(model_id, use_safetensors=True) ddim.to("cuda") # create a generator for reproducibility; notice you don't place it on the GPU! generator = torch.manual_seed(0) # run pipeline for just two steps and return numpy tensor image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` <Tip> 💡 If reproducibility is important, we recommend always passing a CPU generator. The performance loss is often neglectable, and you'll generate much more similar values than if the pipeline had been run on a GPU. </Tip> Finally, for more complex pipelines such as [`UnCLIPPipeline`], these are often extremely susceptible to precision error propagation. 
Don't expect similar results across different GPU hardware or PyTorch versions. In this case, you'll need to run exactly the same hardware and PyTorch version for full reproducibility. ## Deterministic algorithms You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. However, you should be aware that deterministic algorithms may be slower than nondeterministic ones and you may observe a decrease in performance. But if reproducibility is important to you, then this is the way to go! Nondeterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [`CUBLAS_WORKSPACE_CONFIG`](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime. PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Lastly, pass `True` to [`torch.use_deterministic_algorithms`](https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html) to enable deterministic algorithms. ```py import os import torch os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) ``` Now when you run the same pipeline twice, you'll get identical results. ```py import torch from diffusers import DDIMScheduler, StableDiffusionPipeline model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id, use_safetensors=True).to("cuda") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) g = torch.Generator(device="cuda") prompt = "A bear is playing a guitar on Times Square" g.manual_seed(0) result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images g.manual_seed(0) result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images print("L_inf dist =", abs(result1 - result2).max()) "L_inf dist = tensor(0., device='cuda:0')" ```
diffusers/docs/source/en/using-diffusers/reproducibility.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/reproducibility.md", "repo_id": "diffusers", "token_count": 2403 }
98
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Installation Install 🤗 Diffusers for the deep learning library you're working with. 🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using: - [PyTorch](https://pytorch.org/get-started/locally/) installation instructions. - [Flax](https://flax.readthedocs.io/en/latest/) installation instructions. ## Install with pip You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies. Start by creating a virtual environment in your project directory: ```bash python -m venv .env ``` Activate the virtual environment: ```bash source .env/bin/activate ``` 🤗 Diffusers also relies on the 🤗 Transformers library, and you can install both with the following command: <frameworkcontent> <pt> ```bash pip install diffusers["torch"] transformers ``` </pt> <jax> ```bash pip install diffusers["flax"] transformers ``` </jax> </frameworkcontent> ## Install from source Before installing 🤗 Diffusers from source, make sure you have `torch` and 🤗 Accelerate installed. For instructions on installing `torch`, refer to the `torch` [installation](https://pytorch.org/get-started/locally/#start-locally) guide. To install 🤗 Accelerate: ```bash pip install accelerate ``` Install 🤗 Diffusers from source with the following command: ```bash pip install git+https://github.com/huggingface/diffusers ``` This command installs the bleeding-edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up to date with the latest developments, for example if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this also means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose)! ## Editable install You will need an editable install if you: * use the `main` version of the source code. * contribute to 🤗 Diffusers and need to test changes in the code. Clone the repository and install 🤗 Diffusers with the following commands: ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers ``` <frameworkcontent> <pt> ```bash pip install -e ".[torch]" ``` </pt> <jax> ```bash pip install -e ".[flax]" ``` </jax> </frameworkcontent> These commands link the folder you cloned the repository to with your Python library paths. In addition to the normal library paths, Python will now also look inside the folder you cloned. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to. <Tip warning={true}> You must keep the `diffusers` folder if you want to keep using the library. </Tip> Now you can easily update your cloned copy to the latest version of 🤗 Diffusers with the following command: ```bash cd ~/diffusers/ git pull ``` Your Python environment will find the `main` version of 🤗 Diffusers on the next run. ## Notice on telemetry logging Our library gathers telemetry information during `from_pretrained()` requests. The data gathered includes the Diffusers and PyTorch/Flax versions and the requested model or pipeline class, as well as the path to a pretrained checkpoint if it is hosted on the Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the Hugging Face Hub; it is not collected during local usage. We understand that not everyone wants to share additional information, and we respect your privacy. You can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal: On Linux/MacOS: ```bash export DISABLE_TELEMETRY=YES ``` On Windows: ```bash set DISABLE_TELEMETRY=YES ```
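As a quick sanity check after installing (this snippet is not part of the original guide and assumes the `torch` extra is installed), you can print the installed version and instantiate a scheduler, which exercises the configuration machinery without downloading any weights:

```py
# Quick install check; assumes `pip install diffusers["torch"]` has been run.
import diffusers
from diffusers import DDPMScheduler

print(diffusers.__version__)

# Creating a scheduler from scratch requires no network access or model download.
scheduler = DDPMScheduler(num_train_timesteps=1000)
print(scheduler.config.num_train_timesteps)
```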
diffusers/docs/source/ja/installation.md/0
{ "file_path": "diffusers/docs/source/ja/installation.md", "repo_id": "diffusers", "token_count": 2493 }
99
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Overview Generating high-quality output with a generative model requires many iterative steps that take a noisy output and progressively denoise it, and each of these repeated steps is computationally expensive. One of 🧨 Diffusers' goals is to make this technology widely accessible to everyone, which includes enabling fast inference on consumer and specialized hardware. This section covers tips and tricks, such as half-precision weights and sliced attention, for optimizing inference speed and reducing memory consumption. You'll also learn how to speed up your PyTorch code with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) or [ONNX Runtime](https://onnxruntime.ai/docs/), and how to enable memory-efficient attention with [xFormers](https://facebookresearch.github.io/xformers/). There are also guides for running inference on specific hardware such as Apple Silicon, Intel, or Habana processors.
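As a concrete taste of the techniques covered in this section, the sketch below combines half-precision weights with `torch.compile`; the checkpoint name and compile settings are illustrative assumptions, and the dedicated guides go into much more detail:

```py
# Minimal sketch: fp16 weights plus torch.compile for faster inference.
# The checkpoint and compile mode are assumptions; see the individual guides.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Compile the UNet (the heaviest component); the first call is slow, later calls are faster.
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```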
diffusers/docs/source/ko/optimization/opt_overview.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/opt_overview.md", "repo_id": "diffusers", "token_count": 942 }
100
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Textual Inversion [[open-in-colab]] [Textual inversion](https://arxiv.org/abs/2208.01618) is a technique for capturing new concepts from a small number of example images. It was originally demonstrated with [Latent Diffusion](https://github.com/CompVis/latent-diffusion), but has since been applied to other similar models such as [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion). The learned concepts can be used to better control the images generated by a text-to-image pipeline. Textual inversion learns new "words" in the text encoder's embedding space, which can then be used in text prompts for personalized image generation. ![Textual Inversion example](https://textual-inversion.github.io/static/images/editing/colorful_teapot.JPG) <small>By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation <a href="https://github.com/rinongal/textual_inversion">(image source)</a>.</small> This guide explains how to train a [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model with textual inversion. All of the textual inversion training scripts used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion); refer to that link if you'd like to take a closer look at how everything works under the hood. <Tip> The [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library) contains trained textual inversion models created by the community. It will keep growing into a useful resource as more concepts are added over time! </Tip> Before you begin, install the dependencies required for training: ```bash pip install diffusers accelerate transformers ``` Once the dependencies are installed, initialize a [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment: ```bash accelerate config ``` To set up a default 🤗 Accelerate environment without choosing any configuration: ```bash accelerate config default ``` Or, if your environment doesn't support an interactive shell such as a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Finally, install [xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers) to reduce memory usage with memory-efficient attention. After installing xFormers, add the `--enable_xformers_memory_efficient_attention` argument to the training script. xFormers is not supported for Flax. ## Upload the model to the Hub To store your model on the Hub, add the following argument to the training script: ```bash --push_to_hub ``` ## Saving and loading checkpoints It is a good idea to regularly save checkpoints of your model during training. That way, if training is interrupted for any reason, you can resume from a saved checkpoint. Pass the following argument to the training script to save the full training state as a checkpoint in a subfolder of `output_dir` every 500 steps: ```bash --checkpointing_steps=500 ``` To resume training from a saved checkpoint, pass the following argument to the training script along with the specific checkpoint to resume from: ```bash --resume_from_checkpoint="checkpoint-1500" ``` ## Fine-tuning For the training dataset, download the [cat toy dataset](https://huggingface.co/datasets/diffusers/cat_toy_example) and store it in a directory. If you want to use your own dataset, take a look at the [Create a dataset for training](https://huggingface.co/docs/diffusers/training/create_dataset) guide.
```py from huggingface_hub import snapshot_download local_dir = "./cat" snapshot_download( "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes" ) ``` Assign the model's repository id (or the path to a directory containing the model weights) to the `MODEL_NAME` environment variable and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument, and assign the path to the directory containing the images to the `DATA_DIR` environment variable. Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates the following files and saves them to your repository: - `learned_embeds.bin` - `token_identifier.txt` - `type_of_concept.txt`. <Tip> 💡 A full training run takes up to 1 hour on one V100 GPU. While you wait for training to complete, feel free to check out [how textual inversion works](https://huggingface.co/docs/diffusers/training/text_inversion#how-it-works) in the section below! </Tip> <frameworkcontent> <pt> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATA_DIR="./cat" accelerate launch textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 --scale_lr \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` <Tip> 💡 To improve training quality, you can also consider representing the placeholder token (`<cat-toy>`) with multiple embedding vectors instead of a single one. This trick can help the model better capture the style of more complex images (that is, the concept mentioned above). To enable training of multiple embedding vectors, pass the following option: ```bash --num_vectors=5 ``` </Tip> </pt> <jax> If you have access to TPUs, try the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py) to train the model even faster (it also works on GPUs). With the same configuration, the Flax training script should be at least 70% faster than the PyTorch training script! ⚡️ Before you begin, install the Flax dependencies: ```bash pip install -U -r requirements_flax.txt ``` Assign the model's repository id (or the path to a directory containing the model weights) to the `MODEL_NAME` environment variable and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. Then you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py): ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export DATA_DIR="./cat" python textual_inversion_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 --scale_lr \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` </jax> </frameworkcontent> ### Intermediate logging If you're interested in tracking your model's training progress, you can save the images generated during training. Add the following arguments to the training script to enable intermediate logging: - `validation_prompt`: the prompt used to generate samples (defaults to `None`, which disables intermediate logging) - `num_validation_images`: the number of sample images to generate - `validation_steps`: the number of steps before generating sample images from the `validation_prompt` ```bash --validation_prompt="A <cat-toy> backpack" --num_validation_images=4 --validation_steps=100 ``` ## Inference Once you have trained a model, you can use it for inference with the [`StableDiffusionPipeline`]. By default, the textual inversion script only saves the embedding vectors obtained through textual inversion. These embedding vectors are added to the text encoder's embedding matrix.
<frameworkcontent> <pt> <Tip> 💡 The community has built a large library of textual inversion embedding vectors called [sd-concepts-library](https://huggingface.co/sd-concepts-library). Instead of training a textual inversion embedding from scratch, it's worth checking whether the embedding you're looking for has already been added to that library. </Tip> To load a textual inversion embedding vector, first load the model that was used to train it. Here we assume the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/docs/diffusers/training/runwayml/stable-diffusion-v1-5) model was used: ```python from diffusers import StableDiffusionPipeline import torch model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") ``` Next, load the textual inversion embedding vector with the `TextualInversionLoaderMixin.load_textual_inversion` function. Here we'll load the embedding from the earlier `<cat-toy>` example: ```python pipe.load_textual_inversion("sd-concepts-library/cat-toy") ``` Now you can run the pipeline to check that the placeholder token (`<cat-toy>`) works as expected: ```python prompt = "A <cat-toy> backpack" image = pipe(prompt, num_inference_steps=50).images[0] image.save("cat-backpack.png") ``` `TextualInversionLoaderMixin.load_textual_inversion` can load not only textual embedding vectors saved in the Diffusers format, but also embedding vectors saved in the [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) format. To do this, first download an embedding vector from [civitAI](https://civitai.com/models/3036?modelVersionId=8387) and then load it locally: ```python pipe.load_textual_inversion("./charturnerv2.pt") ``` </pt> <jax> There is currently no `load_textual_inversion` function for Flax, so after training you'll have to make sure the textual inversion embedding vector is saved as part of the model. The model can then be run like any other Flax model: ```python import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline model_path = "path-to-your-trained-model" pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16) prompt = "A <cat-toy> backpack" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) images[0].save("cat-backpack.png") ``` </jax> </frameworkcontent> ## How it works ![Diagram from the paper showing overview](https://textual-inversion.github.io/static/images/training/training.JPG) <small>Architecture overview from the Textual Inversion <a href="https://textual-inversion.github.io/">blog post.</a></small> Usually, text prompts are tokenized into an embedding before being passed to a model. Textual inversion does something similar, but it learns a new token embedding, `v*`, from the special token `S*` in the diagram above. The model output is used to condition the diffusion model, which helps the diffusion model quickly understand new concepts from just a few example images. To do this, textual inversion uses a generator model and noisy versions of the training images. The generator tries to predict less noisy versions of the images, and the token embedding `v*` is optimized based on how well the generator does. If the token embedding successfully captures the new concept, it gives more useful information to the diffusion model and helps create clearer images with less noise. This optimization process typically requires thousands of exposures to a variety of prompts and images.
diffusers/docs/source/ko/training/text_inversion.md/0
{ "file_path": "diffusers/docs/source/ko/training/text_inversion.md", "repo_id": "diffusers", "token_count": 9076 }
101
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Overview A pipeline is an end-to-end class that bundles independently trained models and schedulers together, providing a quick and easy way to use a diffusion system for inference. A specific combination of model and scheduler defines a particular pipeline type, such as [`StableDiffusionPipeline`] or [`StableDiffusionControlNetPipeline`], with specialized capabilities. All pipeline types inherit from the base [`DiffusionPipeline`] class; pass it any checkpoint and it automatically detects the pipeline type and loads the necessary components. This section introduces the tasks supported by pipelines, including unconditional image generation and the many variations and techniques of text-to-image generation. You'll learn how to gain more control over the generation process by setting a seed for reproducibility and weighting prompts to adjust how strongly specific words in the prompt influence the output. Finally, you'll see how to create a community pipeline for a custom task such as generating an image from speech.
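As a small illustration of the seed-setting mentioned above (the checkpoint and prompt here are assumptions, not taken from this overview), passing a fixed `torch.Generator` to a pipeline makes the call reproducible:

```py
# Minimal sketch: a fixed Generator seed makes repeated pipeline calls reproducible.
# The checkpoint and prompt are illustrative assumptions.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

generator = torch.Generator(device="cpu").manual_seed(42)
image = pipe("a photo of a red fox in the snow", generator=generator).images[0]
```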
diffusers/docs/source/ko/using-diffusers/pipeline_overview.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/pipeline_overview.md", "repo_id": "diffusers", "token_count": 1173 }
102
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Installation

Install 🤗 Diffusers for whichever deep learning framework you are working with.

🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the framework you are using:

- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.

## Install with pip

You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you are not familiar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment:

```bash
source .env/bin/activate
```

Now you are ready to install 🤗 Diffusers with the following command:

**PyTorch**

```bash
pip install diffusers["torch"]
```

**Flax**

```bash
pip install diffusers["flax"]
```

## Install from source

Before installing `diffusers` from source, make sure you have `torch` and `accelerate` installed.

For `torch` installation, refer to the `torch` [documentation](https://pytorch.org/get-started/locally/#start-locally).

To install `accelerate`:

```bash
pip install accelerate
```

Then install 🤗 Diffusers from source with the following command:

```bash
pip install git+https://github.com/huggingface/diffusers
```

This command installs the bleeding-edge `main` version rather than the latest `stable` release.
The `main` version is useful for staying up to date with the latest developments. For example, if a bug has been fixed since the last official release but a new release has not yet been rolled out, the fix is already available on `main`.
However, this also means the `main` version is not guaranteed to be stable.
We strive to keep `main` working, and most issues are resolved within a few hours or a day.
If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues) so we can fix it even sooner!

## Editable install

You will need an editable install if you want to:

* use the `main` version of the source code, or
* contribute to 🤗 Diffusers and need to test changes in the code.

Clone the repository and install 🤗 Diffusers with the following commands:

```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```

**PyTorch**

```
pip install -e ".[torch]"
```

**Flax**

```
pip install -e ".[flax]"
```

These commands link the folder you cloned the repository to with your Python library paths.
Python will now look inside the folder you cloned to, in addition to the normal library paths.
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.

<Tip warning={true}>

You must keep the `diffusers` folder if you want to keep using the library.

</Tip>

You can now easily update your cloned copy to the latest version of 🤗 Diffusers with the following command:

```bash
cd ~/diffusers/
git pull
```

Your Python environment will find the `main` version of 🤗 Diffusers on the next run.

## Notice on telemetry logging

Our library gathers telemetry information during `from_pretrained()` requests.
The data gathered includes the version of 🤗 Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub.
This usage data helps us debug issues and prioritize new features.
Telemetry is only sent when loading models and pipelines from the Hugging Face Hub, and it is not collected during local usage.

We understand that not everyone wants to share additional information, and we respect your privacy.
You can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:

On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```

On Windows:
```bash
set DISABLE_TELEMETRY=YES
```
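To double-check which build you ended up with after any of the installation paths above (a minimal check, assuming the install succeeded), print the reported version:

```python
import diffusers

# A PyPI release prints a plain version such as "0.27.2"; an install from `main`
# (source or editable) typically carries a ".dev0" suffix.
print(diffusers.__version__)
```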
diffusers/docs/source/zh/installation.md/0
{ "file_path": "diffusers/docs/source/zh/installation.md", "repo_id": "diffusers", "token_count": 2455 }
103
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). """ batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: if device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device) else: latents = torch.randn(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, weights: Optional[str] = "", ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. 
Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if "|" in prompt: prompt = [x.strip() for x in prompt.split("|")] print(f"composing {prompt}...") if not weights: # specify weights for prompts (excluding the unconditional score) print("using equal positive weights (conjunction) for all prompts...") weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1) else: # set prompt weight for each num_prompts = len(prompt) if isinstance(prompt, list) else 1 weights = [float(w.strip()) for w in weights.split("|")] # guidance scale as the default if len(weights) < num_prompts: weights.append(guidance_scale) else: weights = weights[:num_prompts] assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts" weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1) else: weights = guidance_scale # 3. Encode input prompt text_embeddings = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, text_embeddings.dtype, device, generator, latents, ) # composable diffusion if isinstance(prompt, list) and batch_size == 1: # remove extra unconditional embedding # N = one unconditional embed + conditional embeds text_embeddings = text_embeddings[len(prompt) - 1 :] # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = [] for j in range(text_embeddings.shape[0]): noise_pred.append( self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample ) noise_pred = torch.cat(noise_pred, dim=0) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:] noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum( dim=0, keepdims=True ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) # 10. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
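Since the class above ships as a community pipeline, the usual way to try it is through `DiffusionPipeline.from_pretrained` with the `custom_pipeline` argument. The snippet below is a usage sketch rather than an official example; the checkpoint name, prompts, and weights are illustrative assumptions.

```python
import torch
from diffusers import DiffusionPipeline

# Load the community pipeline defined in this file via the `custom_pipeline` hook.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Prompts joined with "|" are composed; `weights` supplies one guidance weight per prompt.
image = pipe(
    prompt="a red sports car | a snowy mountain road",
    weights="7.5 | 7.5",
    num_inference_steps=50,
).images[0]
image.save("composed.png")
```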
diffusers/examples/community/composable_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/composable_stable_diffusion.py", "repo_id": "diffusers", "token_count": 11830 }
104
import inspect import re from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTokenizer import diffusers from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import logging try: from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE except ImportError: ORT_TO_NP_TYPE = { "tensor(bool)": np.bool_, "tensor(int8)": np.int8, "tensor(uint8)": np.uint8, "tensor(int16)": np.int16, "tensor(uint16)": np.uint16, "tensor(int32)": np.int32, "tensor(uint32)": np.uint32, "tensor(int64)": np.int64, "tensor(uint64)": np.uint64, "tensor(float16)": np.float16, "tensor(float)": np.float32, "tensor(double)": np.float64, } try: from diffusers.utils import PIL_INTERPOLATION except ImportError: if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ logger = logging.get_logger(__name__) # pylint: disable=invalid-name re_attention = re.compile( r""" \\\(| \\\)| \\\[| \\]| \\\\| \\| \(| \[| :([+-]?[.\d]+)\)| \)| ]| [^\\()\[\]:]+| : """, re.X, ) def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: res.append([text, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_with_weights(pipe, prompt: List[str], max_length: int): r""" Tokenize a list of prompts and return its tokens with weights of each token. No padding, starting or ending token is included. """ tokens = [] weights = [] truncated = False for text in prompt: texts_and_weights = parse_prompt_attention(text) text_token = [] text_weight = [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1] text_token += list(token) # copy the weight by length of token text_weight += [weight] * len(token) # stop if the text is too long (longer than truncation limit) if len(text_token) > max_length: truncated = True break # truncate if len(text_token) > max_length: truncated = True text_token = text_token[:max_length] text_weight = text_weight[:max_length] tokens.append(text_token) weights.append(text_weight) if truncated: logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples") return tokens, weights def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77): r""" Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. 
""" max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length for i in range(len(tokens)): tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos] if no_boseos_middle: weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) else: w = [] if len(weights[i]) == 0: w = [1.0] * weights_length else: for j in range(max_embeddings_multiples): w.append(1.0) # weight for starting token in this chunk w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))] w.append(1.0) # weight for ending token in this chunk w += [1.0] * (weights_length - len(w)) weights[i] = w[:] return tokens, weights def get_unweighted_text_embeddings( pipe, text_input: np.array, chunk_length: int, no_boseos_middle: Optional[bool] = True, ): """ When the length of tokens is a multiple of the capacity of the text encoder, it should be split into chunks and sent to the text encoder individually. """ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) if max_embeddings_multiples > 1: text_embeddings = [] for i in range(max_embeddings_multiples): # extract the i-th chunk text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy() # cover the head and the tail by the starting and the ending tokens text_input_chunk[:, 0] = text_input[0, 0] text_input_chunk[:, -1] = text_input[0, -1] text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0] if no_boseos_middle: if i == 0: # discard the ending token text_embedding = text_embedding[:, :-1] elif i == max_embeddings_multiples - 1: # discard the starting token text_embedding = text_embedding[:, 1:] else: # discard both starting and ending tokens text_embedding = text_embedding[:, 1:-1] text_embeddings.append(text_embedding) text_embeddings = np.concatenate(text_embeddings, axis=1) else: text_embeddings = pipe.text_encoder(input_ids=text_input)[0] return text_embeddings def get_weighted_text_embeddings( pipe, prompt: Union[str, List[str]], uncond_prompt: Optional[Union[str, List[str]]] = None, max_embeddings_multiples: Optional[int] = 4, no_boseos_middle: Optional[bool] = False, skip_parsing: Optional[bool] = False, skip_weighting: Optional[bool] = False, **kwargs, ): r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean. Args: pipe (`OnnxStableDiffusionPipeline`): Pipe to provide access to the tokenizer and the text encoder. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. uncond_prompt (`str` or `List[str]`): The unconditional prompt or prompts for guide the image generation. If unconditional prompt is provided, the embeddings of prompt and uncond_prompt are concatenated. max_embeddings_multiples (`int`, *optional*, defaults to `1`): The max multiple length of prompt embeddings compared to the max output length of text encoder. no_boseos_middle (`bool`, *optional*, defaults to `False`): If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and ending token in each of the chunk in the middle. 
skip_parsing (`bool`, *optional*, defaults to `False`): Skip the parsing of brackets. skip_weighting (`bool`, *optional*, defaults to `False`): Skip the weighting. When the parsing is skipped, it is forced True. """ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 if isinstance(prompt, str): prompt = [prompt] if not skip_parsing: prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2) if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2) else: prompt_tokens = [ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids ] prompt_weights = [[1.0] * len(token) for token in prompt_tokens] if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens = [ token[1:-1] for token in pipe.tokenizer( uncond_prompt, max_length=max_length, truncation=True, return_tensors="np", ).input_ids ] uncond_weights = [[1.0] * len(token) for token in uncond_tokens] # round up the longest length of tokens to a multiple of (model_max_length - 2) max_length = max([len(token) for token in prompt_tokens]) if uncond_prompt is not None: max_length = max(max_length, max([len(token) for token in uncond_tokens])) max_embeddings_multiples = min( max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1, ) max_embeddings_multiples = max(1, max_embeddings_multiples) max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 # pad the length of tokens and weights bos = pipe.tokenizer.bos_token_id eos = pipe.tokenizer.eos_token_id pad = getattr(pipe.tokenizer, "pad_token_id", eos) prompt_tokens, prompt_weights = pad_tokens_and_weights( prompt_tokens, prompt_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) prompt_tokens = np.array(prompt_tokens, dtype=np.int32) if uncond_prompt is not None: uncond_tokens, uncond_weights = pad_tokens_and_weights( uncond_tokens, uncond_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) uncond_tokens = np.array(uncond_tokens, dtype=np.int32) # get the embeddings text_embeddings = get_unweighted_text_embeddings( pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype) if uncond_prompt is not None: uncond_embeddings = get_unweighted_text_embeddings( pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype) # assign weights to the prompts and normalize in the sense of mean # TODO: should we normalize by chunk or in a whole (current implementation)? if (not skip_parsing) and (not skip_weighting): previous_mean = text_embeddings.mean(axis=(-2, -1)) text_embeddings *= prompt_weights[:, :, None] text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None] if uncond_prompt is not None: previous_mean = uncond_embeddings.mean(axis=(-2, -1)) uncond_embeddings *= uncond_weights[:, :, None] uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None] # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if uncond_prompt is not None: return text_embeddings, uncond_embeddings return text_embeddings def preprocess_image(image): w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 1.0 def preprocess_mask(mask, scale_factor=8): mask = mask.convert("L") w, h = mask.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? mask = 1 - mask # repaint white, keep black return mask class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing weighting in prompt. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) """ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"): def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: SchedulerMixin, safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, ) self.__init__additional__() else: def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: SchedulerMixin, safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, ): super().__init__( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.__init__additional__() def __init__additional__(self): self.unet.config.in_channels = 4 self.vae_scale_factor = 8 def _encode_prompt( self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. 
""" batch_size = len(prompt) if isinstance(prompt, list) else 1 if negative_prompt is None: negative_prompt = [""] * batch_size elif isinstance(negative_prompt, str): negative_prompt = [negative_prompt] * batch_size if batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) text_embeddings, uncond_embeddings = get_weighted_text_embeddings( pipe=self, prompt=prompt, uncond_prompt=negative_prompt if do_classifier_free_guidance else None, max_embeddings_multiples=max_embeddings_multiples, ) text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0) if do_classifier_free_guidance: uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0) text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) return text_embeddings def check_inputs(self, prompt, height, width, strength, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def get_timesteps(self, num_inference_steps, strength, is_text2img): if is_text2img: return self.scheduler.timesteps, num_inference_steps else: # get the original timestep using init_timestep offset = self.scheduler.config.get("steps_offset", 0) init_timestep = int(num_inference_steps * strength) + offset init_timestep = min(init_timestep, num_inference_steps) t_start = max(num_inference_steps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def run_safety_checker(self, image): if self.safety_checker is not None: safety_checker_input = self.feature_extractor( self.numpy_to_pil(image), return_tensors="np" ).pixel_values.astype(image.dtype) # There will throw an error if use safety_checker directly and batchsize>1 images, has_nsfw_concept = [], [] for i in range(image.shape[0]): image_i, has_nsfw_concept_i = self.safety_checker( clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] ) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / 0.18215 * latents # image = self.vae_decoder(latent_sample=latents)[0] # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 image = np.concatenate( [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] ) image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None): if image is None: shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") # scale the initial noise by the standard deviation required by the scheduler latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy() return latents, None, None else: init_latents = self.vae_encoder(sample=image)[0] init_latents = 0.18215 * init_latents init_latents = np.concatenate([init_latents] * batch_size, axis=0) init_latents_orig = init_latents shape = init_latents.shape # add noise to latents using the timesteps noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) latents = self.scheduler.add_noise( torch.from_numpy(init_latents), torch.from_numpy(noise), timestep ).numpy() return latents, init_latents_orig, noise @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, image: Union[np.ndarray, PIL.Image.Image] = None, mask_image: Union[np.ndarray, PIL.Image.Image] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, strength: float = 0.8, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[np.ndarray] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, strength, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_embeddings = self._encode_prompt( prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, ) dtype = text_embeddings.dtype # 4. Preprocess image and mask if isinstance(image, PIL.Image.Image): image = preprocess_image(image) if image is not None: image = image.astype(dtype) if isinstance(mask_image, PIL.Image.Image): mask_image = preprocess_mask(mask_image, self.vae_scale_factor) if mask_image is not None: mask = mask_image.astype(dtype) mask = np.concatenate([mask] * batch_size * num_images_per_prompt) else: mask = None # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps) timestep_dtype = next( (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" ) timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents, init_latents_orig, noise = self.prepare_latents( image, latent_timestep, batch_size * num_images_per_prompt, height, width, dtype, generator, latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.numpy() # predict the noise residual noise_pred = self.unet( sample=latent_model_input, timestep=np.array([t], dtype=timestep_dtype), encoder_hidden_states=text_embeddings, ) noise_pred = noise_pred[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 scheduler_output = self.scheduler.step( torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs ) latents = scheduler_output.prev_sample.numpy() if mask is not None: # masking init_latents_proper = self.scheduler.add_noise( torch.from_numpy(init_latents_orig), torch.from_numpy(noise), t, ).numpy() latents = (init_latents_proper * mask) + (latents * (1 - mask)) # call the callback, if provided if i % callback_steps == 0: if callback is not None: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if is_cancelled_callback is not None and is_cancelled_callback(): return None # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image) # 11. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return image, has_nsfw_concept return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def text2img( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[np.ndarray] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for text-to-image generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) def img2img( self, image: Union[np.ndarray, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for image-to-image generation. Args: image (`np.ndarray` or `PIL.Image.Image`): `Image`, or ndarray representing an image batch, that will be used as the starting point for the process. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) def inpaint( self, image: Union[np.ndarray, PIL.Image.Image], mask_image: Union[np.ndarray, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for inpaint. Args: image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. This is the image whose masked region will be inpainted. mask_image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the denoising process will be run on the masked area for the full number of iterations specified in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. num_inference_steps (`int`, *optional*, defaults to 50): The reference number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`, as explained above. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. 
generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
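A short aside on the two update rules at the heart of the denoising loop above: classifier-free guidance mixes the unconditional and text-conditioned noise predictions, and, for inpainting, the freshly denoised latents are blended with a re-noised copy of the original latents via the mask. The NumPy sketch below restates both formulas; the array shapes are made up for illustration and `init_latents_proper` merely stands in for the `scheduler.add_noise(...)` result.

```python
import numpy as np

# Toy latent batch: 1 sample, 4 channels, 64x64 (illustrative shapes only).
latents = np.random.randn(1, 4, 64, 64).astype(np.float32)
noise_pred_uncond = np.random.randn(*latents.shape).astype(np.float32)
noise_pred_text = np.random.randn(*latents.shape).astype(np.float32)
guidance_scale = 7.5

# Classifier-free guidance: move the prediction from the unconditional branch
# towards the text-conditioned branch, scaled by guidance_scale.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Inpainting blend: keep the (re-noised) original latents where mask == 1 and
# the newly denoised latents where mask == 0.
init_latents_proper = np.random.randn(*latents.shape).astype(np.float32)  # stand-in for scheduler.add_noise(...)
mask = np.zeros_like(latents)
mask[..., 16:48, 16:48] = 1.0
latents = init_latents_proper * mask + latents * (1 - mask)
```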
diffusers/examples/community/lpw_stable_diffusion_onnx.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion_onnx.py", "repo_id": "diffusers", "token_count": 24240 }
105
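The img2img and inpaint paths above shorten the schedule according to `strength`: only the last `int(num_inference_steps * strength)` scheduler timesteps are run, so `strength=1.0` denoises from (almost) pure noise while small values stay close to the input, and the `image is None` flag passed to `get_timesteps` lets text2img keep the full schedule. Below is a minimal sketch of that mapping, mirroring the `get_timesteps` logic used by these pipelines; the helper name and the toy schedule are illustrative only.

```python
def timesteps_for_strength(num_inference_steps, strength, scheduler_timesteps):
    """Return the tail of the timestep schedule that img2img-style pipelines actually run."""
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return scheduler_timesteps[t_start:], num_inference_steps - t_start


# Illustrative schedule: 50 timesteps from 981 down to 1.
schedule = list(range(981, 0, -20))
timesteps, effective_steps = timesteps_for_strength(50, 0.8, schedule)
assert effective_steps == 40 and len(timesteps) == 40  # strength=0.8 -> run the last 40 steps
```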
# Copyright 2024 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.image_processor import PipelineDepthInput, PipelineImageInput, VaeImageProcessorLDM3D from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d import LDM3DPipelineOutput from diffusers.schedulers import DDPMScheduler, KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> from diffusers import StableDiffusionUpscaleLDM3DPipeline >>> from PIL import Image >>> from io import BytesIO >>> import requests >>> pipe = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr") >>> pipe = pipe.to("cuda") >>> rgb_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_rgb.jpg" >>> depth_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_depth.png" >>> low_res_rgb = Image.open(BytesIO(requests.get(rgb_path).content)).convert("RGB") >>> low_res_depth = Image.open(BytesIO(requests.get(depth_path).content)).convert("L") >>> output = pipe( ... prompt="high quality high resolution uhd 4k image", ... rgb=low_res_rgb, ... depth=low_res_depth, ... num_inference_steps=50, ... target_res=[1024, 1024], ... ) >>> rgb_image, depth_image = output.rgb, output.depth >>> rgb_image[0].save("hr_ldm3d_rgb.jpg") >>> depth_image[0].save("hr_ldm3d_depth.png") ``` """ class StableDiffusionUpscaleLDM3DPipeline( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin ): r""" Pipeline for text-to-image and 3D generation using LDM3D. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. low_res_scheduler ([`SchedulerMixin`]): A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of [`DDPMScheduler`]. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, watermarker: Optional[Any] = None, max_noise_level: int = 350, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, safety_checker=safety_checker, watermarker=watermarker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear") # self.register_to_config(requires_safety_checker=requires_safety_checker) self.register_to_config(max_noise_level=max_noise_level) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) rgb_feature_extractor_input = feature_extractor_input[0] safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, image, noise_level, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, target_res=None, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, np.ndarray) and not isinstance(image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" ) # verify batch size of prompt and image are same if image is a list or tensor or numpy array if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if batch_size != image_batch_size: raise ValueError( f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." " Please make sure that passed `prompt` matches the batch size of `image`." ) # check noise level if noise_level > self.config.max_noise_level: raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # def upcast_vae(self): # dtype = self.vae.dtype # self.vae.to(dtype=torch.float32) # use_torch_2_0_or_xformers = isinstance( # self.vae.decoder.mid_block.attentions[0].processor, # ( # AttnProcessor2_0, # XFormersAttnProcessor, # LoRAXFormersAttnProcessor, # LoRAAttnProcessor2_0, # ), # ) # # if xformers or torch_2_0 is used attention block does not need # # to be in float32 which can save lots of memory # if use_torch_2_0_or_xformers: # self.vae.post_quant_conv.to(dtype) # self.vae.decoder.conv_in.to(dtype) # self.vae.decoder.mid_block.to(dtype) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, rgb: PipelineImageInput = None, depth: PipelineDepthInput = None, num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 20, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, target_res: Optional[List[int]] = [1024, 1024], ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be upscaled. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, rgb, noise_level, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Preprocess image rgb, depth = self.image_processor.preprocess(rgb, depth, target_res=target_res) rgb = rgb.to(dtype=prompt_embeds.dtype, device=device) depth = depth.to(dtype=prompt_embeds.dtype, device=device) # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Encode low resolutiom image to latent space image = torch.cat([rgb, depth], axis=1) latent_space_image = self.vae.encode(image).latent_dist.sample(generator) latent_space_image *= self.vae.scaling_factor noise_level = torch.tensor([noise_level], dtype=torch.long, device=device) # noise_rgb = randn_tensor(rgb.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) # rgb = self.low_res_scheduler.add_noise(rgb, noise_rgb, noise_level) # noise_depth = randn_tensor(depth.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) # depth = self.low_res_scheduler.add_noise(depth, noise_depth, noise_level) batch_multiplier = 2 if do_classifier_free_guidance else 1 latent_space_image = torch.cat([latent_space_image] * batch_multiplier * num_images_per_prompt) noise_level = torch.cat([noise_level] * latent_space_image.shape[0]) # 7. Prepare latent variables height, width = latent_space_image.shape[2:] num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 8. Check that sizes of image and latents match num_channels_image = latent_space_image.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_image`: {num_channels_image} " f" = {num_channels_latents+num_channels_image}. Please verify the config of" " `pipeline.unet` or your `image` input." ) # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 10. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, latent_space_image], dim=1) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, class_labels=noise_level, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # 11. Apply watermark if output_type == "pil" and self.watermarker is not None: rgb = self.watermarker.apply_watermark(rgb) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return ((rgb, depth), has_nsfw_concept) return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept)
diffusers/examples/community/pipeline_stable_diffusion_upscale_ldm3d.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_upscale_ldm3d.py", "repo_id": "diffusers", "token_count": 17030 }
106
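In the LDM3D upscale pipeline above, the low-resolution RGB+depth input is encoded to latent space once and then concatenated with the denoising latents along the channel axis at every step, while `noise_level` is fed to the UNet as `class_labels`; that is why the code checks `num_channels_latents + num_channels_image` against `unet.config.in_channels` before the loop. A minimal torch sketch of that channel bookkeeping, using toy shapes and a stand-in for the UNet config rather than the real model:

```python
import torch

batch, num_channels_latents, num_channels_image, h, w = 2, 4, 4, 64, 64
unet_in_channels = num_channels_latents + num_channels_image  # stand-in for unet.config.in_channels

latents = torch.randn(batch, num_channels_latents, h, w)           # denoising latents
latent_space_image = torch.randn(batch, num_channels_image, h, w)  # encoded low-res RGBD
noise_level = torch.full((batch,), 20, dtype=torch.long)           # passed to the UNet as class_labels

# Same sanity check the pipeline performs before the denoising loop.
assert latents.shape[1] + latent_space_image.shape[1] == unet_in_channels

# Input assembly for one step (classifier-free-guidance batching omitted).
latent_model_input = torch.cat([latents, latent_space_image], dim=1)
print(latent_model_input.shape)  # torch.Size([2, 8, 64, 64])
```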
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, ControlNetModel, UNet2DConditionModel, logging from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import numpy as np >>> import torch >>> from PIL import Image >>> from diffusers import ControlNetModel, UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png") >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) >>> pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 ) >>> pipe_controlnet.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config) >>> pipe_controlnet.enable_xformers_memory_efficient_attention() >>> pipe_controlnet.enable_model_cpu_offload() # using image with edges for our canny controlnet >>> control_image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png") >>> result_img = pipe_controlnet(controlnet_conditioning_image=control_image, image=input_image, prompt="an android robot, cyberpank, digitl art masterpiece", num_inference_steps=20).images[0] >>> result_img.show() ``` """ def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image def prepare_controlnet_conditioning_image( controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance, ): if not isinstance(controlnet_conditioning_image, torch.Tensor): if isinstance(controlnet_conditioning_image, PIL.Image.Image): controlnet_conditioning_image = [controlnet_conditioning_image] if isinstance(controlnet_conditioning_image[0], PIL.Image.Image): controlnet_conditioning_image = [ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in controlnet_conditioning_image ] controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0) controlnet_conditioning_image = 
np.array(controlnet_conditioning_image).astype(np.float32) / 255.0 controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2) controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image) elif isinstance(controlnet_conditioning_image[0], torch.Tensor): controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0) image_batch_size = controlnet_conditioning_image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0) controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype) if do_classifier_free_guidance: controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2) return controlnet_conditioning_image class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin): """ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list: raise TypeError( "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors" ) if image_is_pil: image_batch_size = 1 elif image_is_tensor: image_batch_size = image.shape[0] elif image_is_pil_list: image_batch_size = len(image) elif image_is_tensor_list: image_batch_size = len(image) else: raise ValueError("controlnet condition image is not valid") if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] else: raise ValueError("prompt or prompt_embeds are not valid") if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) def check_inputs( self, prompt, image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, strength=None, controlnet_guidance_start=None, controlnet_guidance_end=None, controlnet_conditioning_scale=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # check controlnet condition image if isinstance(self.controlnet, ControlNetModel): self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel): if not isinstance(controlnet_conditioning_image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") if len(controlnet_conditioning_image) != len(self.controlnet.nets): raise ValueError( "For multiple controlnets: `image` must have the same length as the number of controlnets." ) for image_ in controlnet_conditioning_image: self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if isinstance(self.controlnet, ControlNetModel): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif isinstance(self.controlnet, MultiControlNetModel): if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if isinstance(image, torch.Tensor): if image.ndim != 3 and image.ndim != 4: raise ValueError("`image` must have 3 or 4 dimensions") if image.ndim == 3: image_batch_size = 1 image_channels, image_height, image_width = image.shape elif image.ndim == 4: image_batch_size, image_channels, image_height, image_width = image.shape else: assert False if image_channels != 3: raise ValueError("`image` must have 3 channels") if image.min() < -1 or image.max() > 1: raise ValueError("`image` should be in range [-1, 1]") if self.vae.config.latent_channels != self.unet.config.in_channels: raise ValueError( f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received" f" latent channels: {self.vae.config.latent_channels}," f" Please verify the config of `pipeline.unet` and the `pipeline.vae`" ) if strength < 0 or strength > 1: raise ValueError(f"The value of `strength` should in [0.0, 1.0] but is {strength}") if controlnet_guidance_start < 0 or controlnet_guidance_start > 1: raise ValueError( f"The value of `controlnet_guidance_start` should in [0.0, 1.0] but is {controlnet_guidance_start}" ) if controlnet_guidance_end < 0 or controlnet_guidance_end > 1: raise ValueError( f"The value of `controlnet_guidance_end` should in [0.0, 1.0] but is {controlnet_guidance_end}" ) if controlnet_guidance_start > controlnet_guidance_end: raise ValueError( "The value of `controlnet_guidance_start` should be less than `controlnet_guidance_end`, but got" f" `controlnet_guidance_start` {controlnet_guidance_start} >= `controlnet_guidance_end` {controlnet_guidance_end}" ) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, 
num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def _default_height_width(self, height, width, image): if isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[3] height = (height // 8) * 8 # round down to nearest multiple of 8 if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[2] width = (width // 8) * 8 # round down to nearest multiple of 8 return height, width @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, controlnet_conditioning_image: Union[ torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image] ] = None, strength: float = 0.8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, controlnet_guidance_start: float = 0.0, controlnet_guidance_end: float = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. 
image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`): The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can also be accepted as an image. The control image is automatically resized to fit the output image. strength (`float`, *optional*): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. controlnet_guidance_start ('float', *optional*, defaults to 0.0): The percentage of total steps the controlnet starts applying. Must be between 0 and 1. controlnet_guidance_end ('float', *optional*, defaults to 1.0): The percentage of total steps the controlnet ends applying. Must be between 0 and 1. Must be greater than `controlnet_guidance_start`. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height, width = self._default_height_width(height, width, controlnet_conditioning_image) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, strength, controlnet_guidance_start, controlnet_guidance_end, controlnet_conditioning_scale, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets) # 3. 
Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare image, and controlnet_conditioning_image image = prepare_image(image) # condition image(s) if isinstance(self.controlnet, ControlNetModel): controlnet_conditioning_image = prepare_controlnet_conditioning_image( controlnet_conditioning_image=controlnet_conditioning_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) elif isinstance(self.controlnet, MultiControlNetModel): controlnet_conditioning_images = [] for image_ in controlnet_conditioning_image: image_ = prepare_controlnet_conditioning_image( controlnet_conditioning_image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_conditioning_images.append(image_) controlnet_conditioning_image = controlnet_conditioning_images else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # compute the percentage of total steps we are at current_sampling_percent = i / len(timesteps) if ( current_sampling_percent < controlnet_guidance_start or current_sampling_percent > controlnet_guidance_end ): # do not apply the controlnet down_block_res_samples = None mid_block_res_sample = None else: # apply the controlnet down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=prompt_embeds, controlnet_cond=controlnet_conditioning_image, conditioning_scale=controlnet_conditioning_scale, return_dict=False, ) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if output_type == "latent": image = latents has_nsfw_concept = None elif output_type == "pil": # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 10. Convert to PIL image = self.numpy_to_pil(image) else: # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
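Below is a minimal usage sketch for the pipeline defined above. It is not part of the original file: the checkpoint names, image paths, and call values are assumptions, and it relies on loading the file through diffusers' `custom_pipeline` mechanism; the argument names follow the `__call__` signature documented above.

```python
import torch
from PIL import Image
from diffusers import ControlNetModel, DiffusionPipeline

# Assumed checkpoints -- any SD 1.x base model and a matching ControlNet should work.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_img2img",
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("input.png").convert("RGB")           # image to be transformed
control_image = Image.open("canny_edges.png").convert("RGB")  # pre-computed conditioning image

result = pipe(
    prompt="a photo of a castle at sunset",
    image=init_image,
    controlnet_conditioning_image=control_image,
    strength=0.8,
    num_inference_steps=50,
    controlnet_conditioning_scale=1.0,
)
result.images[0].save("out.png")
```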
diffusers/examples/community/stable_diffusion_controlnet_img2img.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_controlnet_img2img.py", "repo_id": "diffusers", "token_count": 19336 }
107
import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers import ( DiffusionPipeline, ImagePipelineOutput, PriorTransformer, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel, ) from diffusers.pipelines.unclip import UnCLIPTextProjModel from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def slerp(val, low, high): """ Find the interpolation point between the 'low' and 'high' values for the given 'val'. See https://en.wikipedia.org/wiki/Slerp for more details on the topic. """ low_norm = low / torch.norm(low) high_norm = high / torch.norm(high) omega = torch.acos((low_norm * high_norm)) so = torch.sin(omega) res = (torch.sin((1.0 - val) * omega) / so) * low + (torch.sin(val * omega) / so) * high return res class UnCLIPTextInterpolationPipeline(DiffusionPipeline): """ Pipeline for prompt-to-prompt interpolation on CLIP text embeddings and using the UnCLIP / Dall-E to decode them to images. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution unet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution unet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]): Scheduler used in the prior denoising process. Just a modified DDPMScheduler. decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process. Just a modified DDPMScheduler. super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler. 
""" prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.__init__ def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__() self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, 
truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask @torch.no_grad() def __call__( self, start_prompt: str, end_prompt: str, steps: int = 5, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, enable_sequential_cpu_offload=True, gpu_id=0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: start_prompt (`str`): The prompt to start the image generation interpolation from. end_prompt (`str`): The prompt to end the image generation interpolation at. steps (`int`, *optional*, defaults to 5): The number of steps over which to interpolate from start_prompt to end_prompt. The pipeline returns the same number of images as this value. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7): The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). 
Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. decoder_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. enable_sequential_cpu_offload (`bool`, *optional*, defaults to `True`): If True, offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. gpu_id (`int`, *optional*, defaults to `0`): The gpu_id to be passed to enable_sequential_cpu_offload. Only works when enable_sequential_cpu_offload is set to True. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. """ if not isinstance(start_prompt, str) or not isinstance(end_prompt, str): raise ValueError( f"`start_prompt` and `end_prompt` should be of type `str` but got {type(start_prompt)} and" f" {type(end_prompt)} instead" ) if enable_sequential_cpu_offload: self.enable_sequential_cpu_offload(gpu_id=gpu_id) device = self._execution_device # Turn the prompts into embeddings. 
inputs = self.tokenizer( [start_prompt, end_prompt], padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ) inputs.to(device) text_model_output = self.text_encoder(**inputs) text_attention_mask = torch.max(inputs.attention_mask[0], inputs.attention_mask[1]) text_attention_mask = torch.cat([text_attention_mask.unsqueeze(0)] * steps).to(device) # Interpolate from the start to end prompt using slerp and add the generated images to an image output pipeline batch_text_embeds = [] batch_last_hidden_state = [] for interp_val in torch.linspace(0, 1, steps): text_embeds = slerp(interp_val, text_model_output.text_embeds[0], text_model_output.text_embeds[1]) last_hidden_state = slerp( interp_val, text_model_output.last_hidden_state[0], text_model_output.last_hidden_state[1] ) batch_text_embeds.append(text_embeds.unsqueeze(0)) batch_last_hidden_state.append(last_hidden_state.unsqueeze(0)) batch_text_embeds = torch.cat(batch_text_embeds) batch_last_hidden_state = torch.cat(batch_last_hidden_state) text_model_output = CLIPTextModelOutput( text_embeds=batch_text_embeds, last_hidden_state=batch_last_hidden_state ) batch_size = text_model_output[0].shape[0] do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt=None, device=device, num_images_per_prompt=1, do_classifier_free_guidance=do_classifier_free_guidance, text_model_output=text_model_output, text_attention_mask=text_attention_mask, ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, None, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_encoder_hidden_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) 
decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, None, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, None, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample image = super_res_latents # done super res # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = 
image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
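A brief usage sketch for the interpolation pipeline above (not part of the original file). The checkpoint name is an assumption; any unCLIP/Karlo-style checkpoint providing the components listed in the class docstring should work when loaded via the `custom_pipeline` mechanism.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha",                 # assumed unCLIP checkpoint
    custom_pipeline="unclip_text_interpolation",
    torch_dtype=torch.float16,
)

# With the default enable_sequential_cpu_offload=True, modules are moved to the GPU on demand.
output = pipe(
    start_prompt="a photo of a victorian house",
    end_prompt="a photo of a modern glass office building",
    steps=5,
)
for i, image in enumerate(output.images):
    image.save(f"interpolation_{i}.png")
```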
diffusers/examples/community/unclip_text_interpolation.py/0
{ "file_path": "diffusers/examples/community/unclip_text_interpolation.py", "repo_id": "diffusers", "token_count": 10699 }
108
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 from diffusers import DiffusionPipeline # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRA(ExamplesTestsAccelerate): def test_dreambooth_lora(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --train_text_encoder --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # check `text_encoder` is present at all. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) keys = lora_state_dict.keys() is_text_encoder_present = any(k.startswith("text_encoder") for k in keys) self.assertTrue(is_text_encoder_present) # the names of the keys of the state dict should either start with `unet` # or `text_encoder`. 
is_correct_naming = all(k.startswith("unet") or k.startswith("text_encoder") for k in keys) self.assertTrue(is_correct_naming) def test_dreambooth_lora_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}) def test_dreambooth_lora_if_model(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --pre_compute_text_embeddings --tokenizer_max_length=77 --text_encoder_use_attention_mask """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. 
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) class DreamBoothLoRASDXL(ExamplesTestsAccelerate): def test_dreambooth_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. 
keys = lora_state_dict.keys() starts_with_unet = all( k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys ) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl_custom_captions(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --caption_column text --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) def test_dreambooth_lora_sdxl_text_encoder_custom_captions(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --caption_column text --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) def test_dreambooth_lora_sdxl_checkpointing_checkpoints_total_limit(self): pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --checkpointing_steps=2 --checkpoints_total_limit=2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe("a prompt", num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_dreambooth_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self): pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 7 --checkpointing_steps=2 --checkpoints_total_limit=2 --train_text_encoder --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe("a prompt", num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, )
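The tests above all reduce to the same manual check on the saved LoRA weights. The sketch below performs that check outside the test harness; the path is a placeholder for whatever `--output_dir` was used.

```python
import safetensors.torch

# Placeholder path: point this at the --output_dir of a finished training run.
state_dict = safetensors.torch.load_file("output_dir/pytorch_lora_weights.safetensors")

print(all("lora" in k for k in state_dict))           # True: every tensor belongs to a LoRA layer
print(sorted({k.split(".")[0] for k in state_dict}))  # e.g. ['unet'] or ['text_encoder', 'unet']
```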
diffusers/examples/dreambooth/test_dreambooth_lora.py/0
{ "file_path": "diffusers/examples/dreambooth/test_dreambooth_lora.py", "repo_id": "diffusers", "token_count": 8107 }
109
# Multi Subject Dreambooth for Inpainting Models

Please note that this project is not actively maintained. However, you can open an issue and tag @gzguevara.

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like Stable Diffusion given just a few (3-5) images of a subject. This project consists of **two parts**. Training Stable Diffusion for inpainting requires prompt-image-mask pairs. The UNet of inpainting models has 5 additional input channels (4 for the encoded masked image and 1 for the mask itself).

**The first part**, the `multi_inpaint_dataset.ipynb` notebook, demonstrates how to make a 🤗 dataset of prompt-image-mask pairs. You can, however, skip the first part and move straight to the second part with the example datasets in this project. ([cat toy dataset masked](https://huggingface.co/datasets/gzguevara/cat_toy_masked), [mr. potato head dataset masked](https://huggingface.co/datasets/gzguevara/mr_potato_head_masked))

**The second part**, the `train_multi_subject_dreambooth_inpaint.py` training script, demonstrates how to implement a training procedure for one or more subjects and adapt it for Stable Diffusion for inpainting.

## 1. Data Collection: Make Prompt-Image-Mask Pairs

Earlier training scripts have provided approaches like random masking for the training images. This project provides a notebook for more precise mask setting.

The notebook can be found here: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JNEASI_B7pLW1srxhgln6nM0HoGAQT32?usp=sharing)

The `multi_inpaint_dataset.ipynb` notebook takes training & validation images, on which the user draws masks and provides prompts, to make prompt-image-mask pairs. This ensures that during training, the loss is computed on the area masking the object of interest, rather than on random areas. Moreover, the `multi_inpaint_dataset.ipynb` notebook allows you to build a validation dataset with corresponding masks for monitoring the training process. Example below:

![train_val_pairs](https://drive.google.com/uc?id=1PzwH8E3icl_ubVmA19G0HZGLImFX3x5I)

You can build multiple datasets for every subject and upload them to the 🤗 hub. Later, when launching the training script, you can indicate the paths of the datasets on which you would like to fine-tune Stable Diffusion for inpainting.

## 2. Train Multi Subject Dreambooth for Inpainting

### 2.1. Setting The Training Configuration

Before launching the training script, make sure to select the target inpainting model, the output directory and the 🤗 datasets.

```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export OUTPUT_DIR="path-to-save-model"

export DATASET_1="gzguevara/mr_potato_head_masked"
export DATASET_2="gzguevara/cat_toy_masked"
... # Further paths to 🤗 datasets
```

### 2.2. Launching The Training Script

```bash
accelerate launch train_multi_subject_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir $DATASET_1 $DATASET_2 \
  --output_dir=$OUTPUT_DIR \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 \
  --learning_rate=3e-6 \
  --max_train_steps=500 \
  --report_to_wandb
```

### 2.3. Fine-tune text encoder with the UNet.

The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results, especially on faces. Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.
___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
accelerate launch train_multi_subject_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir $DATASET_1 $DATASET_2 \
  --output_dir=$OUTPUT_DIR \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 \
  --learning_rate=2e-6 \
  --max_train_steps=500 \
  --report_to_wandb \
  --train_text_encoder
```

## 3. Results

A [![Weights & Biases](https://img.shields.io/badge/Weights%20&%20Biases-Report-blue)](https://wandb.ai/gzguevara/uncategorized/reports/Multi-Subject-Dreambooth-for-Inpainting--Vmlldzo2MzY5NDQ4?accessToken=y0nya2d7baguhbryxaikbfr1203amvn1jsmyl07vk122mrs7tnph037u1nqgse8t) report is provided showing the training progress every 50 steps. Note: the reported Weights & Biases run was performed on an A100 GPU with the following settings:

```bash
accelerate launch train_multi_subject_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir $DATASET_1 $DATASET_2 \
  --output_dir=$OUTPUT_DIR \
  --resolution=512 \
  --train_batch_size=10 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-6 \
  --max_train_steps=500 \
  --report_to_wandb \
  --train_text_encoder
```

Here you can see the target objects on my desk and next to my plant:

![Results](https://drive.google.com/uc?id=1kQisOiiF5cj4rOYjdq8SCZenNsUP2aK0)
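As a quick complement to the dataset links in section 1, here is a sketch of inspecting one of the example datasets. The column names shown in the comments are assumptions and may differ from the actual dataset schema.

```python
from datasets import load_dataset

ds = load_dataset("gzguevara/cat_toy_masked", split="train")
print(ds)        # list the available columns

example = ds[0]  # expected to hold a prompt string plus an image and its mask,
                 # e.g. example["prompt"], example["image"], example["mask"]
```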
diffusers/examples/research_projects/multi_subject_dreambooth_inpainting/README.md/0
{ "file_path": "diffusers/examples/research_projects/multi_subject_dreambooth_inpainting/README.md", "repo_id": "diffusers", "token_count": 1665 }
110
## Training examples

Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets).

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

#### Use ONNXRuntime to accelerate training

In order to leverage onnxruntime to accelerate training, please use `train_unconditional_ort.py`.

The command to train a DDPM UNet model on the Oxford Flowers dataset with onnxruntime:

```bash
accelerate launch train_unconditional_ort.py \
  --dataset_name="huggan/flowers-102-categories" \
  --resolution=64 --center_crop --random_flip \
  --output_dir="ddpm-ema-flowers-64" \
  --use_ema \
  --train_batch_size=16 \
  --num_epochs=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-4 \
  --lr_warmup_steps=500 \
  --mixed_precision=fp16
```

Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions.
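Assuming the run above finishes and writes the model to `ddpm-ema-flowers-64`, sampling from the trained unconditional model can be done with `DDPMPipeline` (a sketch; the output directory must exist locally or on the Hub).

```python
from diffusers import DDPMPipeline

# Load the checkpoint produced by the training command above.
pipeline = DDPMPipeline.from_pretrained("ddpm-ema-flowers-64")
image = pipeline(num_inference_steps=1000).images[0]
image.save("flower_sample.png")
```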
diffusers/examples/research_projects/onnxruntime/unconditional_image_generation/README.md/0
{ "file_path": "diffusers/examples/research_projects/onnxruntime/unconditional_image_generation/README.md", "repo_id": "diffusers", "token_count": 500 }
111
import time

import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from jax import pmap

# Let's cache the model compilation, so that it doesn't take as long the next time around.
from jax.experimental.compilation_cache import compilation_cache as cc

from diffusers import FlaxStableDiffusionXLPipeline


cc.initialize_cache("/tmp/sdxl_cache")
NUM_DEVICES = jax.device_count()

# 1. Let's start by downloading the model and loading it into our pipeline class
# Adhering to JAX's functional approach, the model's parameters are returned separately and
# will have to be passed to the pipeline during inference
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
)

# 2. We cast all parameters to bfloat16 EXCEPT the scheduler which we leave in
# float32 to keep maximal precision
scheduler_state = params.pop("scheduler")
params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
params["scheduler"] = scheduler_state

# 3. Next, we define the different inputs to the pipeline
default_prompt = "a colorful photo of a castle in the middle of a forest with trees and bushes, by Ismail Inceoglu, shadows, high contrast, dynamic shading, hdr, detailed vegetation, digital painting, digital drawing, detailed painting, a detailed digital painting, gothic art, featured on deviantart"
default_neg_prompt = "fog, grainy, purple"
default_seed = 33
default_guidance_scale = 5.0
default_num_steps = 25
width = 1024
height = 1024

# 4. In order to be able to compile the pipeline,
# all inputs have to be tensors or strings
# Let's tokenize the prompt and negative prompt
def tokenize_prompt(prompt, neg_prompt):
    prompt_ids = pipeline.prepare_inputs(prompt)
    neg_prompt_ids = pipeline.prepare_inputs(neg_prompt)
    return prompt_ids, neg_prompt_ids


# 5. To make full use of JAX's parallelization capabilities
# the parameters and input tensors are duplicated across devices
# To make sure every device generates a different image, we create
# different seeds for each image. The model parameters won't change
# during inference so we do not wrap them into a function
p_params = replicate(params)


def replicate_all(prompt_ids, neg_prompt_ids, seed):
    p_prompt_ids = replicate(prompt_ids)
    p_neg_prompt_ids = replicate(neg_prompt_ids)
    rng = jax.random.PRNGKey(seed)
    rng = jax.random.split(rng, NUM_DEVICES)
    return p_prompt_ids, p_neg_prompt_ids, rng


# 6. To compile the pipeline._generate function, we must pass all parameters
# to the function and tell JAX which are static arguments, that is, arguments that
# are known at compile time and won't change. In our case, it is num_inference_steps,
# height, width and return_latents.
# Once the function is compiled, these parameters are omitted from future calls and
# cannot be changed without modifying the code and recompiling.
def aot_compile( prompt=default_prompt, negative_prompt=default_neg_prompt, seed=default_seed, guidance_scale=default_guidance_scale, num_inference_steps=default_num_steps, ): prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt) prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed) g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32) g = g[:, None] return ( pmap(pipeline._generate, static_broadcasted_argnums=[3, 4, 5, 9]) .lower( prompt_ids, p_params, rng, num_inference_steps, # num_inference_steps height, # height width, # width g, None, neg_prompt_ids, False, # return_latents ) .compile() ) start = time.time() print("Compiling ...") p_generate = aot_compile() print(f"Compiled in {time.time() - start}") # 7. Let's now put it all together in a generate function. def generate(prompt, negative_prompt, seed=default_seed, guidance_scale=default_guidance_scale): prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt) prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed) g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32) g = g[:, None] images = p_generate(prompt_ids, p_params, rng, g, None, neg_prompt_ids) # convert the images to PIL images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) return pipeline.numpy_to_pil(np.array(images)) # 8. The first forward pass after AOT compilation still takes a while longer than # subsequent passes, this is because on the first pass, JAX uses Python dispatch, which # Fills the C++ dispatch cache. # When using jit, this extra step is done automatically, but when using AOT compilation, # it doesn't happen until the function call is made. start = time.time() prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang" neg_prompt = "cartoon, illustration, animation. face. male, female" images = generate(prompt, neg_prompt) print(f"First inference in {time.time() - start}") # 9. From this point forward, any calls to generate should result in a faster inference # time and it won't change. start = time.time() prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang" neg_prompt = "cartoon, illustration, animation. face. male, female" images = generate(prompt, neg_prompt) print(f"Inference in {time.time() - start}") for i, image in enumerate(images): image.save(f"castle_{i}.png")
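

# Note: the `.lower(...).compile()` pattern used above is JAX's general ahead-of-time (AOT)
# compilation API and is not specific to diffusers. The self-contained sketch below shows the
# same idea on a trivial function; it is purely illustrative and unrelated to the pipeline above.
def _aot_toy_example():
    def double(x):
        return 2.0 * x

    lowered = jax.jit(double).lower(jnp.ones((4,)))  # trace & lower for this input shape/dtype
    compiled = lowered.compile()  # XLA compilation happens here, once
    return compiled(jnp.arange(4.0))  # later calls skip tracing and recompilation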
diffusers/examples/research_projects/sdxl_flax/sdxl_single_aot.py/0
{ "file_path": "diffusers/examples/research_projects/sdxl_flax/sdxl_single_aot.py", "repo_id": "diffusers", "token_count": 1969 }
112
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning script for Stable Diffusion for text2image with support for LoRA.""" import argparse import logging import math import os import random import shutil from pathlib import Path import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig from peft.utils import get_peft_model_state_dict from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer import diffusers from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import cast_training_params, compute_snr from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, is_wandb_available from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") def save_model_card( repo_id: str, images: list = None, base_model: str = None, dataset_name: str = None, repo_folder: str = None, ): img_str = "" if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" model_description = f""" # LoRA text2image fine-tuning - {repo_id} These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. 
\n {img_str} """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=base_model, model_description=model_description, inference=True, ) tags = [ "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "diffusers-training", "lora", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=1, help=( "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned-lora", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--rank", type=int, default=4, help=("The dimension of the LoRA update matrices."), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") return args DATASET_NAME_MAPPING = { "lambdalabs/pokemon-blip-captions": ("image", "text"), } def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") import wandb # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # freeze parameters of models to save more memory unet.requires_grad_(False) vae.requires_grad_(False) text_encoder.requires_grad_(False) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Freeze the unet parameters before adding adapters for param in unet.parameters(): param.requires_grad_(False) unet_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, init_lora_weights="gaussian", target_modules=["to_k", "to_q", "to_v", "to_out.0"], ) # Move unet, vae and text_encoder to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) # Add adapter and make sure the trainable params are in float32. unet.add_adapter(unet_lora_config) if args.mixed_precision == "fp16": # only upcast trainable parameters (LoRA) into fp32 cast_training_params(unet, dtype=torch.float32) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") lora_layers = filter(lambda p: p.requires_grad, unet.parameters()) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( lora_layers, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("text2image-fine-tune", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): unet.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = lora_layers accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) unwrapped_unet = unwrap_model(unet) unet_lora_state_dict = convert_state_dict_to_diffusers( get_peft_model_state_dict(unwrapped_unet) ) StableDiffusionPipeline.save_lora_weights( save_directory=save_path, unet_lora_layers=unet_lora_state_dict, safe_serialization=True, ) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unwrap_model(unet), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator = generator.manual_seed(args.seed) images = [] with torch.cuda.amp.autocast(): for _ in range(args.num_validation_images): images.append( pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0] ) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unet.to(torch.float32) unwrapped_unet = unwrap_model(unet) unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unwrapped_unet)) StableDiffusionPipeline.save_lora_weights( save_directory=args.output_dir, unet_lora_layers=unet_lora_state_dict, safe_serialization=True, ) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, dataset_name=args.dataset_name, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) # Final inference # Load previous pipeline if args.validation_prompt is not None: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) # load attention processors pipeline.load_lora_weights(args.output_dir) # run inference generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator = generator.manual_seed(args.seed) images = [] with torch.cuda.amp.autocast(): for _ in range(args.num_validation_images): images.append( pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0] ) for tracker in accelerator.trackers: if len(images) != 0: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) accelerator.end_training() if __name__ == "__main__": main()
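

# Usage sketch (not executed as part of training): after the run completes, the LoRA weights
# saved in `--output_dir` can be loaded into a plain Stable Diffusion pipeline for inference.
# The base model id, output directory, and prompt below are placeholders, not values produced
# by this script.
def load_lora_for_inference(
    output_dir="sd-model-finetuned-lora", base_model="runwayml/stable-diffusion-v1-5"
):
    pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float16).to("cuda")
    pipe.load_lora_weights(output_dir)  # attaches the trained LoRA layers to the UNet
    return pipe("A pokemon with green eyes and red legs.", num_inference_steps=30).images[0]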
diffusers/examples/text_to_image/train_text_to_image_lora.py/0
{ "file_path": "diffusers/examples/text_to_image/train_text_to_image_lora.py", "repo_id": "diffusers", "token_count": 18194 }
113
# Würstchen text-to-image fine-tuning

## Running locally with PyTorch

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
cd examples/wuerstchen/text_to_image
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag to the training script. To log in, run:
```bash
huggingface-cli login
```

## Prior training

You can fine-tune the Würstchen prior model with the `train_text_to_image_prior.py` script. Note that we currently support `--gradient_checkpointing` for prior model fine-tuning, so you can use it for more GPU memory constrained setups.

<br>

<!-- accelerate_snippet_start -->
```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch train_text_to_image_prior.py \
  --mixed_precision="fp16" \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=4 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --dataloader_num_workers=4 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --validation_prompts="A robot pokemon, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="wuerstchen-prior-pokemon-model"
```
<!-- accelerate_snippet_end -->

## Training with LoRA

Low-Rank Adaptation of Large Language Models (LoRA) was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow you to control to what extent the model is adapted toward new training images via a `scale` parameter.

### Prior Training

First, you need to set up your development environment as explained in the [installation](#Running-locally-with-PyTorch) section. Make sure to set the `DATASET_NAME` environment variable. Here, we will use the [Pokemon captions dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).
```bash export DATASET_NAME="lambdalabs/pokemon-blip-captions" accelerate launch train_text_to_image_lora_prior.py \ --mixed_precision="fp16" \ --dataset_name=$DATASET_NAME --caption_column="text" \ --resolution=768 \ --train_batch_size=8 \ --num_train_epochs=100 --checkpointing_steps=5000 \ --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ --seed=42 \ --rank=4 \ --validation_prompt="cute dragon creature" \ --report_to="wandb" \ --push_to_hub \ --output_dir="wuerstchen-prior-pokemon-lora" ```
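Before launching either of the commands above, you may want to sanity-check the training data. The dataset can be inspected directly from the Hub; this is a minimal sketch, and the `image`/`text` column names are those used by this particular dataset:

```python
from datasets import load_dataset

dataset = load_dataset("lambdalabs/pokemon-blip-captions", split="train")
print(len(dataset), "examples")
print(dataset[0]["text"])   # caption column, matching --caption_column="text"
dataset[0]["image"].show()  # PIL image
```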
diffusers/examples/wuerstchen/text_to_image/README.md/0
{ "file_path": "diffusers/examples/wuerstchen/text_to_image/README.md", "repo_id": "diffusers", "token_count": 1206 }
114
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel, ) TEST_UNET_CONFIG = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1000, "block_out_channels": [32, 64], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "attn_norm_num_groups": 32, "upsample_type": "resnet", "downsample_type": "resnet", } IMAGENET_64_UNET_CONFIG = { "sample_size": 64, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1000, "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "attn_norm_num_groups": 32, "upsample_type": "resnet", "downsample_type": "resnet", } LSUN_256_UNET_CONFIG = { "sample_size": 256, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], "attention_head_dim": 64, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } CD_SCHEDULER_CONFIG = { "num_train_timesteps": 40, "sigma_min": 0.002, "sigma_max": 80.0, } CT_IMAGENET_64_SCHEDULER_CONFIG = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } CT_LSUN_256_SCHEDULER_CONFIG = { "num_train_timesteps": 151, "sigma_min": 0.002, "sigma_max": 80.0, } def str2bool(v): """ https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse """ if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("boolean value expected") def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False): new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"] new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"] new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"] new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"] new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"] new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"] new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"] new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"] new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"] new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"] if has_skip: new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"] 
new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"] return new_checkpoint def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None): weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0) bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0) new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"] new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"] new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1) new_checkpoint[f"{new_prefix}.to_out.0.weight"] = ( checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1) ) new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1) return new_checkpoint def con_pt_to_diffuser(checkpoint_path: str, unet_config): checkpoint = torch.load(checkpoint_path, map_location="cpu") new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"] if unet_config["num_class_embeds"] is not None: new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"] new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"] down_block_types = unet_config["down_block_types"] layers_per_block = unet_config["layers_per_block"] attention_head_dim = unet_config["attention_head_dim"] channels_list = unet_config["block_out_channels"] current_layer = 1 prev_channels = channels_list[0] for i, layer_type in enumerate(down_block_types): current_channels = channels_list[i] downsample_block_has_skip = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(layers_per_block): new_prefix = f"down_blocks.{i}.resnets.{j}" old_prefix = f"input_blocks.{current_layer}.0" has_skip = True if j == 0 and downsample_block_has_skip else False new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(layers_per_block): new_prefix = f"down_blocks.{i}.resnets.{j}" old_prefix = f"input_blocks.{current_layer}.0" has_skip = True if j == 0 and downsample_block_has_skip else False new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip) new_prefix = f"down_blocks.{i}.attentions.{j}" old_prefix = f"input_blocks.{current_layer}.1" new_checkpoint = convert_attention( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim ) current_layer += 1 if i != len(down_block_types) - 1: new_prefix = f"down_blocks.{i}.downsamplers.0" old_prefix = f"input_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) 
current_layer += 1 prev_channels = current_channels # hardcoded the mid-block for now new_prefix = "mid_block.resnets.0" old_prefix = "middle_block.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) new_prefix = "mid_block.attentions.0" old_prefix = "middle_block.1" new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim) new_prefix = "mid_block.resnets.1" old_prefix = "middle_block.2" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) current_layer = 0 up_block_types = unet_config["up_block_types"] for i, layer_type in enumerate(up_block_types): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1): new_prefix = f"up_blocks.{i}.resnets.{j}" old_prefix = f"output_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True) current_layer += 1 if i != len(up_block_types) - 1: new_prefix = f"up_blocks.{i}.upsamplers.0" old_prefix = f"output_blocks.{current_layer-1}.1" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1): new_prefix = f"up_blocks.{i}.resnets.{j}" old_prefix = f"output_blocks.{current_layer}.0" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True) new_prefix = f"up_blocks.{i}.attentions.{j}" old_prefix = f"output_blocks.{current_layer}.1" new_checkpoint = convert_attention( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim ) current_layer += 1 if i != len(up_block_types) - 1: new_prefix = f"up_blocks.{i}.upsamplers.0" old_prefix = f"output_blocks.{current_layer-1}.2" new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix) new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"] new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"] new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." 
) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") args = parser.parse_args() args.class_cond = str2bool(args.class_cond) ckpt_name = os.path.basename(args.unet_path) print(f"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: unet_config = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): unet_config = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: unet_config = TEST_UNET_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: unet_config["num_class_embeds"] = None converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config) image_unet = UNet2DModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: scheduler_config = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config) consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
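

# Sanity-check sketch (not run automatically): load the converted pipeline back and draw a
# single-step sample. The path mirrors --dump_path above; class label 0 is just an arbitrary
# example for class-conditional checkpoints.
def sample_from_converted(dump_path, class_cond=True):
    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    kwargs = {"class_labels": 0} if class_cond else {}
    # Consistency models support one-step generation, hence num_inference_steps=1.
    return pipe(batch_size=1, num_inference_steps=1, **kwargs).images[0]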
diffusers/scripts/convert_consistency_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_consistency_to_diffusers.py", "repo_id": "diffusers", "token_count": 5773 }
115
import json import os import torch from diffusers import UNet1DModel os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def unet(hor): if hor == 128: down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") block_out_channels = (32, 128, 256) up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") block_out_channels = (32, 64, 128, 256) up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch") state_dict = model.state_dict() config = { "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 65536, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } hf_value_function = UNet1DModel(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) for k, v in mapping.items(): state_dict[v] = state_dict.pop(k) hf_value_function.load_state_dict(state_dict) torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin") with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f: json.dump(config, f) def value_function(): config = { "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 128, 256), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 65536, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") state_dict = model hf_value_function = UNet1DModel(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys())) for k, v in mapping.items(): state_dict[v] = state_dict.pop(k) hf_value_function.load_state_dict(state_dict) torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin") with open("hub/hopper-medium-v2/value_function/config.json", "w") as f: json.dump(config, f) if __name__ == "__main__": unet(32) # unet(128) value_function()
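

# Verification sketch (not executed here): each dumped directory contains a config.json and a
# diffusion_pytorch_model.bin, so it can be reloaded with UNet1DModel.from_pretrained. The path
# below assumes the hor32 UNet produced by unet(32) above.
def check_converted(path="hub/hopper-medium-v2/unet/hor32"):
    model = UNet1DModel.from_pretrained(path)
    num_params = sum(p.numel() for p in model.parameters())
    print(f"loaded {path} with {num_params} parameters")
    return model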
diffusers/scripts/convert_models_diffuser_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_models_diffuser_to_diffusers.py", "repo_id": "diffusers", "token_count": 1700 }
116
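A minimal reload sketch for the converted horizon-32 U-Net saved by `unet(32)` above. It assumes the `hub/hopper-medium-v2/unet/hor32` directory (config.json plus diffusion_pytorch_model.bin) exists locally, and the dummy input shape is inferred from `in_channels=14` and the horizon of 32.

```py
# Reload the converted planning U-Net and run a dummy forward pass.
import torch

from diffusers import UNet1DModel

unet = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")

sample = torch.randn(1, 14, 32)  # (batch, transition channels, planning horizon)
timestep = torch.tensor([10])    # diffusion timestep
with torch.no_grad():
    out = unet(sample, timestep).sample

print(out.shape)  # the U-Net preserves the input shape (out_channels is also 14)
```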
import argparse
import sys

import tensorrt as trt


def convert_models(onnx_path: str, num_controlnet: int, output_path: str, fp16: bool = False, sd_xl: bool = False):
    """
    Function to convert models in stable diffusion controlnet pipeline into TensorRT format

    Example:
    python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.onnx --output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.engine --fp16 --num_controlnet 2

    Example for SD XL:
    python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.onnx --output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --fp16 --num_controlnet 1 --sd_xl

    Returns:
        unet/model.engine

    run test script in diffusers/examples/community
    python test_onnx_controlnet.py --sd_model danbrown/RevAnimated-v1-2-2 --onnx_model_dir path-to-models-stable_diffusion/RevAnimated-v1-2-2 --unet_engine_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --qr_img_path path-to-qr-code-image
    """
    # UNET
    if sd_xl:
        batch_size = 1
        unet_in_channels = 4
        unet_sample_size = 64
        num_tokens = 77
        text_hidden_size = 2048
        img_size = 512

        text_embeds_shape = (2 * batch_size, 1280)
        time_ids_shape = (2 * batch_size, 6)
    else:
        batch_size = 1
        unet_in_channels = 4
        unet_sample_size = 64
        num_tokens = 77
        text_hidden_size = 768
        img_size = 512

    batch_size = 1
    latents_shape = (2 * batch_size, unet_in_channels, unet_sample_size, unet_sample_size)
    embed_shape = (2 * batch_size, num_tokens, text_hidden_size)
    controlnet_conds_shape = (num_controlnet, 2 * batch_size, 3, img_size, img_size)

    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
    TRT_BUILDER = trt.Builder(TRT_LOGGER)
    TRT_RUNTIME = trt.Runtime(TRT_LOGGER)

    network = TRT_BUILDER.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    onnx_parser = trt.OnnxParser(network, TRT_LOGGER)

    parse_success = onnx_parser.parse_from_file(onnx_path)
    for idx in range(onnx_parser.num_errors):
        print(onnx_parser.get_error(idx))
    if not parse_success:
        sys.exit("ONNX model parsing failed")
    print("Load Onnx model done")

    profile = TRT_BUILDER.create_optimization_profile()

    profile.set_shape("sample", latents_shape, latents_shape, latents_shape)
    profile.set_shape("encoder_hidden_states", embed_shape, embed_shape, embed_shape)
    profile.set_shape("controlnet_conds", controlnet_conds_shape, controlnet_conds_shape, controlnet_conds_shape)

    if sd_xl:
        profile.set_shape("text_embeds", text_embeds_shape, text_embeds_shape, text_embeds_shape)
        profile.set_shape("time_ids", time_ids_shape, time_ids_shape, time_ids_shape)

    config = TRT_BUILDER.create_builder_config()
    config.add_optimization_profile(profile)
    config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True)
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)

    plan = TRT_BUILDER.build_serialized_network(network, config)
    if plan is None:
        sys.exit("Failed building engine")
    print("Succeeded building engine")

    engine = TRT_RUNTIME.deserialize_cuda_engine(plan)

    ## save TRT engine
    with open(output_path, "wb") as f:
        f.write(engine.serialize())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline")

    parser.add_argument(
        "--onnx_path",
        type=str,
        required=True,
        help="Path to the onnx checkpoint to convert",
    )

    parser.add_argument("--num_controlnet", type=int)

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.onnx_path, args.num_controlnet, args.output_path, args.fp16, args.sd_xl)
diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py/0
{ "file_path": "diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py", "repo_id": "diffusers", "token_count": 1860 }
117
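For completeness, a short sketch of consuming the `.engine` file that `convert_models` serializes: deserialize it with the TensorRT runtime and create an execution context. The engine path is a placeholder, and the full inference loop (allocating device buffers for `sample`, `encoder_hidden_states`, `controlnet_conds`, ...) is version-dependent and omitted; see `test_onnx_controlnet.py` referenced in the docstring above.

```py
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(TRT_LOGGER)

# Deserialize the serialized engine written by convert_models().
with open("unet/model.engine", "rb") as f:
    engine = runtime.deserialize_cuda_engine(f.read())

assert engine is not None, "engine deserialization failed"
context = engine.create_execution_context()
print("TensorRT engine loaded")
```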
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    FP16SafetensorsCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
diffusers/src/diffusers/commands/diffusers_cli.py/0
{ "file_path": "diffusers/src/diffusers/commands/diffusers_cli.py", "repo_id": "diffusers", "token_count": 411 }
118
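The CLI above dispatches through `args.func(args)` followed by `service.run()`, so each command class only has to register a subparser and set `func` to a factory. The toy command below is a self-contained illustration of that pattern; it is not part of diffusers and does not use its real command base class.

```py
from argparse import ArgumentParser


class HelloCommand:
    """Toy subcommand mirroring the register_subcommand / func / run() contract."""

    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("hello", help="Toy subcommand")
        sub.add_argument("--name", type=str, default="diffusers")
        # `func` is a factory: given parsed args, it returns an object with run().
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")


parser = ArgumentParser("toy-cli", usage="toy-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="toy-cli command helpers")
HelloCommand.register_subcommand(commands_parser)

args = parser.parse_args(["hello", "--name", "world"])
args.func(args).run()  # prints "hello, world"
```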
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import logging logger = logging.get_logger(__name__) def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5): # 1. get all state_dict_keys all_keys = list(state_dict.keys()) sgm_patterns = ["input_blocks", "middle_block", "output_blocks"] # 2. check if needs remapping, if not return original dict is_in_sgm_format = False for key in all_keys: if any(p in key for p in sgm_patterns): is_in_sgm_format = True break if not is_in_sgm_format: return state_dict # 3. Else remap from SGM patterns new_state_dict = {} inner_block_map = ["resnets", "attentions", "upsamplers"] # Retrieves # of down, mid and up blocks input_block_ids, middle_block_ids, output_block_ids = set(), set(), set() for layer in all_keys: if "text" in layer: new_state_dict[layer] = state_dict.pop(layer) else: layer_id = int(layer.split(delimiter)[:block_slice_pos][-1]) if sgm_patterns[0] in layer: input_block_ids.add(layer_id) elif sgm_patterns[1] in layer: middle_block_ids.add(layer_id) elif sgm_patterns[2] in layer: output_block_ids.add(layer_id) else: raise ValueError(f"Checkpoint not supported because layer {layer} not supported.") input_blocks = { layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key] for layer_id in input_block_ids } middle_blocks = { layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key] for layer_id in middle_block_ids } output_blocks = { layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key] for layer_id in output_block_ids } # Rename keys accordingly for i in input_block_ids: block_id = (i - 1) // (unet_config.layers_per_block + 1) layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1) for key in input_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers" inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0" new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1 :] ) new_state_dict[new_key] = state_dict.pop(key) for i in middle_block_ids: key_part = None if i == 0: key_part = [inner_block_map[0], "0"] elif i == 1: key_part = [inner_block_map[1], "0"] elif i == 2: key_part = [inner_block_map[0], "1"] else: raise ValueError(f"Invalid middle block id {i}.") for key in middle_blocks[i]: new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:] ) new_state_dict[new_key] = state_dict.pop(key) for i in output_block_ids: block_id = i // (unet_config.layers_per_block + 1) layer_in_block_id = i % (unet_config.layers_per_block + 1) for key in output_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) inner_block_key = 
inner_block_map[inner_block_id] inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0" new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1 :] ) new_state_dict[new_key] = state_dict.pop(key) if len(state_dict) > 0: raise ValueError("At this point all state dict entries have to be converted.") return new_state_dict def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"): unet_state_dict = {} te_state_dict = {} te2_state_dict = {} network_alphas = {} # every down weight has a corresponding up weight and potentially an alpha weight lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")] for key in lora_keys: lora_name = key.split(".")[0] lora_name_up = lora_name + ".lora_up.weight" lora_name_alpha = lora_name + ".alpha" if lora_name.startswith("lora_unet_"): diffusers_name = key.replace("lora_unet_", "").replace("_", ".") if "input.blocks" in diffusers_name: diffusers_name = diffusers_name.replace("input.blocks", "down_blocks") else: diffusers_name = diffusers_name.replace("down.blocks", "down_blocks") if "middle.block" in diffusers_name: diffusers_name = diffusers_name.replace("middle.block", "mid_block") else: diffusers_name = diffusers_name.replace("mid.block", "mid_block") if "output.blocks" in diffusers_name: diffusers_name = diffusers_name.replace("output.blocks", "up_blocks") else: diffusers_name = diffusers_name.replace("up.blocks", "up_blocks") diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks") diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora") diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora") diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora") diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora") diffusers_name = diffusers_name.replace("proj.in", "proj_in") diffusers_name = diffusers_name.replace("proj.out", "proj_out") diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj") # SDXL specificity. if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name: pattern = r"\.\d+(?=\D*$)" diffusers_name = re.sub(pattern, "", diffusers_name, count=1) if ".in." in diffusers_name: diffusers_name = diffusers_name.replace("in.layers.2", "conv1") if ".out." in diffusers_name: diffusers_name = diffusers_name.replace("out.layers.3", "conv2") if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name: diffusers_name = diffusers_name.replace("op", "conv") if "skip" in diffusers_name: diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut") # LyCORIS specificity. if "time.emb.proj" in diffusers_name: diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj") if "conv.shortcut" in diffusers_name: diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut") # General coverage. 
if "transformer_blocks" in diffusers_name: if "attn1" in diffusers_name or "attn2" in diffusers_name: diffusers_name = diffusers_name.replace("attn1", "attn1.processor") diffusers_name = diffusers_name.replace("attn2", "attn2.processor") unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) elif "ff" in diffusers_name: unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) elif any(key in diffusers_name for key in ("proj_in", "proj_out")): unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) else: unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")): if lora_name.startswith(("lora_te_", "lora_te1_")): key_to_replace = "lora_te_" if lora_name.startswith("lora_te_") else "lora_te1_" else: key_to_replace = "lora_te2_" diffusers_name = key.replace(key_to_replace, "").replace("_", ".") diffusers_name = diffusers_name.replace("text.model", "text_model") diffusers_name = diffusers_name.replace("self.attn", "self_attn") diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") if "self_attn" in diffusers_name: if lora_name.startswith(("lora_te_", "lora_te1_")): te_state_dict[diffusers_name] = state_dict.pop(key) te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) else: te2_state_dict[diffusers_name] = state_dict.pop(key) te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) elif "mlp" in diffusers_name: # Be aware that this is the new diffusers convention and the rest of the code might # not utilize it yet. diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") if lora_name.startswith(("lora_te_", "lora_te1_")): te_state_dict[diffusers_name] = state_dict.pop(key) te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) else: te2_state_dict[diffusers_name] = state_dict.pop(key) te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) # Rename the alphas so that they can be mapped appropriately. if lora_name_alpha in state_dict: alpha = state_dict.pop(lora_name_alpha).item() if lora_name_alpha.startswith("lora_unet_"): prefix = "unet." elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")): prefix = "text_encoder." else: prefix = "text_encoder_2." 
new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha" network_alphas.update({new_name: alpha}) if len(state_dict) > 0: raise ValueError(f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}") logger.info("Kohya-style checkpoint detected.") unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()} te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()} te2_state_dict = ( {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()} if len(te2_state_dict) > 0 else None ) if te2_state_dict is not None: te_state_dict.update(te2_state_dict) new_state_dict = {**unet_state_dict, **te_state_dict} return new_state_dict, network_alphas
diffusers/src/diffusers/loaders/lora_conversion_utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/lora_conversion_utils.py", "repo_id": "diffusers", "token_count": 5925 }
119
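A worked example of the index arithmetic `_maybe_map_sgm_blocks_to_diffusers` applies: SGM `input_blocks.0` is the stem convolution, so input indices are shifted by one before being split into a diffusers down-block id and a layer index inside that block, while `output_blocks` indices are split without the shift. `layers_per_block = 2` below is an assumption (the standard SD U-Net value), used only to make the mapping concrete.

```py
layers_per_block = 2  # assumed value for illustration

# Down path: skip the stem at input_blocks.0, then group (layers_per_block + 1) at a time.
for i in range(1, 10):
    block_id = (i - 1) // (layers_per_block + 1)
    layer_in_block_id = (i - 1) % (layers_per_block + 1)
    print(f"input_blocks.{i} -> down_blocks.{block_id}, inner layer {layer_in_block_id}")

# Up path: there is no stem, so the index is grouped directly.
for i in range(3):
    block_id = i // (layers_per_block + 1)
    layer_in_block_id = i % (layers_per_block + 1)
    print(f"output_blocks.{i} -> up_blocks.{block_id}, inner layer {layer_in_block_id}")
```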
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalVAEMixin from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without loosing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, latents_mean: Optional[Tuple[float]] = None, latents_std: Optional[Tuple[float]] = None, force_upcast: float = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, ) # pass init params to Decoder self.decoder = Decoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) self.use_slicing = False self.use_tiling = False # only relevant if vae tiling is enabled self.tile_sample_min_size = self.config.sample_size sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, Decoder)): module.gradient_checkpointing = value def enable_tiling(self, use_tiling: bool = True): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.use_tiling = use_tiling def disable_tiling(self): r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.enable_tiling(False) def enable_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self): r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.use_slicing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.FloatTensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(x, return_dict=return_dict) if self.use_slicing and x.shape[0] > 1: encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(z, return_dict=return_dict) z = self.post_quant_conv(z) dec = self.decoder(z) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode( self, z: torch.FloatTensor, return_dict: bool = True, generator=None ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, z.shape[2], overlap_size): row = [] for j in range(0, z.shape[3], overlap_size): tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] tile = self.post_quant_conv(tile) decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) dec = torch.cat(result_rows, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.FloatTensor]: r""" Args: sample (`torch.FloatTensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
""" x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py", "repo_id": "diffusers", "token_count": 9113 }
120
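A minimal usage sketch for the `AutoencoderKL` above, exercising the slicing and tiling switches together with an encode/decode round trip. The checkpoint id is an assumption for illustration; any repository with a compatible `vae` subfolder works, and the dummy tensor stands in for images normalized to [-1, 1].

```py
import torch

from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
vae.enable_slicing()  # encode/decode one sample at a time for batches > 1
vae.enable_tiling()   # tile very large inputs to bound memory use

images = torch.randn(2, 3, 512, 512)  # dummy batch in place of real images
with torch.no_grad():
    posterior = vae.encode(images).latent_dist
    latents = posterior.sample()        # (2, 4, 64, 64) for 512x512 inputs
    recon = vae.decode(latents).sample  # back to (2, 3, 512, 512)

print(latents.shape, recon.shape)
```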
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import itertools import os import re from collections import OrderedDict from functools import partial from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union import safetensors import torch from huggingface_hub import create_repo from huggingface_hub.utils import validate_hf_hub_args from torch import Tensor, nn from .. import __version__ from ..utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, SAFETENSORS_FILE_EXTENSION, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, _add_variant, _get_model_file, deprecate, is_accelerate_available, is_torch_version, logging, ) from ..utils.hub_utils import PushToHubMixin, load_or_create_model_card, populate_model_card logger = logging.get_logger(__name__) if is_torch_version(">=", "1.9.0"): _LOW_CPU_MEM_USAGE_DEFAULT = True else: _LOW_CPU_MEM_USAGE_DEFAULT = False if is_accelerate_available(): import accelerate from accelerate.utils import set_module_tensor_to_device from accelerate.utils.versions import is_torch_version def get_parameter_device(parameter: torch.nn.Module) -> torch.device: try: parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers()) return next(parameters_and_buffers).device except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype: try: params = tuple(parameter.parameters()) if len(params) > 0: return params[0].dtype buffers = tuple(parameter.buffers()) if len(buffers) > 0: return buffers[0].dtype except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None): """ Reads a checkpoint file, returning properly formatted errors if they arise. 
""" try: file_extension = os.path.basename(checkpoint_file).split(".")[-1] if file_extension == SAFETENSORS_FILE_EXTENSION: return safetensors.torch.load_file(checkpoint_file, device="cpu") else: weights_only_kwarg = {"weights_only": True} if is_torch_version(">=", "1.13") else {} return torch.load( checkpoint_file, map_location="cpu", **weights_only_kwarg, ) except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError( f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " "model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. " ) def load_model_dict_into_meta( model, state_dict: OrderedDict, device: Optional[Union[str, torch.device]] = None, dtype: Optional[Union[str, torch.dtype]] = None, model_name_or_path: Optional[str] = None, ) -> List[str]: device = device or torch.device("cpu") dtype = dtype or torch.float32 accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys()) unexpected_keys = [] empty_state_dict = model.state_dict() for param_name, param in state_dict.items(): if param_name not in empty_state_dict: unexpected_keys.append(param_name) continue if empty_state_dict[param_name].shape != param.shape: model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else "" raise ValueError( f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example." ) if accepts_dtype: set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype) else: set_module_tensor_to_device(model, param_name, device, value=param) return unexpected_keys def _load_state_dict_into_model(model_to_load, state_dict: OrderedDict) -> List[str]: # Convert old format to new format if needed from a PyTorch state_dict # copy state_dict so _load_from_state_dict can modify it state_dict = state_dict.copy() error_msgs = [] # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module: torch.nn.Module, prefix: str = ""): args = (state_dict, prefix, {}, True, [], [], error_msgs) module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") load(model_to_load) return error_msgs class ModelMixin(torch.nn.Module, PushToHubMixin): r""" Base class for all models. [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and saving models. - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`]. 
""" config_name = CONFIG_NAME _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] _supports_gradient_checkpointing = False _keys_to_ignore_on_load_unexpected = None def __init__(self): super().__init__() def __getattr__(self, name: str) -> Any: """The only reason we overwrite `getattr` here is to gracefully deprecate accessing config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__': https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module """ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) is_attribute = name in self.__dict__ if is_in_config and not is_attribute: deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'." deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3) return self._internal_dict[name] # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module return super().__getattr__(name) @property def is_gradient_checkpointing(self) -> bool: """ Whether gradient checkpointing is activated for this model or not. """ return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) def enable_gradient_checkpointing(self) -> None: """ Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or *checkpoint activations* in other frameworks). """ if not self._supports_gradient_checkpointing: raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") self.apply(partial(self._set_gradient_checkpointing, value=True)) def disable_gradient_checkpointing(self) -> None: """ Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or *checkpoint activations* in other frameworks). """ if self._supports_gradient_checkpointing: self.apply(partial(self._set_gradient_checkpointing, value=False)) def set_use_memory_efficient_attention_xformers( self, valid: bool, attention_op: Optional[Callable] = None ) -> None: # Recursively walk through all the children. # Any children which exposes the set_use_memory_efficient_attention_xformers method # gets the message def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, "set_use_memory_efficient_attention_xformers"): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_mem_eff(module) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None: r""" Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed. <Tip warning={true}> ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedent. 
</Tip> Parameters: attention_op (`Callable`, *optional*): Override the default `None` operator for use as `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers. Examples: ```py >>> import torch >>> from diffusers import UNet2DConditionModel >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp >>> model = UNet2DConditionModel.from_pretrained( ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16 ... ) >>> model = model.to("cuda") >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) ``` """ self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self) -> None: r""" Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). """ self.set_use_memory_efficient_attention_xformers(False) def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Optional[Callable] = None, safe_serialization: bool = True, variant: Optional[str] = None, push_to_hub: bool = False, **kwargs, ): """ Save a model and its configuration file to a directory so that it can be reloaded using the [`~models.ModelMixin.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a model and its configuration file to. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id # Only save the model itself if we are using distributed training model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # Save the model state_dict = model_to_save.state_dict() weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME weights_name = _add_variant(weights_name, variant) # Save the model if safe_serialization: safetensors.torch.save_file( state_dict, Path(save_directory, weights_name).as_posix(), metadata={"format": "pt"} ) else: torch.save(state_dict, Path(save_directory, weights_name).as_posix()) logger.info(f"Model weights saved in {Path(save_directory, weights_name).as_posix()}") if push_to_hub: # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) model_card.save(Path(save_directory, "README.md").as_posix()) self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained PyTorch model from a pretrained model configuration. The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To train the model, set it back in training mode with `model.train()`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`~ModelMixin.save_pretrained`]. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info (`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. 
local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. from_flax (`bool`, *optional*, defaults to `False`): Load the model weights from a Flax checkpoint save file. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if `device_map` contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. variant (`str`, *optional*): Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors` weights. If set to `False`, `safetensors` weights are not loaded. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `huggingface-cli login`. You can also activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. 
</Tip> Example: ```py from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") ``` If you get the error message below, you need to finetune the weights for your downstream task: ```bash Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ cache_dir = kwargs.pop("cache_dir", None) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) from_flax = kwargs.pop("from_flax", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) subfolder = kwargs.pop("subfolder", None) device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None) offload_state_dict = kwargs.pop("offload_state_dict", False) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if device_map is not None and not is_accelerate_available(): raise NotImplementedError( "Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set" " `device_map=None`. You can install accelerate with `pip install accelerate`." ) # Check if we can handle device_map and dispatching the weights if device_map is not None and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" " `device_map=None`." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." ) if low_cpu_mem_usage is False and device_map is not None: raise ValueError( f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and" " dispatching. Please make sure to set `low_cpu_mem_usage=True`." 
) # Load config if we don't provide a configuration config_path = pretrained_model_name_or_path user_agent = { "diffusers": __version__, "file_type": "model", "framework": "pytorch", } # load config config, unused_kwargs, commit_hash = cls.load_config( config_path, cache_dir=cache_dir, return_unused_kwargs=True, return_commit_hash=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, device_map=device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, user_agent=user_agent, **kwargs, ) # load model model_file = None if from_flax: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=FLAX_WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) model = cls.from_config(config, **unused_kwargs) # Convert the weights from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, model_file) else: if use_safetensors: try: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) except IOError as e: if not allow_pickle: raise e pass if model_file is None: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=_add_variant(WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash, ) if low_cpu_mem_usage: # Instantiate model with empty weights with accelerate.init_empty_weights(): model = cls.from_config(config, **unused_kwargs) # if device_map is None, load the state dict and move the params from meta device to the cpu if device_map is None: param_device = "cpu" state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) # move the params from meta device to cpu missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) if len(missing_keys) > 0: raise ValueError( f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are" f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass" " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize" " those weights or else make sure your checkpoint file is correct." ) unexpected_keys = load_model_dict_into_meta( model, state_dict, device=param_device, dtype=torch_dtype, model_name_or_path=pretrained_model_name_or_path, ) if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" ) else: # else let accelerate handle loading and dispatching. 
# Load weights and dispatch according to the device_map # by default the device_map is None and the weights are loaded on the CPU try: accelerate.load_checkpoint_and_dispatch( model, model_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, ) except AttributeError as e: # When using accelerate loading, we do not have the ability to load the state # dict and rename the weight names manually. Additionally, accelerate skips # torch loading conventions and directly writes into `module.{_buffers, _parameters}` # (which look like they should be private variables?), so we can't use the standard hooks # to rename parameters on load. We need to mimic the original weight names so the correct # attributes are available. After we have loaded the weights, we convert the deprecated # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert # the weights so we don't have to do this again. if "'Attention' object has no attribute" in str(e): logger.warning( f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion," " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint," " please also re-upload it or open a PR on the original repository." ) model._temp_convert_self_to_deprecated_attention_blocks() accelerate.load_checkpoint_and_dispatch( model, model_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, ) model._undo_temp_convert_self_to_deprecated_attention_blocks() else: raise e loading_info = { "missing_keys": [], "unexpected_keys": [], "mismatched_keys": [], "error_msgs": [], } else: model = cls.from_config(config, **unused_kwargs) state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model( model, state_dict, model_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, ) loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, "error_msgs": error_msgs, } if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype): raise ValueError( f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}." 
) elif torch_dtype is not None: model = model.to(torch_dtype) model.register_to_config(_name_or_path=pretrained_model_name_or_path) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: return model, loading_info return model @classmethod def _load_pretrained_model( cls, model, state_dict: OrderedDict, resolved_archive_file, pretrained_model_name_or_path: Union[str, os.PathLike], ignore_mismatched_sizes: bool = False, ): # Retrieve missing & unexpected_keys model_state_dict = model.state_dict() loaded_keys = list(state_dict.keys()) expected_keys = list(model_state_dict.keys()) original_loaded_keys = loaded_keys missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) # Make sure we are able to load base models as well as derived models (with heads) model_to_load = model def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes, ): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if ( model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape ): mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys if state_dict is not None: # Whole checkpoint mismatched_keys = _find_mismatched_keys( state_dict, model_state_dict, original_loaded_keys, ignore_mismatched_sizes, ) error_msgs = _load_state_dict_into_model(model_to_load, state_dict) if len(error_msgs) > 0: error_msg = "\n\t".join(error_msgs) if "size mismatch" in error_msg: error_msg += ( "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." ) raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task" " or with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly" " identical (initializing a BertForSequenceClassification model from a" " BertForSequenceClassification model)." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the" f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions" " without further training." 
) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" " able to use it for predictions and inference." ) return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: """ Get number of (trainable or non-embedding) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters. exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embedding parameters. Returns: `int`: The number of parameters. Example: ```py from diffusers import UNet2DConditionModel model_id = "runwayml/stable-diffusion-v1-5" unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") unet.num_parameters(only_trainable=True) 859520964 ``` """ if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, torch.nn.Embedding) ] non_embedding_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) else: return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None: deprecated_attention_block_paths = [] def recursive_find_attn_block(name, module): if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_paths.append(name) for sub_name, sub_module in module.named_children(): sub_name = sub_name if name == "" else f"{name}.{sub_name}" recursive_find_attn_block(sub_name, sub_module) recursive_find_attn_block("", self) # NOTE: we have to check if the deprecated parameters are in the state dict # because it is possible we are loading from a state dict that was already # converted for path in deprecated_attention_block_paths: # group_norm path stays the same # query -> to_q if f"{path}.query.weight" in state_dict: state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight") if f"{path}.query.bias" in state_dict: state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias") # key -> to_k if f"{path}.key.weight" in state_dict: state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight") if f"{path}.key.bias" in state_dict: state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias") # value -> to_v if f"{path}.value.weight" in state_dict: state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight") if 
f"{path}.value.bias" in state_dict: state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias") # proj_attn -> to_out.0 if f"{path}.proj_attn.weight" in state_dict: state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight") if f"{path}.proj_attn.bias" in state_dict: state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias") def _temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module): if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.query = module.to_q module.key = module.to_k module.value = module.to_v module.proj_attn = module.to_out[0] # We don't _have_ to delete the old attributes, but it's helpful to ensure # that _all_ the weights are loaded into the new attributes and we're not # making an incorrect assumption that this model should be converted when # it really shouldn't be. del module.to_q del module.to_k del module.to_v del module.to_out def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module) -> None: if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.to_q = module.query module.to_k = module.key module.to_v = module.value module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)]) del module.query del module.key del module.value del module.proj_attn
diffusers/src/diffusers/models/modeling_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_utils.py", "repo_id": "diffusers", "token_count": 21847 }
121
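The `from_pretrained` path above wires `torch_dtype`, `low_cpu_mem_usage`, and `output_loading_info` together. A minimal, hedged usage sketch of those flags follows; the checkpoint id and subfolder mirror the docstring example above and are only illustrative:

```py
# Hedged usage sketch for the loading flags handled in from_pretrained above.
# The checkpoint id/subfolder follow the docstring example; adjust as needed.
import torch
from diffusers import UNet2DConditionModel

unet, loading_info = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    subfolder="unet",
    torch_dtype=torch.float16,   # must be a torch.dtype, see the type check above
    low_cpu_mem_usage=True,      # requires accelerate and torch >= 1.9.0
    output_loading_info=True,    # also return the missing/unexpected/mismatched key report
)
print(loading_info["missing_keys"], loading_info["unexpected_keys"])
```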
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils import deprecate from .unets.unet_2d import UNet2DModel, UNet2DOutput class UNet2DOutput(UNet2DOutput): deprecation_message = "Importing `UNet2DOutput` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DOutput`, instead." deprecate("UNet2DOutput", "0.29", deprecation_message) class UNet2DModel(UNet2DModel): deprecation_message = "Importing `UNet2DModel` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DModel`, instead." deprecate("UNet2DModel", "0.29", deprecation_message)
diffusers/src/diffusers/models/unet_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/unet_2d.py", "repo_id": "diffusers", "token_count": 410 }
122
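The shim above keeps the old import location alive while steering users to the new module. A quick illustration of the two paths, with behavior as implied by the deprecation messages (the old path resolves but warns):

```py
# Old location still resolves through the shim above (emitting a deprecation
# notice at import time); the new location is the one the message recommends.
from diffusers.models.unet_2d import UNet2DModel as LegacyUNet2DModel   # deprecated path
from diffusers.models.unets.unet_2d import UNet2DModel, UNet2DOutput    # recommended path
```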
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import BaseOutput, logging from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_3d_blocks import UNetMidBlockSpatioTemporal, get_down_block, get_up_block logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetSpatioTemporalConditionOutput(BaseOutput): """ The output of [`UNetSpatioTemporalConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional Spatio-Temporal UNet model that takes noisy video frames, a conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 8): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. addition_time_embed_dim (`int`, defaults to 256): Dimension used to encode the additional time ids. projection_class_embeddings_input_dim (`int`, defaults to 768): The dimension of the projection of encoded `added_time_ids`. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1024): The dimension of the cross attention features. transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`], [`~models.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`], [`~models.unet_3d_blocks.UNetMidBlockSpatioTemporal`]. num_attention_heads (`int`, `Tuple[int]`, defaults to `(5, 10, 20, 20)`): The number of attention heads. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 8, out_channels: int = 4, down_block_types: Tuple[str] = ( "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), up_block_types: Tuple[str] = ( "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ), block_out_channels: Tuple[int] = (320, 640, 1280, 1280), addition_time_embed_dim: int = 256, projection_class_embeddings_input_dim: int = 768, layers_per_block: Union[int, Tuple[int]] = 2, cross_attention_dim: Union[int, Tuple[int]] = 1024, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, num_attention_heads: Union[int, Tuple[int]] = (5, 10, 20, 20), num_frames: int = 25, ): super().__init__() self.sample_size = sample_size # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=3, padding=1, ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, downscale_freq_shift=0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.add_time_proj = Timesteps(addition_time_embed_dim, True, downscale_freq_shift=0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-5, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], resnet_act_fn="silu", ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlockSpatioTemporal( block_out_channels[-1], temb_channels=blocks_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block[-1], cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-5, resolution_idx=i, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], resnet_act_fn="silu", ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-5) 
self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=3, padding=1, ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors( name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor], ): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). 
""" if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, added_time_ids: torch.Tensor, return_dict: bool = True, ) -> Union[UNetSpatioTemporalConditionOutput, Tuple]: r""" The [`UNetSpatioTemporalConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, cross_attention_dim)`. added_time_ids: (`torch.FloatTensor`): The additional time ids with shape `(batch, num_additional_ids)`. These are encoded with sinusoidal embeddings and added to the time embeddings. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] instead of a plain tuple. Returns: [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML batch_size, num_frames = sample.shape[:2] timesteps = timesteps.expand(batch_size) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb) time_embeds = self.add_time_proj(added_time_ids.flatten()) time_embeds = time_embeds.reshape((batch_size, -1)) time_embeds = time_embeds.to(emb.dtype) aug_emb = self.add_embedding(time_embeds) emb = emb + aug_emb # Flatten the batch and frames dimensions # sample: [batch, frames, channels, height, width] -> [batch * frames, channels, height, width] sample = sample.flatten(0, 1) # Repeat the embeddings num_video_frames times # emb: [batch, channels] -> [batch * frames, channels] emb = emb.repeat_interleave(num_frames, dim=0) # encoder_hidden_states: [batch, 1, channels] -> [batch * frames, 1, channels] encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0) # 2. 
pre-process sample = self.conv_in(sample) image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) else: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, image_only_indicator=image_only_indicator, ) down_block_res_samples += res_samples # 4. mid sample = self.mid_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) # 5. up for i, upsample_block in enumerate(self.up_blocks): res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, image_only_indicator=image_only_indicator, ) # 6. post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # 7. Reshape back to original shape sample = sample.reshape(batch_size, num_frames, *sample.shape[1:]) if not return_dict: return (sample,) return UNetSpatioTemporalConditionOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py", "repo_id": "diffusers", "token_count": 9881 }
123
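The `forward` method of the model above folds the frame axis into the batch axis before running the 2D-style blocks, and repeats the per-batch conditioning once per frame. A standalone shape sketch of that bookkeeping (all dimensions here are illustrative):

```py
# Standalone shape sketch of the batch/frame folding done in forward() above:
# a video batch [B, F, C, H, W] is flattened to [B*F, C, H, W] so the 2D-style
# blocks can run, and per-batch conditioning is repeated once per frame.
import torch

batch, frames, channels, height, width = 2, 5, 8, 32, 32
sample = torch.randn(batch, frames, channels, height, width)
emb = torch.randn(batch, 1280)                       # time embedding, one row per batch element
encoder_hidden_states = torch.randn(batch, 1, 1024)  # conditioning, one row per batch element

sample = sample.flatten(0, 1)                                    # [B*F, C, H, W]
emb = emb.repeat_interleave(frames, dim=0)                       # [B*F, 1280]
encoder_hidden_states = encoder_hidden_states.repeat_interleave(frames, dim=0)

print(sample.shape, emb.shape, encoder_hidden_states.shape)
# After the UNet blocks, forward() undoes this with
# sample.reshape(batch, frames, *sample.shape[1:]).
```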
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = { "mel": ["Mel"], "pipeline_audio_diffusion": ["AudioDiffusionPipeline"], } if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .mel import Mel from .pipeline_audio_diffusion import AudioDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 212 }
124
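The `_LazyModule` registration above defers the submodule imports until an exported name is actually accessed. A simplified stand-in for that idea (this is not the diffusers implementation, just a PEP 562 sketch of the same pattern for a package `__init__.py`):

```py
# A minimal sketch of the lazy-import idea behind _LazyModule: names listed in
# _import_structure are resolved on first attribute access via module-level
# __getattr__ (PEP 562). Simplified stand-in, not the diffusers code.
import importlib

_import_structure = {"mel": ["Mel"], "pipeline_audio_diffusion": ["AudioDiffusionPipeline"]}

def __getattr__(name):
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```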
from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py", "repo_id": "diffusers", "token_count": 817 }
125
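The guarded import above swaps in dummy objects when `torch` or `transformers` is missing, so the package still imports and the failure only surfaces when a pipeline is instantiated. A simplified sketch of what such a placeholder can look like (this is not the actual `dummy_torch_and_transformers_objects` code):

```py
# Simplified placeholder sketch: construction fails with a clear message that
# names the missing backends, instead of the whole package failing at import time.
class StableDiffusionModelEditingPipeline:  # stand-in registered when backends are missing
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"{self.__class__.__name__} requires the following backends: {self._backends}. "
            "Install them to use this pipeline."
        )
```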
# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin, Transformer2DModel, VQModel from ....schedulers import VQDiffusionScheduler from ....utils import logging from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin): """ Utility class for storing learned text embeddings for classifier free sampling """ @register_to_config def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None): super().__init__() self.learnable = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" embeddings = torch.zeros(length, hidden_size) else: embeddings = None self.embeddings = torch.nn.Parameter(embeddings) class VQDiffusionPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using VQ Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vqvae ([`VQModel`]): Vector Quantized Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. transformer ([`Transformer2DModel`]): A conditional `Transformer2DModel` to denoise the encoded image latents. scheduler ([`VQDiffusionScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
""" vqvae: VQModel text_encoder: CLIPTextModel tokenizer: CLIPTokenizer transformer: Transformer2DModel learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings scheduler: VQDiffusionScheduler def __init__( self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, ): super().__init__() self.register_modules( vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, ) def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True) # duplicate text embeddings for each generation per prompt prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1) else: uncond_tokens = [""] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # See comment for normalizing text embeddings negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]: """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)): Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor` of shape (batch), *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Must be valid embedding indices.If not provided, a latents tensor will be generated of completely masked latent pixels. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. 
""" if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # get the initial completely masked latents unless the user supplied it latents_shape = (batch_size, self.transformer.num_latent_pixels) if latents is None: mask_class = self.transformer.num_vector_embeds - 1 latents = torch.full(latents_shape, mask_class).to(self.device) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," f" {self.transformer.num_vector_embeds - 1} (inclusive)." ) latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps_tensor = self.scheduler.timesteps.to(self.device) sample = latents for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the sample if we are doing classifier free guidance latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample if do_classifier_free_guidance: model_output_uncond, model_output_text = model_output.chunk(2) model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(model_output, dim=1, keepdim=True) model_output = self.truncate(model_output, truncation_rate) # remove `log(0)`'s (`-inf`s) model_output = model_output.clamp(-70) # compute the previous noisy sample x_t -> x_t-1 sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(i, t, sample) embedding_channels = self.vqvae.config.vq_embed_dim embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape) image = self.vqvae.decode(embeddings, force_not_quantize=True).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor: """ Truncates `log_p_x_0` such that for each column vector, the total cumulative probability is `truncation_rate` The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero. 
""" sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True) sorted_p_x_0 = torch.exp(sorted_log_p_x_0) keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate # Ensure that at least the largest probability is not zeroed out all_true = torch.full_like(keep_mask[:, 0:1, :], True) keep_mask = torch.cat((all_true, keep_mask), dim=1) keep_mask = keep_mask[:, :-1, :] keep_mask = keep_mask.gather(1, indices.argsort(1)) rv = log_p_x_0.clone() rv[~keep_mask] = -torch.inf # -inf = log(0) return rv
diffusers/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py", "repo_id": "diffusers", "token_count": 6462 }
126
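The `truncate` method above keeps only the highest-probability classes whose cumulative mass stays below `truncation_rate` and sends the rest to log(0). A standalone numeric sketch of the same sort/cumsum/mask trick (the probabilities are made up):

```py
# Standalone sketch of the probability-truncation trick used in
# VQDiffusionPipeline.truncate: keep the most likely classes whose cumulative
# probability is below the truncation rate, zero out (log -> -inf) the rest.
import torch

log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))  # [batch, classes, pixels]
truncation_rate = 0.9

sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
keep = torch.cat((torch.full_like(keep[:, :1, :], True), keep), dim=1)[:, :-1, :]
keep = keep.gather(1, indices.argsort(1))

truncated = log_p.clone()
truncated[~keep] = -torch.inf
print(truncated.exp())  # -> probabilities [0.5, 0.3, 0.15, 0.0]: the 0.05 class is dropped
```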
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import torch from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import ( logging, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` """ # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 new_width = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class KandinskyV22ControlnetPipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. """ model_cpu_offload_seq = "unet->movq" def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. hint (`torch.FloatTensor`): The controlnet condition. image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for negative text prompt, will be used to condition the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that is called every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 

Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if isinstance(hint, list): hint = torch.cat(hint, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) hint = hint.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=self.unet.dtype, device=device ) hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.movq.config.latent_channels height, width = downscale_height_and_width(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, ) for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, )[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] # Offload all models self.maybe_free_model_hooks() if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py", "repo_id": "diffusers", "token_count": 6129 }
127
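The classifier-free-guidance step in the Kandinsky 2.2 ControlNet loop above is easy to misread: the UNet output stacks a noise estimate and learned-variance channels, guidance is applied only to the noise half, and the conditional variance channels are re-attached before the scheduler step. The sketch below replays just that tensor bookkeeping on toy shapes; the batch size, channel count, and `guidance_scale` value are illustrative assumptions, not values from a real checkpoint.

```py
import torch

# Toy setup: uncond + cond stacked on the batch dim, 4 latent channels plus
# 4 learned-variance channels, 8x8 latents.
latent_channels = 4
guidance_scale = 4.0
model_output = torch.randn(2, 2 * latent_channels, 8, 8)

# Split the model output into the noise estimate and the variance estimate.
noise_pred, variance_pred = model_output.split(latent_channels, dim=1)

# Separate the unconditional and conditional halves, then apply guidance to the noise only.
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
_, variance_pred_text = variance_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Re-attach the conditional variance channels so the scheduler sees the full prediction.
guided = torch.cat([guided, variance_pred_text], dim=1)
print(guided.shape)  # torch.Size([1, 8, 8, 8])
```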
# Copyright 2024 PixArt-Alpha Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect import re import urllib.parse as ul from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn.functional as F from transformers import T5EncoderModel, T5Tokenizer from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, Transformer2DModel from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, deprecate, is_bs4_available, is_ftfy_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import PixArtAlphaPipeline >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-XL-2-512x512" too. >>> pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16) >>> # Enable memory optimizations. >>> pipe.enable_model_cpu_offload() >>> prompt = "A small cactus with a happy face in the Sahara desert." 
>>> image = pipe(prompt).images[0] ``` """ ASPECT_RATIO_1024_BIN = { "0.25": [512.0, 2048.0], "0.28": [512.0, 1856.0], "0.32": [576.0, 1792.0], "0.33": [576.0, 1728.0], "0.35": [576.0, 1664.0], "0.4": [640.0, 1600.0], "0.42": [640.0, 1536.0], "0.48": [704.0, 1472.0], "0.5": [704.0, 1408.0], "0.52": [704.0, 1344.0], "0.57": [768.0, 1344.0], "0.6": [768.0, 1280.0], "0.68": [832.0, 1216.0], "0.72": [832.0, 1152.0], "0.78": [896.0, 1152.0], "0.82": [896.0, 1088.0], "0.88": [960.0, 1088.0], "0.94": [960.0, 1024.0], "1.0": [1024.0, 1024.0], "1.07": [1024.0, 960.0], "1.13": [1088.0, 960.0], "1.21": [1088.0, 896.0], "1.29": [1152.0, 896.0], "1.38": [1152.0, 832.0], "1.46": [1216.0, 832.0], "1.67": [1280.0, 768.0], "1.75": [1344.0, 768.0], "2.0": [1408.0, 704.0], "2.09": [1472.0, 704.0], "2.4": [1536.0, 640.0], "2.5": [1600.0, 640.0], "3.0": [1728.0, 576.0], "4.0": [2048.0, 512.0], } ASPECT_RATIO_512_BIN = { "0.25": [256.0, 1024.0], "0.28": [256.0, 928.0], "0.32": [288.0, 896.0], "0.33": [288.0, 864.0], "0.35": [288.0, 832.0], "0.4": [320.0, 800.0], "0.42": [320.0, 768.0], "0.48": [352.0, 736.0], "0.5": [352.0, 704.0], "0.52": [352.0, 672.0], "0.57": [384.0, 672.0], "0.6": [384.0, 640.0], "0.68": [416.0, 608.0], "0.72": [416.0, 576.0], "0.78": [448.0, 576.0], "0.82": [448.0, 544.0], "0.88": [480.0, 544.0], "0.94": [480.0, 512.0], "1.0": [512.0, 512.0], "1.07": [512.0, 480.0], "1.13": [544.0, 480.0], "1.21": [544.0, 448.0], "1.29": [576.0, 448.0], "1.38": [576.0, 416.0], "1.46": [608.0, 416.0], "1.67": [640.0, 384.0], "1.75": [672.0, 384.0], "2.0": [704.0, 352.0], "2.09": [736.0, 352.0], "2.4": [768.0, 320.0], "2.5": [800.0, 320.0], "3.0": [864.0, 288.0], "4.0": [1024.0, 256.0], } ASPECT_RATIO_256_BIN = { "0.25": [128.0, 512.0], "0.28": [128.0, 464.0], "0.32": [144.0, 448.0], "0.33": [144.0, 432.0], "0.35": [144.0, 416.0], "0.4": [160.0, 400.0], "0.42": [160.0, 384.0], "0.48": [176.0, 368.0], "0.5": [176.0, 352.0], "0.52": [176.0, 336.0], "0.57": [192.0, 336.0], "0.6": [192.0, 320.0], "0.68": [208.0, 304.0], "0.72": [208.0, 288.0], "0.78": [224.0, 288.0], "0.82": [224.0, 272.0], "0.88": [240.0, 272.0], "0.94": [240.0, 256.0], "1.0": [256.0, 256.0], "1.07": [256.0, 240.0], "1.13": [272.0, 240.0], "1.21": [272.0, 224.0], "1.29": [288.0, 224.0], "1.38": [288.0, 208.0], "1.46": [304.0, 208.0], "1.67": [320.0, 192.0], "1.75": [336.0, 192.0], "2.0": [352.0, 176.0], "2.09": [368.0, 176.0], "2.4": [384.0, 160.0], "2.5": [400.0, 160.0], "3.0": [432.0, 144.0], "4.0": [512.0, 128.0], } # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. 
If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class PixArtAlphaPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using PixArt-Alpha. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`T5EncoderModel`]): Frozen text-encoder. PixArt-Alpha uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. tokenizer (`T5Tokenizer`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). transformer ([`Transformer2DModel`]): A text conditioned `Transformer2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
""" bad_punct_regex = re.compile( r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}" ) # noqa _optional_components = ["tokenizer", "text_encoder"] model_cpu_offload_seq = "text_encoder->transformer->vae" def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: Transformer2DModel, scheduler: DPMSolverMultistepScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/utils.py def mask_text_embeddings(self, emb, mask): if emb.shape[0] == 1: keep_index = mask.sum().item() return emb[:, :, :keep_index, :], keep_index else: masked_feature = emb * mask[:, None, :, None] return masked_feature, emb.shape[2] # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool = True, negative_prompt: str = "", num_images_per_prompt: int = 1, device: Optional[torch.device] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, prompt_attention_mask: Optional[torch.FloatTensor] = None, negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, clean_caption: bool = False, max_sequence_length: int = 120, **kwargs, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For PixArt-Alpha, this should be "". do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Alpha, it's should be the embeddings of the "" string. clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 120): Maximum sequence length to use for the prompt. """ if "mask_feature" in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # See Section 3.1. 
of the paper. max_length = max_sequence_length if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {max_length} tokens: {removed_text}" ) prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) negative_prompt_attention_mask = uncond_input.attention_mask negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the 
DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt, callback_steps, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." 
) # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", "person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption) # 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) ####################################################### # все виды тире / all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # кавычки к одному стандарту caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # ip adresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption) # "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . 
" # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip() caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @staticmethod def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]: """Returns binned height and width.""" ar = float(height / width) closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) default_hw = ratios[closest_ratio] return int(default_hw[0]), int(default_hw[1]) @staticmethod def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor: orig_height, orig_width = samples.shape[2], samples.shape[3] # Check if resizing is needed if orig_height != new_height or orig_width != new_width: ratio = max(new_height / orig_height, new_width / orig_width) resized_width = int(orig_width * ratio) resized_height = int(orig_height * ratio) # Resize samples = F.interpolate( samples, size=(resized_height, resized_width), mode="bilinear", align_corners=False ) # Center Crop start_x = (resized_width - new_width) // 2 end_x = start_x + new_width start_y = (resized_height - new_height) // 2 end_y = start_y + new_height samples = samples[:, :, start_y:end_y, start_x:end_x] return samples @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: str = "", num_inference_steps: int = 20, timesteps: List[int] = None, guidance_scale: float = 4.5, num_images_per_prompt: Optional[int] = 1, height: Optional[int] = None, width: Optional[int] = None, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, prompt_attention_mask: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, clean_caption: bool = True, use_resolution_binning: bool = True, max_sequence_length: int = 120, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 4.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). 
Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Alpha this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for negative text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. clean_caption (`bool`, *optional*, defaults to `True`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt. use_resolution_binning (`bool`, *optional*, defaults to `True`): If set to `True`, the requested height and width are first mapped to the closest resolutions using `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to the requested resolution. Useful for generating non-square images. max_sequence_length (`int`, defaults to 120): Maximum sequence length to use with the `prompt`. 
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ if "mask_feature" in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) # 1. Check inputs. Raise error if not correct height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor if use_resolution_binning: if self.transformer.config.sample_size == 128: aspect_ratio_bin = ASPECT_RATIO_1024_BIN elif self.transformer.config.sample_size == 64: aspect_ratio_bin = ASPECT_RATIO_512_BIN elif self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_256_BIN else: raise ValueError("Invalid sample size") orig_height, orig_width = height, width height, width = self.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs( prompt, height, width, negative_prompt, callback_steps, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) # 2. Default height and width to transformer if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Prepare micro-conditions. 
added_cond_kwargs = {"resolution": None, "aspect_ratio": None} if self.transformer.config.sample_size == 128: resolution = torch.tensor([height, width]).repeat(batch_size * num_images_per_prompt, 1) aspect_ratio = torch.tensor([float(height / width)]).repeat(batch_size * num_images_per_prompt, 1) resolution = resolution.to(dtype=prompt_embeds.dtype, device=device) aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device) if do_classifier_free_guidance: resolution = torch.cat([resolution, resolution], dim=0) aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0) added_cond_kwargs = {"resolution": resolution, "aspect_ratio": aspect_ratio} # 7. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = latent_model_input.device.type == "mps" if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML current_timestep = current_timestep.expand(latent_model_input.shape[0]) # predict noise model_output noise_pred = self.transformer( latent_model_input, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, timestep=current_timestep, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: noise_pred = noise_pred.chunk(2, dim=1)[0] else: noise_pred = noise_pred # compute previous image: x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if use_resolution_binning: image = self.resize_and_crop_tensor(image, orig_width, orig_height) else: image = latents if not output_type == "latent": image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
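With `use_resolution_binning=True`, the pipeline above snaps the requested size to the nearest entry of an aspect-ratio bin, generates at that size, and resizes the decoded image back afterwards. The sketch below reimplements `classify_height_width_bin` in isolation; the three-entry ratio dict is an illustrative subset of `ASPECT_RATIO_1024_BIN` and the request size is made up.

```py
# Illustrative subset of ASPECT_RATIO_1024_BIN: "ratio" -> [height, width].
ASPECT_RATIO_SUBSET = {
    "0.5": [704.0, 1408.0],
    "1.0": [1024.0, 1024.0],
    "2.0": [1408.0, 704.0],
}


def classify_height_width_bin(height: int, width: int, ratios: dict) -> tuple:
    """Return the binned (height, width) whose aspect ratio is closest to the request."""
    ar = float(height / width)
    closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar))
    default_hw = ratios[closest_ratio]
    return int(default_hw[0]), int(default_hw[1])


# A 900x980 request is closest to the square bin, so generation runs at 1024x1024
# and the decoded image is later resized/cropped back to 900x980.
print(classify_height_width_bin(900, 980, ASPECT_RATIO_SUBSET))  # (1024, 1024)
```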
diffusers/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py", "repo_id": "diffusers", "token_count": 20685 }
128
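The `retrieve_timesteps` helper in the PixArt-Alpha file above accepts either a step count or an explicit timestep list, and only forwards the latter when the scheduler's `set_timesteps` signature supports it. The sketch below exercises both branches against a stand-in scheduler; `ToyScheduler` and its linear 1000-step schedule are assumptions made for illustration, not part of diffusers.

```py
import inspect

import torch


class ToyScheduler:
    """Stand-in scheduler whose set_timesteps accepts both styles of schedule."""

    def set_timesteps(self, num_inference_steps=None, device=None, timesteps=None):
        if timesteps is not None:
            self.timesteps = torch.tensor(timesteps, device=device)
        else:
            # Evenly spaced, descending schedule over 1000 assumed training steps.
            self.timesteps = torch.linspace(999, 0, num_inference_steps, device=device).long()


def retrieve_timesteps(scheduler, num_inference_steps=None, device=None, timesteps=None, **kwargs):
    if timesteps is not None:
        accepts = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts:
            raise ValueError(f"{scheduler.__class__} does not support custom timestep schedules.")
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


scheduler = ToyScheduler()
print(retrieve_timesteps(scheduler, num_inference_steps=4))          # default, evenly spaced schedule
print(retrieve_timesteps(scheduler, timesteps=[800, 600, 300, 50]))  # custom schedule, 4 steps
```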
# Copyright 2024 Harutatsu Akiyama and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import ( AttnProcessor2_0, FusedAttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLInstructPix2PixPipeline >>> from diffusers.utils import load_image >>> resolution = 768 >>> image = load_image( ... "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" ... ).resize((resolution, resolution)) >>> edit_instruction = "Turn sky into a cloudy one" >>> pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( ... "diffusers/sdxl-instructpix2pix-768", torch_dtype=torch.float16 ... ).to("cuda") >>> edited_image = pipe( ... prompt=edit_instruction, ... image=image, ... height=resolution, ... width=resolution, ... guidance_scale=3.0, ... image_guidance_scale=1.5, ... num_inference_steps=30, ... ).images[0] >>> edited_image ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. 
Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class StableDiffusionXLInstructPix2PixPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, ): r""" Pipeline for pixel-level image editing by following text instructions. Based on Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. 
add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt 
uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds_dtype = self.text_encoder_2.dtype if self.text_encoder_2 is not None else self.unet.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.check_inputs def check_inputs( self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_latents( self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: image_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() image = image.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) if image_latents.dtype != self.vae.dtype: image_latents = image_latents.to(dtype=self.vae.dtype) return image_latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 100, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, image_guidance_scale: float = 1.5, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): The image(s) to modify with the pipeline. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. 
                The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
                "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            image_guidance_scale (`float`, *optional*, defaults to 1.5):
                Image guidance scale pushes the generated image towards the initial image `image`. Image guidance
                scale is enabled by setting `image_guidance_scale > 1`. A higher image guidance scale encourages the
                model to generate images that are closely linked to the source image `image`, usually at the expense
                of lower image quality. This pipeline requires a value of at least `1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). aesthetic_score (`float`, *optional*, defaults to 6.0): Used to simulate an aesthetic score of the generated image by influencing the positive text condition. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_aesthetic_score (`float`, *optional*, defaults to 2.5): Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to simulate an aesthetic score of the generated image by influencing the negative text condition. 
Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if image is None: raise ValueError("`image` input cannot be undefined.") # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Preprocess image image = self.image_processor.preprocess(image, height=height, width=width).to(device) # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Prepare Image latents image_latents = self.prepare_image_latents( image, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, do_classifier_free_guidance, ) # 7. Prepare latent variables num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 8. Check that shapes of latents and image match the UNet channels num_channels_image = image_latents.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_image`: {num_channels_image} " f" = {num_channels_latents + num_channels_image}. Please verify the config of" " `pipeline.unet` or your `image` input." ) # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 10. 
Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if do_classifier_free_guidance: # The extra concat similar to how it's done in SD InstructPix2Pix. prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds], dim=0) add_text_embeds = torch.cat( [add_text_embeds, negative_pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0 ) add_time_ids = torch.cat([add_time_ids, add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 11. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Expand the latents if we are doing classifier free guidance. # The latents are expanded 3 times because for pix2pix the guidance # is applied for both the text and the input image. latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents # concat latents, image_latents in the channel dimension scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} noise_pred = self.unet( scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) noise_pred = ( noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_image) + image_guidance_scale * (noise_pred_image - noise_pred_uncond) ) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py", "repo_id": "diffusers", "token_count": 22882 }
129
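The pipeline file above exposes the whole SDXL InstructPix2Pix flow through a single `__call__`. Below is a minimal usage sketch; the checkpoint id, input URL, and output filename are illustrative placeholders rather than values taken from this repository, and any SDXL InstructPix2Pix-compatible weights could be substituted.

# Hedged usage sketch for the pipeline defined above (class name as exported by diffusers).
import torch
from diffusers import StableDiffusionXLInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(
    "your-org/sdxl-instruct-pix2pix",  # hypothetical repo id, replace with real weights
    torch_dtype=torch.float16,
).to("cuda")

# `image` feeds prepare_image_latents(); height/width default to the UNet sample size.
init_image = load_image("https://example.com/input.png").resize((768, 768))  # placeholder URL

result = pipe(
    prompt="make it a snowy winter scene",
    image=init_image,
    num_inference_steps=30,
    guidance_scale=5.0,        # text guidance weight `w`
    image_guidance_scale=1.5,  # extra image guidance used in the 3-way CFG above
)
result.images[0].save("edited.png")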
# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UnCLIPTextProjModel(ModelMixin, ConfigMixin): """ Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the decoder. For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1 """ @register_to_config def __init__( self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ): super().__init__() self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) # parameters for additional clip time embeddings self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim) # parameters for encoder hidden states self.clip_extra_context_tokens = clip_extra_context_tokens self.clip_extra_context_tokens_proj = nn.Linear( clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim ) self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim) def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings image_embeddings_batch_size = image_embeddings.shape[0] classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand( image_embeddings_batch_size, -1 ) image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds # ... 
and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1) text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) return text_encoder_hidden_states, additive_clip_time_embeddings
diffusers/src/diffusers/pipelines/unclip/text_proj.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unclip/text_proj.py", "repo_id": "diffusers", "token_count": 1637 }
130
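The projection module above only reshapes and mixes embeddings, so its effect is easiest to see from tensor shapes. The sketch below runs a toy forward pass; the time-embedding and cross-attention widths, batch size, and sequence length are illustrative assumptions, not values fixed by the model.

# Shape-level sketch of UnCLIPTextProjModel; all dimensions are assumptions for illustration.
import torch
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,       # assumed decoder time-embedding width
    cross_attention_dim=2048,  # assumed decoder cross-attention width
)

batch = 2
image_embeddings = torch.randn(batch, 768)
prompt_embeds = torch.randn(batch, 768)
text_encoder_hidden_states = torch.randn(batch, 77, 768)

hidden_states, additive_time_embeddings = proj(
    image_embeddings=image_embeddings,
    prompt_embeds=prompt_embeds,
    text_encoder_hidden_states=text_encoder_hidden_states,
    do_classifier_free_guidance=False,
)
print(hidden_states.shape)             # torch.Size([2, 81, 2048]) -> 4 extra tokens + 77
print(additive_time_embeddings.shape)  # torch.Size([2, 1536])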
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_pt_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure["scheduling_karras_ve"] = ["KarrasVeScheduler"] _import_structure["scheduling_sde_vp"] = ["ScoreSdeVpScheduler"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/schedulers/deprecated/__init__.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/__init__.py", "repo_id": "diffusers", "token_count": 555 }
131
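Because the `__init__` above routes everything through `_LazyModule`, the deprecated scheduler implementations are only loaded when their names are first accessed. A small illustration of that behaviour, assuming torch is installed so the dummy objects are not substituted:

# The scheduler module is materialised only when the name is first imported or accessed.
from diffusers.schedulers.deprecated import KarrasVeScheduler  # triggers scheduling_karras_ve import here

print(KarrasVeScheduler.__name__)  # "KarrasVeScheduler"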
# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver from dataclasses import dataclass from typing import List, Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, ) @flax.struct.dataclass class DPMSolverMultistepSchedulerState: common: CommonSchedulerState alpha_t: jnp.ndarray sigma_t: jnp.ndarray lambda_t: jnp.ndarray # setable values init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None # running values model_outputs: Optional[jnp.ndarray] = None lower_order_nums: Optional[jnp.int32] = None prev_timestep: Optional[jnp.int32] = None cur_sample: Optional[jnp.ndarray] = None @classmethod def create( cls, common: CommonSchedulerState, alpha_t: jnp.ndarray, sigma_t: jnp.ndarray, lambda_t: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, ): return cls( common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) @dataclass class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput): state: DPMSolverMultistepSchedulerState class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin): """ DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality samples, and it can generate quite good samples even in only 10 steps. For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. 
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. solver_order (`int`, default `2`): the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. prediction_type (`str`, default `epsilon`): indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`, or `v-prediction`. thresholding (`bool`, default `False`): whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). dynamic_thresholding_ratio (`float`, default `0.995`): the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen (https://arxiv.org/abs/2205.11487). sample_max_value (`float`, default `1.0`): the threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++`. algorithm_type (`str`, default `dpmsolver++`): the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided sampling (e.g. stable-diffusion). solver_type (`str`, default `midpoint`): the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are slightly better, so we recommend to use the `midpoint` type. lower_order_final (`bool`, default `True`): whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): the `dtype` used for params and computation. 
""" _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, timestep_spacing: str = "linspace", dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState: if common is None: common = CommonSchedulerState.create(self) # Currently we only support VP-type noise schedule alpha_t = jnp.sqrt(common.alphas_cumprod) sigma_t = jnp.sqrt(1 - common.alphas_cumprod) lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t) # settings for DPM-Solver if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]: raise NotImplementedError(f"{self.config.algorithm_type} does is not implemented for {self.__class__}") if self.config.solver_type not in ["midpoint", "heun"]: raise NotImplementedError(f"{self.config.solver_type} does is not implemented for {self.__class__}") # standard deviation of the initial noise distribution init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DPMSolverMultistepSchedulerState.create( common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) def set_timesteps( self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple ) -> DPMSolverMultistepSchedulerState: """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. shape (`Tuple`): the shape of the samples to be generated. """ last_timestep = self.config.num_train_timesteps if self.config.timestep_spacing == "linspace": timesteps = ( jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = ( (jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32) ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) # initial running values model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype) lower_order_nums = jnp.int32(0) prev_timestep = jnp.int32(-1) cur_sample = jnp.zeros(shape, dtype=self.dtype) return state.replace( num_inference_steps=num_inference_steps, timesteps=timesteps, model_outputs=model_outputs, lower_order_nums=lower_order_nums, prev_timestep=prev_timestep, cur_sample=cur_sample, ) def convert_model_output( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. So we need to first convert the model output to the corresponding type to match the algorithm. Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or DPM-Solver++ for both noise prediction model and data prediction model. Args: model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the converted model output. """ # DPM-Solver++ needs to solve an integral of the data prediction model. if self.config.algorithm_type == "dpmsolver++": if self.config.prediction_type == "epsilon": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." ) if self.config.thresholding: # Dynamic thresholding in https://arxiv.org/abs/2205.11487 dynamic_max_val = jnp.percentile( jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim)) ) dynamic_max_val = jnp.maximum( dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val) ) x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val return x0_pred # DPM-Solver needs to solve an integral of the noise prediction model. elif self.config.algorithm_type == "dpmsolver": if self.config.prediction_type == "epsilon": return model_output elif self.config.prediction_type == "sample": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] epsilon = (sample - alpha_t * model_output) / sigma_t return epsilon elif self.config.prediction_type == "v_prediction": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] epsilon = alpha_t * model_output + sigma_t * sample return epsilon else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." ) def dpm_solver_first_order_update( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the first-order DPM-Solver (equivalent to DDIM). 
See https://arxiv.org/abs/2206.00927 for the detailed derivation. Args: model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0 = prev_timestep, timestep m0 = model_output lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0] alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0] h = lambda_t - lambda_s if self.config.algorithm_type == "dpmsolver++": x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0 elif self.config.algorithm_type == "dpmsolver": x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0 return x_t def multistep_dpm_solver_second_order_update( self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the second-order multistep DPM-Solver. Args: model_output_list (`List[jnp.ndarray]`): direct outputs from learned diffusion model at current and latter timesteps. timestep (`int`): current and latter discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2] m0, m1 = model_output_list[-1], model_output_list[-2] lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1] alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2211.01095 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 ) return x_t def multistep_dpm_solver_third_order_update( self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the third-order multistep DPM-Solver. Args: model_output_list (`List[jnp.ndarray]`): direct outputs from learned diffusion model at current and latter timesteps. timestep (`int`): current and latter discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. 
sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3] m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] lambda_t, lambda_s0, lambda_s1, lambda_s2 = ( state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1], state.lambda_t[s2], ) alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) return x_t def step( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class Returns: [`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1]) model_output = self.convert_model_output(state, model_output, timestep, sample) model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0) model_outputs_new = model_outputs_new.at[-1].set(model_output) state = state.replace( model_outputs=model_outputs_new, prev_timestep=prev_timestep, cur_sample=sample, ) def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: return self.dpm_solver_first_order_update( state, state.model_outputs[-1], state.timesteps[step_index], state.prev_timestep, state.cur_sample, ) def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]]) return self.multistep_dpm_solver_second_order_update( state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample, ) def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array( [ state.timesteps[step_index - 2], state.timesteps[step_index - 1], state.timesteps[step_index], ] ) return self.multistep_dpm_solver_third_order_update( state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample, ) step_2_output = step_2(state) step_3_output = step_3(state) if self.config.solver_order == 2: return step_2_output elif self.config.lower_order_final and len(state.timesteps) < 15: return jax.lax.select( state.lower_order_nums < 2, step_2_output, jax.lax.select( step_index == len(state.timesteps) - 2, step_2_output, step_3_output, ), ) else: return jax.lax.select( state.lower_order_nums < 2, step_2_output, step_3_output, ) step_1_output = step_1(state) step_23_output = step_23(state) if self.config.solver_order == 1: prev_sample = step_1_output elif self.config.lower_order_final and len(state.timesteps) < 15: prev_sample = jax.lax.select( state.lower_order_nums < 1, step_1_output, jax.lax.select( step_index == len(state.timesteps) - 1, step_1_output, step_23_output, ), ) else: prev_sample = jax.lax.select( state.lower_order_nums < 1, step_1_output, step_23_output, ) state = state.replace( lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order), ) if not return_dict: return (prev_sample, state) return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state) def scale_model_input( self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. sample (`jnp.ndarray`): input sample timestep (`int`, optional): current timestep Returns: `jnp.ndarray`: scaled input sample """ return sample def add_noise( self, state: DPMSolverMultistepSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py", "repo_id": "diffusers", "token_count": 13615 }
132
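Since the scheduler above is purely functional (all running values live in `DPMSolverMultistepSchedulerState`), every call returns a new state that must be threaded through the sampling loop. The sketch below exercises that API with a dummy zero "model output" and an assumed latent shape; a real sampler would call a Flax UNet where the stand-in is.

# State-threading sketch for FlaxDPMSolverMultistepScheduler; shape and step count are illustrative.
import jax.numpy as jnp
from diffusers import FlaxDPMSolverMultistepScheduler

scheduler = FlaxDPMSolverMultistepScheduler(num_train_timesteps=1000)
state = scheduler.create_state()

sample_shape = (1, 4, 64, 64)  # assumed latent shape
state = scheduler.set_timesteps(state, num_inference_steps=25, shape=sample_shape)

sample = jnp.zeros(sample_shape, dtype=jnp.float32)
for t in state.timesteps:
    model_output = jnp.zeros(sample_shape, dtype=jnp.float32)  # stand-in for a Flax UNet prediction
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)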
# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from scipy import integrate from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) @flax.struct.dataclass class LMSDiscreteSchedulerState: common: CommonSchedulerState # setable values init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray sigmas: jnp.ndarray num_inference_steps: Optional[int] = None # running values derivatives: Optional[jnp.ndarray] = None @classmethod def create( cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray ): return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) @dataclass class FlaxLMSSchedulerOutput(FlaxSchedulerOutput): state: LMSDiscreteSchedulerState class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): """ Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by Katherine Crowson: https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181 [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear` or `scaled_linear`. trained_betas (`jnp.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): the `dtype` used for params and computation. 
""" _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState] = None) -> LMSDiscreteSchedulerState: if common is None: common = CommonSchedulerState.create(self) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 # standard deviation of the initial noise distribution init_noise_sigma = sigmas.max() return LMSDiscreteSchedulerState.create( common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas, ) def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: """ Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm. Args: state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. timestep (`int`): current discrete timestep in the diffusion chain. Returns: `jnp.ndarray`: scaled input sample """ (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] sigma = state.sigmas[step_index] sample = sample / ((sigma**2 + 1) ** 0.5) return sample def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order): """ Compute a linear multistep coefficient. Args: order (TODO): t (TODO): current_order (TODO): """ def lms_derivative(tau): prod = 1.0 for k in range(order): if current_order == k: continue prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k]) return prod integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=1e-4)[0] return integrated_coeff def set_timesteps( self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> LMSDiscreteSchedulerState: """ Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. 
""" timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) low_idx = jnp.floor(timesteps).astype(jnp.int32) high_idx = jnp.ceil(timesteps).astype(jnp.int32) frac = jnp.mod(timesteps, 1.0) sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx] sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) timesteps = timesteps.astype(jnp.int32) # initial running values derivatives = jnp.zeros((0,) + shape, dtype=self.dtype) return state.replace( timesteps=timesteps, sigmas=sigmas, num_inference_steps=num_inference_steps, derivatives=derivatives, ) def step( self, state: LMSDiscreteSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, order: int = 4, return_dict: bool = True, ) -> Union[FlaxLMSSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. order: coefficient for multi-step inference. return_dict (`bool`): option for returning tuple rather than FlaxLMSSchedulerOutput class Returns: [`FlaxLMSSchedulerOutput`] or `tuple`: [`FlaxLMSSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) sigma = state.sigmas[timestep] # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == "v_prediction": # * c_out + input * c_skip pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) # 2. Convert to an ODE derivative derivative = (sample - pred_original_sample) / sigma state = state.replace(derivatives=jnp.append(state.derivatives, derivative)) if len(state.derivatives) > order: state = state.replace(derivatives=jnp.delete(state.derivatives, 0)) # 3. Compute linear multistep coefficients order = min(timestep + 1, order) lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)] # 4. Compute previous sample based on the derivatives path prev_sample = sample + sum( coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(state.derivatives)) ) if not return_dict: return (prev_sample, state) return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state) def add_noise( self, state: LMSDiscreteSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray: sigma = state.sigmas[timesteps].flatten() sigma = broadcast_to_shape_from_left(sigma, noise.shape) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_lms_discrete_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_lms_discrete_flax.py", "repo_id": "diffusers", "token_count": 4681 }
133
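A minimal driving loop for the Flax LMS scheduler above, as a hedged sketch rather than a drop-in snippet: denoise_model is a placeholder for a real Flax UNet call, and note that step() in this implementation indexes state.sigmas with its timestep argument, so the loop passes the position within state.timesteps rather than the raw timestep value.

import jax
import jax.numpy as jnp
from diffusers import FlaxLMSDiscreteScheduler


def denoise_model(x, t):
    # placeholder for a real Flax UNet apply call
    return jnp.zeros_like(x)


scheduler = FlaxLMSDiscreteScheduler(num_train_timesteps=1000)
state = scheduler.create_state()

sample_shape = (1, 4, 64, 64)
state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample_shape)

rng = jax.random.PRNGKey(0)
sample = jax.random.normal(rng, sample_shape, dtype=jnp.float32) * state.init_noise_sigma

for step_index, t in enumerate(state.timesteps):
    model_input = scheduler.scale_model_input(state, sample, t)
    noise_pred = denoise_model(model_input, t)
    # step() reads state.sigmas[timestep], so the loop index is what it expects here
    sample, state = scheduler.step(state, noise_pred, step_index, sample, return_dict=False)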
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from huggingface_hub.constants import HF_HOME from packaging import version from ..dependency_versions_check import dep_version_check from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available MIN_PEFT_VERSION = "0.6.0" MIN_TRANSFORMERS_VERSION = "4.34.0" _CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES CONFIG_NAME = "config.json" WEIGHTS_NAME = "diffusion_pytorch_model.bin" FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" ONNX_WEIGHTS_NAME = "model.onnx" SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" SAFETENSORS_FILE_EXTENSION = "safetensors" ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules")) DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] # Below should be `True` if the current version of `peft` and `transformers` are compatible with # PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are # available. # For PEFT it is has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0. _required_peft_version = is_peft_available() and version.parse( version.parse(importlib.metadata.version("peft")).base_version ) >= version.parse(MIN_PEFT_VERSION) _required_transformers_version = is_transformers_available() and version.parse( version.parse(importlib.metadata.version("transformers")).base_version ) >= version.parse(MIN_TRANSFORMERS_VERSION) USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version if USE_PEFT_BACKEND and _CHECK_PEFT: dep_version_check("peft")
diffusers/src/diffusers/utils/constants.py/0
{ "file_path": "diffusers/src/diffusers/utils/constants.py", "repo_id": "diffusers", "token_count": 819 }
134
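The PEFT-backend gate in constants.py boils down to two version comparisons. The sketch below re-derives that check outside diffusers; in practice you would simply import USE_PEFT_BACKEND from diffusers.utils, and the helper name here is only for illustration.

import importlib.metadata

from packaging import version

MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"


def _installed_at_least(package: str, minimum: str) -> bool:
    # mirror the `base_version` comparison used in constants.py
    try:
        installed = version.parse(importlib.metadata.version(package)).base_version
    except importlib.metadata.PackageNotFoundError:
        return False
    return version.parse(installed) >= version.parse(minimum)


use_peft_backend = _installed_at_least("peft", MIN_PEFT_VERSION) and _installed_at_least(
    "transformers", MIN_TRANSFORMERS_VERSION
)
print(f"PEFT backend available: {use_peft_backend}")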
import io import random import struct import tempfile from contextlib import contextmanager from typing import List, Union import numpy as np import PIL.Image import PIL.ImageOps from .import_utils import ( BACKENDS_MAPPING, is_opencv_available, ) from .logging import get_logger global_rng = random.Random() logger = get_logger(__name__) @contextmanager def buffered_writer(raw_f): f = io.BufferedWriter(raw_f) yield f f.flush() def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str: if output_gif_path is None: output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name image[0].save( output_gif_path, save_all=True, append_images=image[1:], optimize=False, duration=1000 // fps, loop=0, ) return output_gif_path def export_to_ply(mesh, output_ply_path: str = None): """ Write a PLY file for a mesh. """ if output_ply_path is None: output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name coords = mesh.verts.detach().cpu().numpy() faces = mesh.faces.cpu().numpy() rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) with buffered_writer(open(output_ply_path, "wb")) as f: f.write(b"ply\n") f.write(b"format binary_little_endian 1.0\n") f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) f.write(b"property float x\n") f.write(b"property float y\n") f.write(b"property float z\n") if rgb is not None: f.write(b"property uchar red\n") f.write(b"property uchar green\n") f.write(b"property uchar blue\n") if faces is not None: f.write(bytes(f"element face {len(faces)}\n", "ascii")) f.write(b"property list uchar int vertex_index\n") f.write(b"end_header\n") if rgb is not None: rgb = (rgb * 255.499).round().astype(int) vertices = [ (*coord, *rgb) for coord, rgb in zip( coords.tolist(), rgb.tolist(), ) ] format = struct.Struct("<3f3B") for item in vertices: f.write(format.pack(*item)) else: format = struct.Struct("<3f") for vertex in coords.tolist(): f.write(format.pack(*vertex)) if faces is not None: format = struct.Struct("<B3I") for tri in faces.tolist(): f.write(format.pack(len(tri), *tri)) return output_ply_path def export_to_obj(mesh, output_obj_path: str = None): if output_obj_path is None: output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name verts = mesh.verts.detach().cpu().numpy() faces = mesh.faces.cpu().numpy() vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) vertices = [ "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist()) ] faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()] combined_data = ["v " + vertex for vertex in vertices] + faces with open(output_obj_path, "w") as f: f.writelines("\n".join(combined_data)) def export_to_video( video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10 ) -> str: if is_opencv_available(): import cv2 else: raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) if output_video_path is None: output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name if isinstance(video_frames[0], np.ndarray): video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames] elif isinstance(video_frames[0], PIL.Image.Image): video_frames = [np.array(frame) for frame in video_frames] fourcc = cv2.VideoWriter_fourcc(*"mp4v") h, w, c = video_frames[0].shape video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, 
frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    # release the writer so the container is fully flushed before the path is returned
    video_writer.release()
    return output_video_path
diffusers/src/diffusers/utils/export_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/export_utils.py", "repo_id": "diffusers", "token_count": 2024 }
135
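A short usage sketch for the export helpers above; the frame data and output paths are placeholders, and export_to_video requires opencv-python, as the import guard in the function shows.

import numpy as np
import PIL.Image

from diffusers.utils.export_utils import export_to_gif, export_to_video

# eight dummy frames; in practice these come from a pipeline's output
pil_frames = [PIL.Image.new("RGB", (64, 64), (32 * i, 0, 0)) for i in range(8)]
gif_path = export_to_gif(pil_frames, "sample.gif", fps=10)

# export_to_video accepts float arrays in [0, 1] or PIL images
np_frames = [np.full((64, 64, 3), i / 8.0, dtype=np.float32) for i in range(8)]
mp4_path = export_to_video(np_frames, "sample.mp4", fps=10)
print(gif_path, mp4_path)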
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class PriorTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = PriorTransformer main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device) proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def get_dummy_seed_input(self, seed=0): torch.manual_seed(seed) batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def input_shape(self): return (4, 8) @property def output_shape(self): return (4, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=True ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) hidden_states = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def test_forward_signature(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_output_pretrained(self): model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") model = model.to(torch_device) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() input = self.get_dummy_seed_input() with torch.no_grad(): output = model(**input)[0] output_slice = output[0, :5].flatten().cpu() 
print(output_slice) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) @slow class PriorTransformerIntegrationTests(unittest.TestCase): def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0): torch.manual_seed(seed) batch_size = batch_size embedding_dim = embedding_dim num_embeddings = num_embeddings hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def test_kandinsky_prior(self, seed, expected_slice): model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior") model.to(torch_device) input = self.get_dummy_seed_input(seed=seed) with torch.no_grad(): sample = model(**input)[0] assert list(sample.shape) == [1, 768] output_slice = sample[0, :8].flatten().cpu() print(output_slice) expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
diffusers/tests/models/transformers/test_models_prior.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_prior.py", "repo_id": "diffusers", "token_count": 2766 }
136
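The forward pass those unit tests exercise can be reproduced directly with the same tiny checkpoint; a sketch using the shapes from the test's dummy inputs:

import torch

from diffusers import PriorTransformer

model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
model.eval()

batch_size, embedding_dim, num_embeddings = 4, 8, 7
with torch.no_grad():
    sample = model(
        hidden_states=torch.randn(batch_size, embedding_dim),
        timestep=2,
        proj_embedding=torch.randn(batch_size, embedding_dim),
        encoder_hidden_states=torch.randn(batch_size, num_embeddings, embedding_dim),
    )[0]
print(sample.shape)  # torch.Size([4, 8]), matching the test's `output_shape`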
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from diffusers import UNet2DConditionModel from diffusers.training_utils import EMAModel from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device enable_full_determinism() class EMAModelTests(unittest.TestCase): model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" batch_size = 1 prompt_length = 77 text_encoder_hidden_dim = 32 num_in_channels = 4 latent_height = latent_width = 64 generator = torch.manual_seed(0) def get_models(self, decay=0.9999): unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") unet = unet.to(torch_device) ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) return unet, ema_unet def get_dummy_inputs(self): noisy_latents = torch.randn( self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator ).to(torch_device) timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) encoder_hidden_states = torch.randn( self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator ).to(torch_device) return noisy_latents, timesteps, encoder_hidden_states def simulate_backprop(self, unet): updated_state_dict = {} for k, param in unet.state_dict().items(): updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) updated_state_dict.update({k: updated_param}) unet.load_state_dict(updated_state_dict) return unet def test_optimization_steps_updated(self): unet, ema_unet = self.get_models() # Take the first (hypothetical) EMA step. ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 1 # Take two more. for _ in range(2): ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 3 def test_shadow_params_not_updated(self): unet, ema_unet = self.get_models() # Since the `unet` is not being updated (i.e., backprop'd) # there won't be any difference between the `params` of `unet` # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. ema_unet.step(unet.parameters()) orig_params = list(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) # The above holds true even if we call `ema.step()` multiple times since # `unet` params are still not being updated. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) def test_shadow_params_updated(self): unet, ema_unet = self.get_models() # Here we simulate the parameter updates for `unet`. Since there might # be some parameters which are initialized to zero we take extra care to # initialize their values to something non-zero before the multiplication. unet_pseudo_updated_step_one = self.simulate_backprop(unet) # Take the EMA step. 
ema_unet.step(unet_pseudo_updated_step_one.parameters()) # Now the EMA'd parameters won't be equal to the original model parameters. orig_params = list(unet_pseudo_updated_step_one.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) # Ensure this is the case when we take multiple EMA steps. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) def test_consecutive_shadow_params_updated(self): # If we call EMA step after a backpropagation consecutively for two times, # the shadow params from those two steps should be different. unet, ema_unet = self.get_models() # First backprop + EMA unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params # Second backprop + EMA unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert ~torch.allclose(step_one, step_two) def test_zero_decay(self): # If there's no decay even if there are backprops, EMA steps # won't take any effect i.e., the shadow params would remain the # same. unet, ema_unet = self.get_models(decay=0.0) unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert torch.allclose(step_one, step_two) @skip_mps def test_serialization(self): unet, ema_unet = self.get_models() noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) loaded_unet = loaded_unet.to(unet.device) # Since no EMA step has been performed the outputs should match. output = unet(noisy_latents, timesteps, encoder_hidden_states).sample output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample assert torch.allclose(output, output_loaded, atol=1e-4)
diffusers/tests/others/test_ema.py/0
{ "file_path": "diffusers/tests/others/test_ema.py", "repo_id": "diffusers", "token_count": 2815 }
137
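A condensed sketch of the EMA pattern these tests check: step the shadow copy after each optimizer update, then move the averaged weights into a model before evaluating or saving. copy_to is part of EMAModel's API but is not exercised in the test above, so treat that call as illustrative.

import torch

from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel

unet = UNet2DConditionModel.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-pipe", subfolder="unet"
)
ema_unet = EMAModel(
    unet.parameters(), decay=0.9999, model_cls=UNet2DConditionModel, model_config=unet.config
)

# inside a training loop, after each optimizer.step():
ema_unet.step(unet.parameters())

# before evaluation or checkpointing, copy the averaged weights into the live model
ema_unet.copy_to(unet.parameters())

# the EMA copy can also be serialized and reloaded, as the serialization test does:
# ema_unet.save_pretrained("ema-checkpoint")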
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DanceDiffusionPipeline params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) scheduler = IPNDMScheduler() components = { "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test 
super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dance_diffusion(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.config.sample_size) expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 def test_dance_diffusion_fp16(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.config.sample_size) expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py", "repo_id": "diffusers", "token_count": 2426 }
138
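The integration test above maps onto a very small user-facing snippet; the checkpoint name and call arguments are taken from that test, and a CUDA device is assumed.

import torch

from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # numpy array of shape (channels, samples)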
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from diffusers import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.image_processor import IPAdapterMaskProcessor from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0 from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, is_flaky, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class IPAdapterNightlyTestsMixin(unittest.TestCase): dtype = torch.float16 def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_image_encoder(self, repo_id, subfolder): image_encoder = CLIPVisionModelWithProjection.from_pretrained( repo_id, subfolder=subfolder, torch_dtype=self.dtype ).to(torch_device) return image_encoder def get_image_processor(self, repo_id): image_processor = CLIPImageProcessor.from_pretrained(repo_id) return image_processor def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False, for_masks=False): image = load_image( "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" ) if for_sdxl: image = image.resize((1024, 1024)) input_kwargs = { "prompt": "best quality, high quality", "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", "num_inference_steps": 5, "generator": torch.Generator(device="cpu").manual_seed(33), "ip_adapter_image": image, "output_type": "np", } if for_image_to_image: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") if for_sdxl: image = image.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) elif for_inpainting: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") if for_sdxl: image = image.resize((1024, 1024)) mask = mask.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) elif for_masks: face_image1 = load_image( "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png" ) face_image2 = load_image( "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png" ) mask1 = 
load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png") mask2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png") input_kwargs.update( { "ip_adapter_image": [[face_image1], [face_image2]], "cross_attention_kwargs": {"ip_adapter_masks": [mask1, mask2]}, } ) return input_kwargs @slow @require_torch_gpu class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.80810547, 0.88183594, 0.9296875, 0.9189453, 0.9848633, 1.0, 0.97021484, 1.0, 1.0]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.30444336, 0.26513672, 0.22436523, 0.2758789, 0.25585938, 0.20751953, 0.25390625, 0.24633789, 0.21923828] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_image_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.22167969, 0.21875, 0.21728516, 0.22607422, 0.21948242, 0.23925781, 0.22387695, 0.25268555, 0.2722168] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.35913086, 0.265625, 0.26367188, 0.24658203, 0.19750977, 0.39990234, 0.15258789, 0.20336914, 0.5517578] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_inpainting(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.27148438, 0.24047852, 0.22167969, 0.23217773, 0.21118164, 0.21142578, 
0.21875, 0.20751953, 0.20019531] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_text_to_image_model_cpu_offload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.to(torch_device) inputs = self.get_dummy_inputs() output_without_offload = pipeline(**inputs).images pipeline.enable_model_cpu_offload() inputs = self.get_dummy_inputs() output_with_offload = pipeline(**inputs).images max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-3, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipeline.components.items() if isinstance(v, torch.nn.Module) and k not in pipeline._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) def test_text_to_image_full_face(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin") pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.1704, 0.1296, 0.1272, 0.2212, 0.1514, 0.1479, 0.4172, 0.4263, 0.4360]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_unload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.set_ip_adapter_scale(0.7) pipeline.unload_ip_adapter() assert getattr(pipeline, "image_encoder") is None assert getattr(pipeline, "feature_extractor") is not None processors = [ isinstance(attn_proc, (AttnProcessor, AttnProcessor2_0)) for name, attn_proc in pipeline.unet.attn_processors.items() ] assert processors == [True] * len(processors) @is_flaky def test_multi(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="models", weight_name=["ip-adapter_sd15.bin", "ip-adapter-plus_sd15.bin"] ) 
pipeline.set_ip_adapter_scale([0.7, 0.3]) inputs = self.get_dummy_inputs() ip_adapter_image = inputs["ip_adapter_image"] inputs["ip_adapter_image"] = [ip_adapter_image, [ip_adapter_image] * 2] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.5234, 0.5352, 0.5625, 0.5713, 0.5947, 0.6206, 0.5786, 0.6187, 0.6494]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 @slow @require_torch_gpu class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.09630299, 0.09551358, 0.08480701, 0.09070173, 0.09437338, 0.09264627, 0.08883232, 0.09287417, 0.09197289, ] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.0576596, 0.05600825, 0.04479006, 0.05288461, 0.05461192, 0.05137569, 0.04867965, 0.05301541, 0.04939842] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_image_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.06513795, 0.07009393, 0.07234055, 0.07426041, 0.07002589, 0.06415862, 0.07827643, 0.07962808, 0.07411247, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) 
pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.07126552, 0.07025367, 0.07348302, 0.07580167, 0.07467338, 0.06918576, 0.07480252, 0.08279955, 0.08547315, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_inpainting_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array( [0.14181179, 0.1493012, 0.14283323, 0.14602411, 0.14915377, 0.15015268, 0.14725655, 0.15009224, 0.15164584] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1398, 0.1476, 0.1407, 0.1442, 0.1470, 0.1480, 0.1449, 0.1481, 0.1494]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_ip_adapter_single_mask(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus-face_sdxl_vit-h.safetensors" ) pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs(for_masks=True) mask = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0] processor = IPAdapterMaskProcessor() mask = processor.preprocess(mask) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = mask inputs["ip_adapter_image"] = inputs["ip_adapter_image"][0] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.7307304, 0.73450166, 0.73731124, 0.7377061, 0.7318013, 0.73720926, 0.74746597, 0.7409929, 0.74074936] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_ip_adapter_multiple_masks(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = 
StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2 ) pipeline.set_ip_adapter_scale([0.7] * 2) inputs = self.get_dummy_inputs(for_masks=True) masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"] processor = IPAdapterMaskProcessor() masks = processor.preprocess(masks) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = masks images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4
diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py", "repo_id": "diffusers", "token_count": 10806 }
139
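A condensed version of the text-to-image IP-Adapter flow the SD tests above follow. Model ids, weight names, prompts, and the reference-image URL all come from those tests; the step count is an arbitrary choice and a CUDA device is assumed.

import torch
from transformers import CLIPVisionModelWithProjection

from diffusers import StableDiffusionPipeline
from diffusers.utils import load_image

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16
)
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    image_encoder=image_encoder,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.7)

ip_image = load_image(
    "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png"
)
image = pipe(
    prompt="best quality, high quality",
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    ip_adapter_image=ip_image,
    num_inference_steps=50,
    generator=torch.Generator(device="cpu").manual_seed(33),
).images[0]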