import math
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from einops import rearrange
from typing import Optional, Any

from model.distributions import DiagonalGaussianDistribution
from model.config import Config, AttnMode


def nonlinearity(x):
    # swish
    return x*torch.sigmoid(x)


def Normalize(in_channels, num_groups=32):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)


class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=2,
                                        padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = (0, 1, 0, 1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


class ResnetBlock(nn.Module):
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h


class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        print(f"building AttnBlock (vanilla) with {in_channels} in_channels")

        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = q.reshape(b, c, h*w)
        q = q.permute(0, 2, 1)    # b,hw,c
        k = k.reshape(b, c, h*w)  # b,c,hw
        w_ = torch.bmm(q, k)      # b,hw,hw    w[b,i,j] = sum_c q[b,i,c] k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b, c, h*w)
        w_ = w_.permute(0, 2, 1)  # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v, w_)     # b,c,hw   h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b, c, h, w)

        h_ = self.proj_out(h_)

        return x + h_


class MemoryEfficientAttnBlock(nn.Module):
    """
    Uses xformers efficient implementation,
    see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    Note: this is a single-head self-attention operation
    """

    def __init__(self, in_channels):
        super().__init__()
        print(f"building MemoryEfficientAttnBlock (xformers) with {in_channels} in_channels")
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
        self.attention_op: Optional[Any] = None

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        B, C, H, W = q.shape
        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))

        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(B, t.shape[1], 1, C)
            .permute(0, 2, 1, 3)
            .reshape(B * 1, t.shape[1], C)
            .contiguous(),
            (q, k, v),
        )
        out = Config.xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        out = (
            out.unsqueeze(0)
            .reshape(B, 1, out.shape[1], C)
            .permute(0, 2, 1, 3)
            .reshape(B, out.shape[1], C)
        )
        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
        out = self.proj_out(out)
        return x + out


class SDPAttnBlock(nn.Module):

    def __init__(self, in_channels):
        super().__init__()
        print(f"building SDPAttnBlock (sdp) with {in_channels} in_channels")
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        B, C, H, W = q.shape
        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))

        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(B, t.shape[1], 1, C)
            .permute(0, 2, 1, 3)
            .reshape(B * 1, t.shape[1], C)
            .contiguous(),
            (q, k, v),
        )
        out = F.scaled_dot_product_attention(q, k, v)

        out = (
            out.unsqueeze(0)
            .reshape(B, 1, out.shape[1], C)
            .permute(0, 2, 1, 3)
            .reshape(B, out.shape[1], C)
        )
        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
        out = self.proj_out(out)
        return x + out


def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
    assert attn_type in ["vanilla", "sdp", "xformers", "linear", "none"], f'attn_type {attn_type} unknown'
    if attn_type == "vanilla":
        assert attn_kwargs is None
        return AttnBlock(in_channels)
    elif attn_type == "sdp":
        return SDPAttnBlock(in_channels)
    elif attn_type == "xformers":
        return MemoryEfficientAttnBlock(in_channels)
    elif attn_type == "none":
        return nn.Identity(in_channels)
    else:
        raise NotImplementedError()


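# Illustrative usage of make_attn (hypothetical channel count and spatial size,
# not taken from this repo's configs):
#   attn = make_attn(512, attn_type="vanilla")   # pure-PyTorch AttnBlock
#   attn = make_attn(512, attn_type="sdp")       # F.scaled_dot_product_attention backend
#   attn = make_attn(512, attn_type="xformers")  # requires xformers to be available
#   y = attn(torch.randn(1, 512, 16, 16))        # residual self-attention: y.shape == input shape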
class Encoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False,
                 **ignore_kwargs):
        super().__init__()
        # select attention implementation
        if Config.attn_mode == AttnMode.SDP:
            attn_type = "sdp"
        elif Config.attn_mode == AttnMode.XFORMERS:
            attn_type = "xformers"
        else:
            attn_type = "vanilla"
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # timestep embedding
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h


class Decoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 **ignorekwargs):
        super().__init__()
        # select attention implementation
        if Config.attn_mode == AttnMode.SDP:
            attn_type = "sdp"
        elif Config.attn_mode == AttnMode.XFORMERS:
            attn_type = "xformers"
        else:
            attn_type = "vanilla"
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out
        self.controller = None

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    print("attn")
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, z):
        self.last_z_shape = z.shape

        # timestep embedding
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)

        ''' ToMe '''
        # (ToMe token-merging code omitted)
        ''' ToMe ended '''
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            ''' ToMe '''
            # (ToMe token-merging code omitted)
            ''' ToMe ended '''
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h


class AutoencoderKL(nn.Module):

    def __init__(self, ddconfig, embed_dim):
        super().__init__()
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim

    def encode(self, x, batch_size=0):
        if batch_size:
            # encode in chunks of `batch_size` to bound peak memory
            h = []
            batch_x = x.split(batch_size, dim=0)
            for x_ in batch_x:
                h_ = self.encoder(x_)
                h += [h_]
                torch.cuda.empty_cache()
            h = torch.cat(h)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z, batch_size=0):
        z = self.post_quant_conv(z)
        if batch_size:
            # decode in chunks of `batch_size` to bound peak memory
            dec = []
            batch_z = z.split(batch_size, dim=0)
            for z_ in batch_z:
                z_ = self.decoder(z_)
                dec += [z_]
                torch.cuda.empty_cache()
            dec = torch.cat(dec)
        else:
            dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior
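

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the training/inference pipeline.
    # The ddconfig values below are illustrative assumptions (a tiny model for a
    # quick CPU run), not the repository's actual configuration, and the run
    # assumes Config.attn_mode defaults to a backend available in this
    # environment (e.g. vanilla or sdp).
    ddconfig = dict(
        ch=32,
        out_ch=3,
        ch_mult=(1, 2),
        num_res_blocks=1,
        attn_resolutions=(),
        dropout=0.0,
        resamp_with_conv=True,
        in_channels=3,
        resolution=64,
        z_channels=4,
        double_z=True,
    )
    vae = AutoencoderKL(ddconfig, embed_dim=4)
    x = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        posterior = vae.encode(x)
        z = posterior.mode()   # deterministic latent; use .sample() to draw from the posterior
        rec = vae.decode(z)
    print("latent:", tuple(z.shape), "reconstruction:", tuple(rec.shape))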
|
|