# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
import math
from typing import Optional

import torch
import torch.nn as nn

ACTIVATION_FUNCTIONS = {
    "swish": nn.SiLU(),
    "silu": nn.SiLU(),
    "mish": nn.Mish(),
    "gelu": nn.GELU(),
    "relu": nn.ReLU(),
}


def get_activation(act_fn: str) -> nn.Module:
    """Helper function to get activation function from string.

    Args:
        act_fn (str): Name of activation function.

    Returns:
        nn.Module: Activation function.
    """
    act_fn = act_fn.lower()
    if act_fn in ACTIVATION_FUNCTIONS:
        return ACTIVATION_FUNCTIONS[act_fn]
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")


def get_timestep_embedding(
    timesteps: torch.Tensor,
    embedding_dim: int,
    flip_sin_to_cos: bool = False,
    downscale_freq_shift: float = 1,
    scale: float = 1,
    max_period: int = 10000,
):
    """
    Create sinusoidal timestep embeddings. This matches the implementation in
    Denoising Diffusion Probabilistic Models.

    Args:
        timesteps (torch.Tensor):
            a 1-D Tensor of N indices, one per batch element. These may be fractional.
        embedding_dim (int):
            the dimension of the output.
        flip_sin_to_cos (bool):
            Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False).
        downscale_freq_shift (float):
            Controls the delta between frequencies between dimensions.
        scale (float):
            Scaling factor applied to the embeddings.
        max_period (int):
            Controls the maximum frequency of the embeddings.

    Returns:
        torch.Tensor: an [N x dim] Tensor of positional embeddings.
    """
    assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"

    half_dim = embedding_dim // 2
    exponent = -math.log(max_period) * torch.arange(
        start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
    )
    exponent = exponent / (half_dim - downscale_freq_shift)

    emb = torch.exp(exponent)
    emb = timesteps[:, None].float() * emb[None, :]

    # scale embeddings
    emb = scale * emb

    # concat sine and cosine embeddings
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)

    # flip sine and cosine embeddings
    if flip_sin_to_cos:
        emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)

    # zero pad
    if embedding_dim % 2 == 1:
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
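

# Shape sketch (illustrative values): for a batch of three timesteps,
#   t = torch.tensor([0.0, 500.0, 999.0])
#   get_timestep_embedding(t, embedding_dim=320).shape  # -> torch.Size([3, 320])
# With the defaults, each row is [sin(t * f_0), ..., sin(t * f_159), cos(t * f_0), ..., cos(t * f_159)]
# where f_i = exp(-log(max_period) * i / (half_dim - downscale_freq_shift)).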


class Timesteps(nn.Module):
    def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1):
        super().__init__()
        self.num_channels = num_channels
        self.flip_sin_to_cos = flip_sin_to_cos
        self.downscale_freq_shift = downscale_freq_shift
        self.scale = scale

    def forward(self, timesteps):
        t_emb = get_timestep_embedding(
            timesteps,
            self.num_channels,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
            scale=self.scale,
        )
        return t_emb


class TimestepEmbedding(nn.Module):
    def __init__(
        self,
        in_channels: int,
        time_embed_dim: int,
        act_fn: str = "silu",
        out_dim: Optional[int] = None,
        post_act_fn: Optional[str] = None,
        cond_proj_dim: Optional[int] = None,
        sample_proj_bias: bool = True,
    ):
        super().__init__()

        self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias)

        if cond_proj_dim is not None:
            self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
        else:
            self.cond_proj = None

        self.act = get_activation(act_fn)

        if out_dim is not None:
            time_embed_dim_out = out_dim
        else:
            time_embed_dim_out = time_embed_dim
        self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out, sample_proj_bias)

        if post_act_fn is None:
            self.post_act = None
        else:
            self.post_act = get_activation(post_act_fn)

    def forward(self, sample, condition=None):
        if condition is not None:
            sample = sample + self.cond_proj(condition)
        sample = self.linear_1(sample)

        if self.act is not None:
            sample = self.act(sample)

        sample = self.linear_2(sample)

        if self.post_act is not None:
            sample = self.post_act(sample)
        return sample
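

# Combined sketch: Timesteps produces the fixed sinusoidal features and
# TimestepEmbedding is the learned MLP on top (sizes are illustrative):
#   proj = Timesteps(320, flip_sin_to_cos=True, downscale_freq_shift=0)
#   embed = TimestepEmbedding(in_channels=320, time_embed_dim=1024)
#   t_emb = embed(proj(torch.tensor([10, 990])))  # -> shape (2, 1024)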


# FFN
def FeedForward(dim, mult=4):
    inner_dim = int(dim * mult)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim, bias=False),
        nn.GELU(),
        nn.Linear(inner_dim, dim, bias=False),
    )


def reshape_tensor(x, heads):
    bs, length, width = x.shape
    # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
    x = x.view(bs, length, heads, -1)
    # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
    x = x.transpose(1, 2)
    # reshape makes the transposed tensor contiguous; the shape stays
    # (bs, n_heads, length, dim_per_head)
    x = x.reshape(bs, heads, length, -1)
    return x


class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents, shift=None, scale=None):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, n1, D)
            latents (torch.Tensor): latent features
                shape (b, n2, D)
            shift, scale (torch.Tensor, optional): adaLN modulation applied to
                the normalized latents, shape (b, D)
        """
        x = self.norm1(x)
        latents = self.norm2(latents)

        if shift is not None and scale is not None:
            latents = latents * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)

        b, l, _ = latents.shape

        q = self.to_q(latents)
        # the latents attend over the image features and themselves
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        q = reshape_tensor(q, self.heads)
        k = reshape_tensor(k, self.heads)
        v = reshape_tensor(v, self.heads)

        # attention
        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
        weight = (q * scale) @ (k * scale).transpose(
            -2, -1
        )  # More stable with f16 than dividing afterwards
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)

        return self.to_out(out)
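

# Shape sketch (illustrative sizes): queries come from the latents only, while
# keys/values come from torch.cat((x, latents), dim=-2), so the output keeps
# the latent shape:
#   attn = PerceiverAttention(dim=1024, dim_head=64, heads=16)
#   attn(torch.randn(2, 257, 1024), torch.randn(2, 8, 1024)).shape  # (2, 8, 1024)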


class Resampler(nn.Module):
    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
        *args,
        **kwargs,
    ):
        super().__init__()

        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
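

# Usage sketch (hypothetical sizes, e.g. CLIP-ViT patch features as input):
#   resampler = Resampler(dim=1024, depth=4, num_queries=8, embedding_dim=768, output_dim=1024)
#   x = torch.randn(2, 257, 768)
#   resampler(x).shape  # -> torch.Size([2, 8, 1024])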


class TimeResampler(nn.Module):
    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
        timestep_in_dim=320,
        timestep_flip_sin_to_cos=True,
        timestep_freq_shift=0,
    ):
        super().__init__()

        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        # msa
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        # ff
                        FeedForward(dim=dim, mult=ff_mult),
                        # adaLN
                        nn.Sequential(nn.SiLU(), nn.Linear(dim, 4 * dim, bias=True)),
                    ]
                )
            )

        # time
        self.time_proj = Timesteps(
            timestep_in_dim, timestep_flip_sin_to_cos, timestep_freq_shift
        )
        self.time_embedding = TimestepEmbedding(timestep_in_dim, dim, act_fn="silu")

        # adaLN
        # self.adaLN_modulation = nn.Sequential(
        #     nn.SiLU(),
        #     nn.Linear(timestep_out_dim, 6 * timestep_out_dim, bias=True)
        # )

    def forward(self, x, timestep, need_temb=False):
        timestep_emb = self.embedding_time(x, timestep)  # bs, dim

        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)
        x = x + timestep_emb[:, None]

        for attn, ff, adaLN_modulation in self.layers:
            shift_msa, scale_msa, shift_mlp, scale_mlp = adaLN_modulation(
                timestep_emb
            ).chunk(4, dim=1)
            latents = attn(x, latents, shift_msa, scale_msa) + latents

            # run the FeedForward Sequential layer by layer so the adaLN
            # shift/scale can be applied right after its LayerNorm
            res = latents
            for idx_ff in range(len(ff)):
                layer_ff = ff[idx_ff]
                latents = layer_ff(latents)
                if idx_ff == 0 and isinstance(layer_ff, nn.LayerNorm):  # adaLN
                    latents = latents * (
                        1 + scale_mlp.unsqueeze(1)
                    ) + shift_mlp.unsqueeze(1)
            latents = latents + res

            # latents = ff(latents) + latents

        latents = self.proj_out(latents)
        latents = self.norm_out(latents)

        if need_temb:
            return latents, timestep_emb
        else:
            return latents

    def embedding_time(self, sample, timestep):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, None)
        return emb
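

if __name__ == "__main__":
    # Minimal smoke test (a sketch; the sizes below are illustrative, not the
    # configuration any particular checkpoint was trained with).
    x = torch.randn(2, 257, 768)  # (batch, tokens, embedding_dim)
    t = torch.tensor([10, 990])  # one diffusion timestep per batch element

    model = TimeResampler(
        dim=1024, depth=2, heads=16, num_queries=8, embedding_dim=768, output_dim=1024
    )
    out, temb = model(x, t, need_temb=True)
    print(out.shape)   # torch.Size([2, 8, 1024])
    print(temb.shape)  # torch.Size([2, 1024])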