
Commit ed86b81

update drag bench evaluation code
1 parent 7d2e68b commit ed86b81

8 files changed (+1019 -28 lines)

drag_bench_evaluation/dift_sd.py (+232 lines)
@@ -0,0 +1,232 @@
# code credit: https://github.com/Tsingularity/dift/blob/main/src/models/dift_sd.py
from diffusers import StableDiffusionPipeline
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Union
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers import DDIMScheduler
import gc
from PIL import Image


class MyUNet2DConditionModel(UNet2DConditionModel):
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        up_ft_indices,
        encoder_hidden_states: torch.Tensor,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None):
        r"""
        Args:
            sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
            encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
        """
        # By default samples have to be at least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
        # However, the upsampling interpolation output size can be forced to fit any upsampling size
        # on the fly if necessary.
        default_overall_up_factor = 2**self.num_upsamplers

        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            # logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # prepare attention_mask
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=self.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        # 2. pre-process
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            sample = self.mid_block(
                sample,
                emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
            )

        # 5. up
        up_ft = {}
        for i, upsample_block in enumerate(self.up_blocks):

            # stop once all requested up-block features have been collected
            if i > np.max(up_ft_indices):
                break

            is_final_block = i == len(self.up_blocks) - 1

            res_samples = down_block_res_samples[-len(upsample_block.resnets):]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # if we have not reached the final block and need to forward the
            # upsample size, we do it here
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample = upsample_block(
                    hidden_states=sample,
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
                )

            if i in up_ft_indices:
                up_ft[i] = sample.detach()

        output = {}
        output['up_ft'] = up_ft
        return output


class OneStepSDPipeline(StableDiffusionPipeline):
    @torch.no_grad()
    def __call__(
        self,
        img_tensor,
        t,
        up_ft_indices,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None
    ):

        device = self._execution_device
        # encode the image to latents, add noise at timestep t, and run a single U-Net pass
        latents = self.vae.encode(img_tensor).latent_dist.sample() * self.vae.config.scaling_factor
        t = torch.tensor(t, dtype=torch.long, device=device)
        noise = torch.randn_like(latents).to(device)
        latents_noisy = self.scheduler.add_noise(latents, noise, t)
        unet_output = self.unet(latents_noisy,
                                t,
                                up_ft_indices,
                                encoder_hidden_states=prompt_embeds,
                                cross_attention_kwargs=cross_attention_kwargs)
        return unet_output


class SDFeaturizer:
    def __init__(self, sd_id='stabilityai/stable-diffusion-2-1'):
        unet = MyUNet2DConditionModel.from_pretrained(sd_id, subfolder="unet")
        onestep_pipe = OneStepSDPipeline.from_pretrained(sd_id, unet=unet, safety_checker=None)
        onestep_pipe.vae.decoder = None  # the decoder is not needed for feature extraction
        onestep_pipe.scheduler = DDIMScheduler.from_pretrained(sd_id, subfolder="scheduler")
        gc.collect()
        onestep_pipe = onestep_pipe.to("cuda")
        onestep_pipe.enable_attention_slicing()
        # onestep_pipe.enable_xformers_memory_efficient_attention()
        self.pipe = onestep_pipe

    @torch.no_grad()
    def forward(self,
                img_tensor,
                prompt,
                t=261,
                up_ft_index=1,
                ensemble_size=8):
        '''
        Args:
            img_tensor: a single torch tensor in the shape of [1, C, H, W] or [C, H, W]
            prompt: the prompt to use, a string
            t: the time step to use, an int in the range of [0, 1000]
            up_ft_index: which upsampling block of the U-Net to extract features from; choose from [0, 1, 2, 3]
            ensemble_size: the number of repeated images used in the batch to extract features
        Return:
            unet_ft: a torch tensor in the shape of [1, c, h, w]
        '''
        img_tensor = img_tensor.repeat(ensemble_size, 1, 1, 1).cuda()  # ensem, c, h, w
        prompt_embeds = self.pipe._encode_prompt(
            prompt=prompt,
            device='cuda',
            num_images_per_prompt=1,
            do_classifier_free_guidance=False)  # [1, 77, dim]
        prompt_embeds = prompt_embeds.repeat(ensemble_size, 1, 1)
        unet_ft_all = self.pipe(
            img_tensor=img_tensor,
            t=t,
            up_ft_indices=[up_ft_index],
            prompt_embeds=prompt_embeds)
        unet_ft = unet_ft_all['up_ft'][up_ft_index]  # ensem, c, h, w
        unet_ft = unet_ft.mean(0, keepdim=True)  # 1, c, h, w
        return unet_ft
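
A minimal usage sketch (not part of this commit) of how `SDFeaturizer` might be called to extract a DIFT feature map for one image. The file name `example.png`, the 768x768 resize, and the [-1, 1] normalization are illustrative assumptions, and the script assumes this file is importable as `dift_sd` and that a CUDA device is available.

    # Hypothetical usage sketch: extract an averaged U-Net feature map for a single image.
    import torch
    from PIL import Image
    from torchvision.transforms import PILToTensor

    from dift_sd import SDFeaturizer  # assumes this file is on the Python path as dift_sd.py

    featurizer = SDFeaturizer()  # loads stabilityai/stable-diffusion-2-1 onto CUDA

    # Load and normalize the image to [-1, 1]; path and resolution are placeholders.
    img = Image.open('example.png').convert('RGB').resize((768, 768))
    img_tensor = (PILToTensor()(img) / 255.0 - 0.5) * 2  # [C, H, W], float in [-1, 1]

    # Single noisy U-Net pass at t=261, features from up-block 1, averaged over 8 noise draws.
    ft = featurizer.forward(img_tensor,
                            prompt='a photo of an object',
                            t=261,
                            up_ft_index=1,
                            ensemble_size=8)
    print(ft.shape)  # [1, c, h, w] feature map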

drag_bench_evaluation/drag_bench_data/'extract the dragbench dataset here!'

Whitespace-only changes.

0 commit comments