# hcp_equicon_train.py
import os
import random

import footsteps
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.utils

import icon_registration as icon
import icon_registration.data
import icon_registration.networks as networks
from icon_registration.config import device

# Shape of one input volume: [batch, channel, depth, height, width].
input_shape = [1, 1, 130, 155, 130]

# When True, each sampled batch is warped by a random rotation whose magnitude
# grows with the number of pairs drawn so far.
DO_HARD_AUGMENT = False

def make_batch(dataset, pairs_made):
    # Draw GPUS * BATCH_SIZE random volumes and stack them along the batch axis.
    image = torch.cat([random.choice(dataset) for _ in range(GPUS * BATCH_SIZE)])
    image = image.cuda()
    # Normalize intensities by the maximum over the whole batch.
    image = image / torch.max(image)
    # Sanity check: report the intensity range after normalization.
    print(torch.max(image), torch.min(image))
    if DO_HARD_AUGMENT:
        return augment(image, pairs_made)
    else:
        return image

def augment(image_A, pairs_made):
    # Build a random skew-symmetric matrix; its matrix exponential is a
    # rotation whose angle scales with how far training has progressed.
    noise = torch.randn((image_A.shape[0], 3, 3))
    noise = noise - torch.permute(noise, (0, 2, 1))
    forward = torch.linalg.matrix_exp(noise * pairs_made / 10000)

    # Embed the rotation in a 3x4 affine matrix with zero translation.
    full = torch.zeros((image_A.shape[0], 3, 4))
    full[:, :3, :3] = forward

    # Resample each volume through the corresponding affine sampling grid.
    grid_shape = list(image_A.shape)
    grid_shape[1] = 3
    forward_grid = F.affine_grid(full.cuda(), grid_shape)
    print(full)
    warped_A = F.grid_sample(image_A, forward_grid, padding_mode="border")
    return warped_A
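
# Illustrative sketch, not called anywhere in this script: the antisymmetrised
# noise used in augment() is skew-symmetric, and matrix_exp of a skew-symmetric
# matrix is a rotation (orthogonal, determinant +1). The helper name and
# tolerance below are assumptions added purely for illustration.
def _check_augment_rotation(pairs_made=5000, batch=2):
    noise = torch.randn((batch, 3, 3))
    skew = noise - torch.permute(noise, (0, 2, 1))
    rot = torch.linalg.matrix_exp(skew * pairs_made / 10000)
    eye = torch.eye(3).expand(batch, 3, 3)
    # R @ R^T should be the identity and det(R) should be +1.
    assert torch.allclose(rot @ rot.transpose(1, 2), eye, atol=1e-4)
    assert torch.allclose(torch.linalg.det(rot), torch.ones(batch), atol=1e-4)
    return rot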

def make_make_pair(dataset):
    # pairs_made is a one-element list so the closure below can mutate it.
    pairs_made = [0]

    def make_pair():
        pairs_made[0] += 1
        return make_batch(dataset, pairs_made[0]), make_batch(dataset, pairs_made[0])

    return make_pair
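
# Usage sketch (an illustration, not part of the training flow): make_make_pair
# returns a zero-argument closure that icon.train_batchfunction can call once
# per step; the one-element pairs_made list lets the closure count how many
# pairs it has produced, so the augmentation strength can grow over training.
# The toy dataset below is an assumption, and calling this still requires a
# CUDA device plus the module-level GPUS and BATCH_SIZE defined under __main__.
def _demo_make_pair():
    toy_dataset = [torch.rand(1, 1, 32, 32, 32) for _ in range(4)]
    make_pair = make_make_pair(toy_dataset)
    image_A, image_B = make_pair()  # first call: pairs_made == 1
    image_A, image_B = make_pair()  # second call: pairs_made == 2
    return image_A.shape, image_B.shape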

if __name__ == "__main__":
    import equivariant_reg

    dataset = torch.load(
        "/playpen-raid1/tgreer/equivariant_reg_2/dataset_cache/brain_train_2sdown_scaled"
    )

    footsteps.initialize()

    BATCH_SIZE = 1
    GPUS = 4

    # Stage 1: short warm-up with diffusion=True.
    threestep_consistent_net = equivariant_reg.make_network_final(
        input_shape, dimension=3, diffusion=True
    )
    net_par = torch.nn.DataParallel(threestep_consistent_net).cuda()
    optimizer = torch.optim.Adam(net_par.parameters(), lr=0.0001)
    net_par.train()

    icon.train_batchfunction(
        net_par,
        optimizer,
        make_make_pair(dataset),
        unwrapped_net=threestep_consistent_net,
        steps=1500,
    )

    # Stage 2: rebuild the network with diffusion=False, reload the warm-up
    # weights, and train for the full run.
    old_state = threestep_consistent_net.state_dict()
    threestep_consistent_net = equivariant_reg.make_network_final(
        input_shape, dimension=3, diffusion=False
    )
    threestep_consistent_net.load_state_dict(old_state)
    net_par = torch.nn.DataParallel(threestep_consistent_net).cuda()
    optimizer = torch.optim.Adam(net_par.parameters(), lr=0.0001)
    net_par.train()

    icon.train_batchfunction(
        net_par,
        optimizer,
        make_make_pair(dataset),
        unwrapped_net=threestep_consistent_net,
        steps=50000,
    )
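
    # Possible final step (an assumption, not in the original script): save the
    # second-stage weights under the footsteps run directory so they can be
    # reloaded later. footsteps.output_dir is set by footsteps.initialize();
    # the filename is arbitrary.
    torch.save(
        threestep_consistent_net.state_dict(),
        footsteps.output_dir + "hcp_equicon_final.trch",
    )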