# losses.py (forked from fishaudio/Bert-VITS2)
import torch
import torchaudio
from transformers import AutoModel


def feature_loss(fmap_r, fmap_g):
    """L1 feature-matching loss between real and generated feature maps."""
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            rl = rl.float().detach()  # no gradients through the real branch
            gl = gl.float()
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2  # x2 scaling as in the HiFi-GAN recipe
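

# Minimal usage sketch for feature_loss (illustrative only: the nested-list
# shapes below are assumptions; in training, fmap_r / fmap_g are the
# discriminators' intermediate activations on real and generated audio).
def _demo_feature_loss():
    fmap_r = [
        [torch.randn(1, 32, 100), torch.randn(1, 64, 50)],  # discriminator 1
        [torch.randn(1, 32, 200), torch.randn(1, 64, 100)],  # discriminator 2
    ]
    fmap_g = [[t + 0.1 * torch.randn_like(t) for t in d] for d in fmap_r]
    return feature_loss(fmap_r, fmap_g)  # 0-dim tensor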


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss: real scores pushed toward 1, fakes toward 0."""
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg**2)
        loss += r_loss + g_loss
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses
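

# Usage sketch for discriminator_loss (hypothetical scores; real values come
# from the final outputs of the multi-period / multi-scale discriminators).
def _demo_discriminator_loss():
    disc_real_outputs = [torch.rand(4, 1), torch.rand(4, 1)]
    disc_generated_outputs = [torch.rand(4, 1), torch.rand(4, 1)]
    loss, r_losses, g_losses = discriminator_loss(
        disc_real_outputs, disc_generated_outputs
    )
    return loss, r_losses, g_losses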


def generator_loss(disc_outputs):
    """LSGAN generator loss: generated outputs are pushed toward 1."""
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        dg = dg.float()
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses
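

# Companion sketch for generator_loss: the generator is rewarded when the
# discriminator scores its outputs near 1 (the "real" target in LSGAN).
# Shapes are assumptions, as above.
def _demo_generator_loss():
    disc_outputs = [torch.rand(4, 1), torch.rand(4, 1)]
    loss, gen_losses = generator_loss(disc_outputs)
    return loss, gen_losses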


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """
    KL divergence between the posterior q and the prior p, averaged over the
    frames that z_mask marks as valid.

    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()

    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    kl = torch.sum(kl * z_mask)
    l = kl / torch.sum(z_mask)
    return l
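

# Shape sketch for kl_loss: b = batch, h = latent channels, t_t = frames.
# All values below are made up; z_mask zeroes out padded frames so the KL
# term is averaged only over valid positions.
def _demo_kl_loss():
    b, h, t_t = 2, 192, 50
    z_p = torch.randn(b, h, t_t)
    logs_q = torch.randn(b, h, t_t)
    m_p = torch.randn(b, h, t_t)
    logs_p = torch.randn(b, h, t_t)
    z_mask = torch.ones(b, 1, t_t)  # broadcasts over the channel dim
    return kl_loss(z_p, logs_q, m_p, logs_p, z_mask)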


class WavLMLoss(torch.nn.Module):
    """WavLM-based feature and adversarial losses. `wd` is a discriminator
    that operates on the stacked WavLM hidden states; the WavLM model itself
    is frozen and used only as a feature extractor."""

    def __init__(self, model, wd, model_sr, slm_sr=16000):
        super(WavLMLoss, self).__init__()
        self.wavlm = AutoModel.from_pretrained(model)
        self.wd = wd
        # WavLM expects slm_sr (16 kHz) input; resample from the model's rate.
        self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)
        self.wavlm.eval()
        for param in self.wavlm.parameters():
            param.requires_grad = False

    def forward(self, wav, y_rec):
        """L1 distance between WavLM hidden states of real and generated audio."""
        # Real-audio embeddings carry no gradients.
        with torch.no_grad():
            wav_16 = self.resample(wav)
            wav_embeddings = self.wavlm(
                input_values=wav_16, output_hidden_states=True
            ).hidden_states
        # The generated branch stays differentiable.
        y_rec_16 = self.resample(y_rec)
        y_rec_embeddings = self.wavlm(
            input_values=y_rec_16.squeeze(), output_hidden_states=True
        ).hidden_states

        floss = 0
        for er, eg in zip(wav_embeddings, y_rec_embeddings):
            floss += torch.mean(torch.abs(er - eg))

        return floss.mean()

    def generator(self, y_rec):
        """SLM adversarial loss for the generator."""
        y_rec_16 = self.resample(y_rec)
        y_rec_embeddings = self.wavlm(
            input_values=y_rec_16, output_hidden_states=True
        ).hidden_states
        # Stack every hidden layer and flatten: [b, n_layers * h, t].
        y_rec_embeddings = (
            torch.stack(y_rec_embeddings, dim=1)
            .transpose(-1, -2)
            .flatten(start_dim=1, end_dim=2)
        )
        y_df_hat_g = self.wd(y_rec_embeddings)
        loss_gen = torch.mean((1 - y_df_hat_g) ** 2)

        return loss_gen

    def discriminator(self, wav, y_rec):
        """SLM discriminator loss on real vs. generated audio embeddings."""
        # Feature extraction needs no gradients for the discriminator update.
        with torch.no_grad():
            wav_16 = self.resample(wav)
            wav_embeddings = self.wavlm(
                input_values=wav_16, output_hidden_states=True
            ).hidden_states
            y_rec_16 = self.resample(y_rec)
            y_rec_embeddings = self.wavlm(
                input_values=y_rec_16, output_hidden_states=True
            ).hidden_states

            y_embeddings = (
                torch.stack(wav_embeddings, dim=1)
                .transpose(-1, -2)
                .flatten(start_dim=1, end_dim=2)
            )
            y_rec_embeddings = (
                torch.stack(y_rec_embeddings, dim=1)
                .transpose(-1, -2)
                .flatten(start_dim=1, end_dim=2)
            )

        y_d_rs = self.wd(y_embeddings)
        y_d_gs = self.wd(y_rec_embeddings)

        r_loss = torch.mean((1 - y_d_rs) ** 2)
        g_loss = torch.mean(y_d_gs**2)

        loss_disc_f = r_loss + g_loss

        return loss_disc_f.mean()

    def discriminator_forward(self, wav):
        """Discriminator scores for real audio only (no loss computed)."""
        with torch.no_grad():
            wav_16 = self.resample(wav)
            wav_embeddings = self.wavlm(
                input_values=wav_16, output_hidden_states=True
            ).hidden_states
            y_embeddings = (
                torch.stack(wav_embeddings, dim=1)
                .transpose(-1, -2)
                .flatten(start_dim=1, end_dim=2)
            )

        y_d_rs = self.wd(y_embeddings)

        return y_d_rs
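

# Construction sketch for WavLMLoss. The checkpoint name, sample rate, and the
# stand-in `wd` below are assumptions for illustration; in training these come
# from the config, and `wd` is a learned SLM discriminator. forward() never
# touches `wd`, so a placeholder is enough here. Running this downloads the
# WavLM checkpoint from the Hugging Face hub.
def _demo_wavlm_loss():
    slm_loss = WavLMLoss(
        "microsoft/wavlm-base-plus",  # assumed checkpoint
        wd=torch.nn.Identity(),  # placeholder; unused by forward()
        model_sr=44100,  # assumed generator sample rate
    )
    wav = torch.randn(2, 44100)  # real audio batch: [b, samples]
    y_rec = torch.randn(2, 1, 44100)  # generated audio: [b, 1, samples]
    return slm_loss(wav, y_rec)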