Skip to content
Snippets Groups Projects
Commit 167f5455 authored by Elquintas's avatar Elquintas
Browse files

changements 06/01/2023

parent a63c1e74
No related branches found
No related tags found
No related merge requests found
# PATHS
wav_path: '../data/wavs/'
data_path: './data/'
embedding_path: '../data/embeddings/'
model_path: '../models/model'
# TRAINING PARAMETERS
sampling_rate: 16000
batch_size: 16
learning_rate: 0.001
epochs: 50
dropout: 0.2
training_set_file: 'TRAINING.txt'
test_set_file: 'TEST.txt'
# MODEL PARAMETERS
first_layer: 512
second_layer: 128
third_layer: 62
# Types of embeddings supported: 'ecapa_tdnn' or 'x-vector'
# ecapa_tdnn: dim = 192 (change first_layer dim)
# x-vector: dim = 512 (change first_layer dim)
embedding_type: x-vector
ANC150_LEC_seg_1_ecapa_emb.pickle,7.0,2.3,1.2,4.5,2.0,2.1
ANC150_LEC_seg_2_ecapa_emb.pickle,5.0,3.3,2.2,4.1,1.2,8.3
ANC150_LEC_seg_3_ecapa_emb.pickle,4.0,2.5,4.2,3.5,3.0,8.3
ANC150_LEC_seg_4_ecapa_emb.pickle,3.0,2.3,1.2,4.1,3.4,5.4
ANC150_LEC_seg_5_ecapa_emb.pickle,1.2,2.5,6.2,4.1,1.0,2.1
ANC150_LEC_seg_6_ecapa_emb.pickle,7.3,2.4,1.2,3.5,2.5,2.4
ANC150_LEC_seg_7_ecapa_emb.pickle,4.0,2.3,3.2,4.3,1.0,3.6
ANC150_LEC_seg_8_ecapa_emb.pickle,2.0,2.3,2.2,4.2,1.6,2.4
File added
File added
import pandas as pd
import torch
from torch.utils.data import Dataset,DataLoader
class load_data(Dataset):
    """Dataset over a CSV manifest of embedding files and perceptual scores.

    Each row of the (header-less) CSV is:
        <embedding pickle filename>,INT,SEV,V,R,P,PD
    where the six numeric columns are perceptual ratings
    (intelligibility, severity, voice, resonance, prosody,
    phonemic distortion — names inferred from the attributes; confirm
    against the annotation protocol).
    """

    def __init__(self, filename, datadir):
        """Load the manifest CSV.

        Args:
            filename: path to the CSV manifest.
            datadir: directory holding the embedding pickles (stored for
                later use; not read here).
        """
        self.filename = filename
        self.datadir = datadir
        xy = pd.read_csv(filename, header=None)
        self.file = xy.values[:, 0]  # embedding pickle filename
        self.INT = xy.values[:, 1]   # intelligibility score
        self.SEV = xy.values[:, 2]   # severity score
        self.V = xy.values[:, 3]     # voice score
        self.R = xy.values[:, 4]     # resonance score
        self.P = xy.values[:, 5]     # prosody score
        self.PD = xy.values[:, 6]    # phonemic distortion score
        # BUG FIX: n_samples was never assigned, so __len__ raised
        # AttributeError on every call (breaking any DataLoader).
        self.n_samples = xy.shape[0]

    def __len__(self):
        return self.n_samples

    def __getitem__(self, idx):
        # NOTE(review): only the filename is returned; the score columns
        # are parsed but never yielded. The trailing comma suggests the
        # labels were meant to be included — kept as-is to preserve the
        # external interface.
        return (self.file[idx], )
File added
import yaml
import torch
import torch.nn as nn
import torch.nn.functional as F
# Module-level config: loaded once at import time so that layer sizes and
# the dropout rate are available when the model class is defined.
with open("./configs/parameters.yaml", "r") as ymlfile:
    # BUG FIX: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from untrusted input; safe_load
    # restricts parsing to plain YAML data (matches load_config in train.py).
    cfg = yaml.safe_load(ymlfile)
class model_embedding_snn(nn.Module):
    """Multi-head regression MLP over fixed-size speaker embeddings.

    Maps a (batch, first_layer) embedding through two hidden linear layers
    (with batch-norm, dropout and ReLU) into five parallel one-unit heads,
    one per predicted score: intelligibility (INT), voice (v), resonance
    (r), prosody (p) and phonemic distortion (pd).

    Layer sizes and the dropout rate come from the module-level `cfg`
    loaded from ./configs/parameters.yaml; `first_layer` must match the
    embedding dimension (192 for ecapa_tdnn, 512 for x-vector, per the
    config comments).
    """

    def __init__(self):
        super(model_embedding_snn, self).__init__()
        self.relu = nn.ReLU()
        # BUG FIX: nn.Dropout2d zeroes whole channels of (N, C, H, W)
        # feature maps; inputs here are 2-D (batch, features), so plain
        # element-wise nn.Dropout is the correct layer.
        self.dropout = nn.Dropout(cfg['dropout'])
        self.batch_norm1 = nn.BatchNorm1d(cfg['first_layer'])
        self.batch_norm2 = nn.BatchNorm1d(cfg['second_layer'])
        self.batch_norm3 = nn.BatchNorm1d(cfg['third_layer'])
        self.fc1 = nn.Linear(cfg['first_layer'], cfg['second_layer'])
        self.fc2 = nn.Linear(cfg['second_layer'], cfg['third_layer'])
        # One scalar regression head per perceptual dimension.
        self.fc_voix = nn.Linear(cfg['third_layer'], 1)
        self.fc_res = nn.Linear(cfg['third_layer'], 1)
        self.fc_pros = nn.Linear(cfg['third_layer'], 1)
        self.fc_pd = nn.Linear(cfg['third_layer'], 1)
        self.fc_int = nn.Linear(cfg['third_layer'], 1)

    def forward(self, input_embs):
        """Run the shared trunk then the five heads.

        Args:
            input_embs: float tensor of shape (batch, first_layer).

        Returns:
            Tuple (INT, v, r, p, pd) of (batch, 1) tensors. Each head is
            passed through ReLU, so predictions are clamped to >= 0
            (scores are non-negative ratings).
        """
        x = self.batch_norm1(input_embs)
        x = self.fc1(x)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.batch_norm2(x)
        x = self.fc2(x)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.batch_norm3(x)
        v = self.relu(self.fc_voix(x))
        r = self.relu(self.fc_res(x))
        p = self.relu(self.fc_pros(x))
        pd = self.relu(self.fc_pd(x))
        INT = self.relu(self.fc_int(x))
        return INT, v, r, p, pd
train.py 0 → 100644
import sys
import torch
import torch.nn as nn
import yaml
import models.model
import dataloader.load
def load_config(config_path):
    """Load and return the YAML configuration as a dict.

    Args:
        config_path: path to the YAML config file.

    Returns:
        The parsed configuration mapping.

    Raises:
        Whatever exception occurred while opening/parsing the file
        (OSError, yaml.YAMLError, ...), after reporting it.
    """
    try:
        with open(config_path, 'r') as file:
            return yaml.safe_load(file)
    except Exception as e:
        # BUG FIX: the original printed a message and implicitly returned
        # None, which later crashed with an opaque TypeError when the
        # caller subscripted cfg. Report the cause and re-raise instead.
        print(f'Error reading the config file {config_path}: {e}', file=sys.stderr)
        raise
if __name__ == "__main__":
    # Pick the GPU when available; everything is moved to this device.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    config_path = './configs/parameters.yaml'
    cfg = load_config(config_path)

    # BUG FIX: .cuda() crashed on CPU-only machines even though `device`
    # was computed above; move the model to the selected device instead.
    model_snn = models.model.model_embedding_snn().to(device)

    criterion = nn.MSELoss()
    # BUG FIX: the configured learning_rate was ignored (Adam default used).
    optimizer = torch.optim.Adam(model_snn.parameters(), lr=cfg['learning_rate'])

    # BUG FIX: key was misspelled 'traininig_set_file'; the config file
    # defines 'training_set_file', so the old lookup raised KeyError.
    train_filename = cfg['data_path'] + cfg['training_set_file']
    print(train_filename)

    # Training loop is still a stub (prints the epoch index only).
    for ep in range(cfg['epochs']):
        print(ep)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment