Project: Vilagil - Federated Learning / Comparative-Evaluation-of-Clustered-Federated-Learning-Methods

Commit c5544108, authored 9 months ago by leahcimali
Commit message:
Delete unused CovNet Model
Parent: b8826a5a
Showing 4 changed files with 17 additions and 64 deletions:

  src/models.py          +1   −47
  src/utils_data.py      +14  −15
  src/utils_fed.py       +1   −1
  src/utils_training.py  +1   −1
src/models.py  +1 −47

@@ -91,50 +91,4 @@ class GenericConvModel(ImageClassificationBase):
             )
         )
 
     def forward(self, xb):
-        return self.network(xb)
+        return self.network(xb)
\ No newline at end of file
-
-class CovNet(ImageClassificationBase):
-    def __init__(self, in_size, n_channels):
-        super().__init__()
-        self.img_final_size = int(in_size / (2**3))
-        self.network = nn.Sequential(
-            nn.Conv2d(n_channels, 32, kernel_size=3, padding=1),
-            nn.BatchNorm2d(32),
-            nn.ReLU(),
-            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(32),
-            nn.ReLU(),
-            nn.MaxPool2d(2, 2),  # output: 32 x 16 x 16
-            nn.Dropout(0.25),
-            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            nn.MaxPool2d(2, 2),  # output: 64 x 8 x 8
-            nn.Dropout(0.25),
-            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(128),
-            nn.ReLU(),
-            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(128),
-            nn.ReLU(),
-            nn.MaxPool2d(2, 2),  # output: 128 x 4 x 4
-            nn.Dropout(0.25),
-            nn.Flatten(),
-            nn.Linear(128 * self.img_final_size * self.img_final_size, 512),
-            nn.ReLU(),
-            nn.Linear(512, 256),
-            nn.ReLU(),
-            nn.Linear(256, 10)
-        )
-
-    def forward(self, xb):
-        return self.network(xb)
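
With CovNet deleted, GenericConvModel is the only convolutional architecture left in src/models.py. Below is a minimal usage sketch; it assumes GenericConvModel keeps the (in_size, n_channels) constructor that setup_experiment uses in the src/utils_data.py diff further down, and the CIFAR-10-shaped batch is purely illustrative.

import torch
from src.models import GenericConvModel

# Hypothetical instantiation: in_size=32, n_channels=3 correspond to CIFAR-10 images.
model = GenericConvModel(in_size=32, n_channels=3)

xb = torch.randn(8, 3, 32, 32)  # illustrative batch of 8 RGB 32x32 images
logits = model(xb)              # forward() simply applies self.network to the batch
print(logits.shape)
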
src/utils_data.py  +14 −15

@@ -53,7 +53,7 @@ def create_label_dict(dataset : str, nn_model : str) -> dict:
         fashion_mnist = torchvision.datasets.MNIST("datasets", download=True)
         (x_data, y_data) = fashion_mnist.data, fashion_mnist.targets
-        if nn_model in ["convolutional", "CovNet"]:
+        if nn_model in ["convolutional"]:
             x_data = x_data.unsqueeze(1)
 
     elif dataset == 'mnist':
@@ -63,7 +63,7 @@ def create_label_dict(dataset : str, nn_model : str) -> dict:
     elif dataset == "cifar10":
         cifar10 = torchvision.datasets.CIFAR10("datasets", download=True)
         (x_data, y_data) = cifar10.data, cifar10.targets
-        #x_data = np.transpose(x_data, (0, 3, 1, 2))
     elif dataset == 'kmnist':
         kmnist = torchvision.datasets.KMNIST("datasets", download=True)
@@ -256,9 +256,9 @@ def data_preparation(client: Client, row_exp: dict) -> None:
     test_dataset = CustomDataset(x_test, y_test, transform=test_val_transform)
 
     # Create DataLoaders
-    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
-    validation_loader = DataLoader(val_dataset, batch_size=64, shuffle=True)
-    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
+    train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+    validation_loader = DataLoader(val_dataset, batch_size=128, shuffle=True)
+    test_loader = DataLoader(test_dataset, batch_size=128, shuffle=True)
 
     # Store DataLoaders in the client object
     setattr(client, 'data_loader', {'train': train_loader, 'val': validation_loader, 'test': test_loader})
@@ -307,7 +307,7 @@ def setup_experiment(row_exp: dict) -> Tuple[Server, list]:
     """
 
-    from src.models import GenericConvModel, CovNet
+    from src.models import GenericConvModel
     from src.utils_fed import init_server_cluster
     import torch
@@ -326,8 +326,6 @@ def setup_experiment(row_exp: dict) -> Tuple[Server, list]:
     elif row_exp['nn_model'] == "convolutional":
         model_server = Server(GenericConvModel(in_size=imgs_params[row_exp['dataset']][0], n_channels=imgs_params[row_exp['dataset']][1]))
-    elif row_exp['nn_model'] == "CovNet":
-        model_server = Server(CovNet(in_size=imgs_params[row_exp['dataset']][0], n_channels=imgs_params[row_exp['dataset']][1]))
 
     model_server.model.to(device)
@@ -613,7 +611,7 @@ def swap_labels(labels : list, client : Client, heterogeneity_class : int) -> Cl
     return client
 
+'''
 def centralize_data(list_clients : list) -> Tuple[DataLoader, DataLoader]:
     """
     Centralize data of the federated learning setup for central model comparison
@@ -643,11 +641,12 @@ def centralize_data(list_clients : list) -> Tuple[DataLoader, DataLoader]:
     x_test_tensor = torch.tensor(x_test, dtype=torch.float32)
     y_test_tensor = torch.tensor(y_test, dtype=torch.long)
 
-    train_loader = DataLoader(TensorDataset(x_train_tensor, y_train_tensor), batch_size=64, shuffle=True)
-    val_loader = DataLoader(TensorDataset(x_val_tensor, y_val_tensor), batch_size=64, shuffle=True)
-    test_loader = DataLoader(TensorDataset(x_test_tensor, y_test_tensor), batch_size=64, shuffle=True)
+    train_loader = DataLoader(TensorDataset(x_train_tensor, y_train_tensor), batch_size=128, shuffle=True)
+    val_loader = DataLoader(TensorDataset(x_val_tensor, y_val_tensor), batch_size=128, shuffle=False)
+    test_loader = DataLoader(TensorDataset(x_test_tensor, y_test_tensor), batch_size=128, shuffle=False)
 
     return train_loader, val_loader, test_loader
+'''
 def centralize_data(list_clients : list) -> Tuple[DataLoader, DataLoader]:
     """
     Centralize data of the federated learning setup for central model comparison
@@ -696,9 +695,9 @@ def centralize_data(list_clients: list) -> Tuple[DataLoader, DataLoader]:
     test_dataset = CustomDataset(x_test, y_test, transform=test_val_transform)
 
     # Create DataLoaders
-    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
-    val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)  # Validation typically not shuffled
-    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)  # Test data typically not shuffled
+    train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+    val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)  # Validation typically not shuffled
+    test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)  # Test data typically not shuffled
 
     return train_loader, val_loader, test_loader
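
The recurring change in src/utils_data.py is that every DataLoader now uses batch_size=128 instead of 64, and the active centralize_data keeps its validation and test loaders unshuffled. A self-contained sketch of the same pattern in plain PyTorch follows; the random tensors are stand-ins for the concatenated client data, and CustomDataset, Client and the repository's transforms are deliberately not used here.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in tensors for the centralized train/val/test splits (shapes are illustrative).
x_train, y_train = torch.randn(512, 3, 32, 32), torch.randint(0, 10, (512,))
x_val, y_val = torch.randn(128, 3, 32, 32), torch.randint(0, 10, (128,))
x_test, y_test = torch.randn(128, 3, 32, 32), torch.randint(0, 10, (128,))

# Same settings as the updated centralize_data loaders: batch_size=128, only training data shuffled.
train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=128, shuffle=True)
val_loader = DataLoader(TensorDataset(x_val, y_val), batch_size=128, shuffle=False)
test_loader = DataLoader(TensorDataset(x_test, y_test), batch_size=128, shuffle=False)
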
src/utils_fed.py  +1 −1

@@ -199,7 +199,7 @@ def init_server_cluster(my_server : Server, list_clients : list, row_exp : dict,
     p_expert_opintion : Parameter to avoid completly random assignment if neeed (default to 0)
     """
 
-    from src.models import GenericLinearModel, GenericConvModel, CovNet
+    from src.models import GenericLinearModel, GenericConvModel
     import numpy as np
     import copy
src/utils_training.py  +1 −1

@@ -212,7 +212,7 @@ def train_central(model: ImageClassificationBase, train_loader: DataLoader, val_
     # Move the model to the appropriate device
     model.to(device)
 
-    opt_func = torch.optim.Adam # if row_exp['nn_model'] == "linear" else torch.optim.Adam
+    opt_func = torch.optim.SGD # if row_exp['nn_model'] == "linear" else torch.optim.Adam
     lr = 0.001
     history = []
     optimizer = opt_func(model.parameters(), lr)
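
The single change in src/utils_training.py swaps the optimizer used by train_central from torch.optim.Adam to torch.optim.SGD, with the learning rate left at 0.001. A minimal sketch of the resulting setup is below; the nn.Linear model is a hypothetical stand-in for the repository's ImageClassificationBase models, and no momentum argument is added because the diff does not show one.

import torch
import torch.nn as nn

model = nn.Linear(10, 2)    # stand-in model; train_central receives an ImageClassificationBase instance
opt_func = torch.optim.SGD  # was torch.optim.Adam before this commit
lr = 0.001
optimizer = opt_func(model.parameters(), lr)

# One illustrative optimization step.
loss = model(torch.randn(4, 10)).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()

Note that plain SGD at lr=0.001 and without momentum typically converges more slowly than Adam, so results from runs before and after this commit are not directly comparable.
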