import torch
import time
import copy
import random
import numpy as np
from flcore.clients.clientfomo import clientFomo
from flcore.servers.serverbase import Server
from threading import Thread
from utils.dlg import DLG


class FedFomo(Server):
    def __init__(self, args, times):
        super().__init__(args, times)

        # select slow clients
        self.set_slow_clients()
        self.set_clients(clientFomo)

        # P[i][j] accumulates how helpful client j's model has been to client i;
        # initialized to the identity so every client starts by favoring itself
        self.P = torch.diag(torch.ones(self.num_clients, device=self.device))
        self.uploaded_ids = []
        # number of other clients' models sent to each client per round
        self.M = min(args.M, self.num_join_clients)
        # server-side copies of the latest model uploaded by each client
        self.client_models = [copy.deepcopy(self.global_model) for _ in range(self.num_clients)]

        print(f"\nJoin ratio / total clients: {self.join_ratio} / {self.num_clients}")
        print("Finished creating server and clients.")
        self.Budget = []

    def train(self):
        for i in range(self.global_rounds+1):
            s_t = time.time()
            self.selected_clients = self.select_clients()
            # send every client its current top-M client models according to P
            self.send_models()

            if i % self.eval_gap == 0:
                print(f"\n-------------Round number: {i}-------------")
                print("\nEvaluate personalized models")
                self.evaluate()

            for client in self.selected_clients:
                client.train()

            # run the DLG evaluation before receive_models(), while
            # self.client_models still holds the previous round's copies,
            # which call_dlg() uses to approximate the clients' updates
            if self.dlg_eval and i % self.dlg_gap == 0:
                self.call_dlg(i)
            self.receive_models()

            self.Budget.append(time.time() - s_t)
            print('-'*25, 'time cost', '-'*25, self.Budget[-1])

            if self.auto_break and self.check_done(acc_lss=[self.rs_test_acc], top_cnt=self.top_cnt):
                break

        print("\nBest accuracy.")
        print(max(self.rs_test_acc))
        print("\nAverage time cost per round.")
        print(sum(self.Budget[1:])/len(self.Budget[1:]))

        self.save_results()

    def send_models(self):
        assert (len(self.selected_clients) > 0)
        for client in self.clients:
            start_time = time.time()

            # simulate stragglers with a small random delay
            if client.send_slow:
                time.sleep(0.1 * np.abs(np.random.rand()))

            # pick the M client models that have been most helpful to this
            # client so far, i.e. the largest entries in its row of P
            M_ = min(self.M, len(self.uploaded_ids))
            indices = torch.topk(self.P[client.id], M_).indices.tolist()

            send_ids = []
            send_models = []
            for i in indices:
                send_ids.append(i)
                send_models.append(self.client_models[i])

            client.receive_models(send_ids, send_models)

            client.send_time_cost['num_rounds'] += 1
            client.send_time_cost['total_cost'] += 2 * (time.time() - start_time)

    def receive_models(self):
        assert (len(self.selected_clients) > 0)

        active_clients = random.sample(
            self.selected_clients, int((1-self.client_drop_rate) * self.current_num_join_clients))

        self.uploaded_ids = []
        self.uploaded_weights = []
        tot_samples = 0
        for client in active_clients:
            try:
                client_time_cost = client.train_time_cost['total_cost'] / client.train_time_cost['num_rounds'] + \
                        client.send_time_cost['total_cost'] / client.send_time_cost['num_rounds']
            except ZeroDivisionError:
                client_time_cost = 0
            if client_time_cost <= self.time_threthold:
                tot_samples += client.train_samples
                self.uploaded_ids.append(client.id)
                self.uploaded_weights.append(client.train_samples)
                # keep a server-side copy of this client's personalized model
                self.client_models[client.id] = copy.deepcopy(client.model)
                # accumulate this client's feedback on how useful each received model was
                self.P[client.id] += client.weight_vector
        for i, w in enumerate(self.uploaded_weights):
            self.uploaded_weights[i] = w / tot_samples

    def call_dlg(self, R):
        # estimate privacy leakage by running the DLG (Deep Leakage from
        # Gradients) attack against each client's latest update
        cnt = 0
        psnr_val = 0
        for cid, client_model_server in enumerate(self.client_models):
            client_model = self.clients[cid].model
            client_model.eval()
            # approximate the client's update as the difference between the
            # server-side copy and the client's current parameters
            origin_grad = []
            for gp, pp in zip(client_model_server.parameters(), client_model.parameters()):
                origin_grad.append(gp.data - pp.data)

            target_inputs = []
            trainloader, _ = self.clients[cid].load_train_data()
            with torch.no_grad():
                for i, (x, y) in enumerate(trainloader):
                    if i >= self.batch_num_per_client:
                        break

                    if type(x) == type([]):
                        x[0] = x[0].to(self.device)
                    else:
                        x = x.to(self.device)
                    y = y.to(self.device)
                    output = client_model(x)
                    target_inputs.append((x, output))

            d = DLG(client_model, origin_grad, target_inputs)
            if d is not None:
                psnr_val += d
                cnt += 1

        if cnt > 0:
            print('PSNR value is {:.2f} dB'.format(psnr_val / cnt))
        else:
            print('PSNR error')
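
# ----------------------------------------------------------------------------
# Usage sketch (illustration only, not part of this module's API): FedFomo is
# normally constructed by the repository's entry script, which builds `args`
# via argparse. The exact argument names (M, join_ratio, global_rounds, ...)
# are assumptions inferred from the attributes this class and the Server base
# class read from `args`; consult the repo's main script for the real setup.
#
#   server = FedFomo(args, times=0)   # `args` populated by the entry script
#   server.train()                    # runs global_rounds + 1 federated rounds
# ----------------------------------------------------------------------------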