
"""

from plato.servers import fedavg
from plato.servers.strategies import PortAggregationStrategy


class Server(fedavg.Server):
    """A federated learning server using the Port aggregation strategy."""

    def __init__(
        self,
        model=None,
        datasource=None,
        algorithm=None,
        trainer=None,
        callbacks=None,
    ):
        # All Port-specific aggregation logic (cosine-similarity and
        # staleness-weighted averaging, formerly implemented inline here)
        # is delegated to the dedicated strategy object.
        super().__init__(
            model=model,
            datasource=datasource,
            algorithm=algorithm,
            trainer=trainer,
            callbacks=callbacks,
            aggregation_strategy=PortAggregationStrategy(),
        )

    def weights_aggregated(self, updates):
        """
        Method called at the end of aggregating received weights.
        """
        # Save the current model for later retrieval when cosine similarity
        # needs to be computed against a stale client update.
        filename = f"model_{self.current_round}.pth"
        trainer = self.require_trainer()
        trainer.save_model(filename)