fedbox.optimization.utils
from ..datasets import utils

from abc import ABC, abstractmethod
from typing import Callable
import hashlib
import json
import math
import os
import time

import numpy
import torch


class WeightingScheme(ABC):
    '''
    This abstract class defines a weighting scheme to aggregate updates from agents.
    '''

    def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]):
        '''
        Constructs an aggregation scheme using the list of clients' `datasets`.

        Parameters
        ----------
        datasets: dict[str, list[utils.FederatedSubset]]
            List of local subsets where each one corresponds to a client
        '''

        self.datasets_ = datasets
        # cache of computed aggregation weights, filled lazily by weights()
        self.weights_ = None

    @abstractmethod
    def weights(self) -> dict[str, list[float]]:
        '''
        Computes aggregation weights.

        Returns
        -------
        dict[str, list[float]]
            Aggregation weights
        '''

        pass


class UniformWeightingScheme(WeightingScheme):
    '''
    This class defines a uniform weighting scheme to aggregate updates from agents.
    '''

    def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]):
        '''
        Constructs an aggregation scheme using the list of clients' `datasets`.

        Parameters
        ----------
        datasets: dict[str, list[utils.FederatedSubset]]
            List of local subsets where each one corresponds to a client
        '''

        super().__init__(datasets)

    def weights(self) -> dict[str, list[float]]:
        '''
        Computes uniform aggregation weights.

        Returns
        -------
        dict[str, list[float]]
            Uniform aggregation weights
        '''

        if self.weights_ is None:
            # every client in a group receives the same weight 1 / (group size)
            self.weights_ = {
                group: [ 1 / len(subsets) for _ in subsets ]
                for group, subsets in self.datasets_.items()
            }

        return self.weights_


class AdjancencyWeightingScheme(WeightingScheme):
    '''
    This class defines a custom weighting scheme to aggregate updates from agents based on
    mutual statistical similarities computed as the negative logarithm of client misalignments.

    Note
    ----
    Each weight corresponds to the degree of each agent as a node in the graph representation of
    the federated network.
    '''
    # NOTE(review): the class name keeps the original 'Adjancency' spelling (sic)
    # to stay backward-compatible with existing callers

    def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]):
        '''
        Constructs an aggregation scheme using the list of clients' `datasets`.

        Parameters
        ----------
        datasets: dict[str, list[utils.FederatedSubset]]
            List of local subsets where each one corresponds to a client
        '''

        super().__init__(datasets)

        # cache of per-group adjacency matrices, filled lazily by weights()
        self.adjacency_matrices_ = None

    def weights(self) -> dict[str, list[float]]:
        '''
        Computes aggregation weights as clients' degrees in the graph representation of the network.

        Returns
        -------
        dict[str, list[float]]
            Aggregation weights as clients' degrees (adjacency matrix)
        '''

        if self.weights_ is None or self.adjacency_matrices_ is None:
            self.adjacency_matrices_ = {}
            self.weights_ = {}

            for group, subsets in self.datasets_.items():
                self.adjacency_matrices_[group], self.weights_[group] = self.adjacency_matrix_(subsets)

        return self.weights_

    def adjacencies(self) -> dict[str, torch.Tensor]:
        '''
        Computes the adjacency matrices for the training network (training clients) and testing network (testing clients).

        Returns
        -------
        dict[str, torch.Tensor]
            Adjacency matrices for training clients ('training') and testing clients ('testing')
        '''

        # the matrices are a by-product of weights(); trigger the computation if needed
        if self.weights_ is None or self.adjacency_matrices_ is None:
            self.weights()

        return self.adjacency_matrices_

    def adjacency_matrix_(self, subsets: list[utils.FederatedSubset]) -> tuple[torch.Tensor, list[float]]:
        '''
        Computes the adjacency matrix and related aggregation weights on a network of clients where each client corresponds to one subset in `subsets`.

        Parameters
        ---------
        subsets: list[utils.FederatedSubset]
            List of clients' subsets

        Returns
        -------
        tuple[torch.Tensor, list[float]]
            Tuple containing the adjacency matrix built on statistical similarities (0) and clients' aggregation weights as degrees in the related graph (1)
        '''

        n_subsets = len(subsets)
        messages = []

        adjacency_matrix = torch.zeros(size = (n_subsets, n_subsets))

        for subset in subsets:
            messages.append(self.message_(subset))

        for i in range(n_subsets):
            for j in range(i + 1, n_subsets):
                # client misalignment for couple of clients (i, j)
                misalignment = (1 - torch.dot(messages[i], messages[j])) / 2
                # connection weight in the adjacency matrix as the negative logarithm of the misalignment
                # NOTE(review): identical messages give misalignment 0 and thus an
                # infinite edge weight (-log 0) — presumably identical clients
                # never occur in practice; confirm or clamp upstream
                adjacency_matrix[i][j] = adjacency_matrix[j][i] = -torch.log(misalignment)

        # aggregation weights are computed as node degrees from the adjacency matrix of the graph
        weights = torch.sum(adjacency_matrix, dim = 1).tolist()

        return adjacency_matrix, weights

    def message_(self, subset: utils.FederatedSubset) -> torch.Tensor:
        '''
        Computes the statistically-significant message on a client's local subset.

        Parameters
        ----------
        subset: utils.FederatedSubset
            Subset of samples of a specific client

        Returns
        -------
        torch.Tensor
            Unitary vector message, i.e. normalized first principal component

        Note
        ----
        The message is computed as the first principal component on the client's dataset.
        '''

        # flatten each sample to a row vector; non-tensor data (e.g. a numpy
        # array) is converted to a tensor after reshaping
        if isinstance(subset.data, torch.Tensor):
            data = subset.data.view(len(subset), -1)
        else:
            data = subset.data.reshape(len(subset), -1)
            data = torch.tensor(data)

        message = torch.zeros(size = (data.shape[1],))
        # the statistically-significant message is computed as the first
        # principal component of the local dataset using the unscaled and
        # uncentered principal component analysis
        x = data.reshape(len(subset.data), -1) / subset.normalization
        _, _, vh = torch.linalg.svd(x, full_matrices = False)
        component = vh[0]
        rank = len(component)
        # the message is normalized as a unitary vector
        message[:rank] = component
        message /= message.norm()

        return message


class Logger:
    '''
    This class allows to log simulation metrics on a JSON file.
    '''

    def __init__(self, directory: str, simulation: dict, convergence: Callable, enable: bool = True):
        '''
        Constructs a logger instance.

        Parameters
        ----------
        directory: str
            Directory where to save the produced JSON file
        simulation: dict
            Simulation metadata
        convergence: Callable
            Predicate that is run at each `log(...)` call; it returns the convergence time once the model converged, `None` otherwise
        enable: bool
            Flag to enable logging, `True` by default
        '''

        self.directory: str = directory
        self.simulation: dict = simulation
        self.metrics: list[dict] = []
        # remains infinite until the convergence predicate reports a value
        # (annotation fixed: numpy.inf is a float, not an int)
        self.convergence_time: float = numpy.inf
        # wall-clock start timestamp used by flush() to compute elapsed time
        self.execution_time = time.perf_counter()
        self.convergence = convergence
        self.enable = enable

    def log(self, values: dict):
        '''
        Adds new logged data to the buffer and checks whether convergence has been reached.

        Parameters
        ----------
        values: dict
            Dictionary of logged values
        '''

        self.metrics.append(values)

        # once convergence is detected, never re-evaluate the predicate
        if numpy.isfinite(self.convergence_time):
            return

        converged = self.convergence(values)

        if converged is not None:
            self.convergence_time = converged

    def flush(self):
        '''
        Flushes the buffer's content to the output JSON file.
        '''

        if not self.enable:
            return

        seconds = math.floor(time.perf_counter() - self.execution_time)

        # format the elapsed wall-clock time compactly; kept in a local so the
        # start timestamp is not overwritten and flush() stays safe to call
        # more than once (the original assigned the formatted string back to
        # self.execution_time, breaking any subsequent flush)
        if seconds < 60:
            elapsed = '{:02}s'.format(seconds)
        elif seconds < 3600:
            elapsed = '{:02}m{:02}s'.format(seconds // 60, seconds % 60)
        elif seconds < 86400:
            elapsed = '{:02}h{:02}m'.format(seconds // 3600, (seconds % 3600) // 60)
        else:
            elapsed = '{}d{:02}h'.format(seconds // 86400, (seconds % 86400) // 3600)

        # create the output directory if missing; exist_ok avoids the
        # exists()/mkdir() race of the original code
        os.makedirs(self.directory, exist_ok = True)

        data = { **self.simulation, 'execution_time': elapsed, 'convergence_time': self.convergence_time, 'metrics': self.metrics }

        # the file name is a digest of the simulation metadata so distinct
        # simulations never overwrite each other's logs
        with open(os.path.join(self.directory, '{}.json'.format(hashlib.sha1(str(self.simulation.items()).encode()).hexdigest())), 'w', encoding = 'utf8') as file:
            json.dump(data, file, indent = 4)

    @staticmethod
    def default():
        '''
        Constructs a default fake logger with logging disabled.

        Returns
        -------
        Logger
            Fake logger
        '''

        # simulation must be a dict (it is splatted in flush) and the
        # convergence predicate must return None while unconverged: the
        # original passed '' and a lambda returning (), which set
        # convergence_time to () on the first log() call and broke the
        # numpy.isfinite check on every call after that
        return Logger('', {}, lambda _: None, False)
15class WeightingScheme(ABC): 16 ''' 17 This abstract class defines a weighting scheme to aggregate updates from agents. 18 ''' 19 20 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 21 ''' 22 Constructs an aggregation scheme using the list of clients' `datasets`. 23 24 Parameters 25 ---------- 26 datasets: dict[str, list[utils.FederatedSubset]] 27 List of local subsets where each one corresponds to a client 28 ''' 29 30 self.datasets_ = datasets 31 self.weights_ = None 32 33 @abstractmethod 34 def weights(self) -> dict[str, list[float]]: 35 ''' 36 Computes aggregation weights. 37 38 Returns 39 ------- 40 dict[str, list[float]] 41 Aggregation weights 42 ''' 43 44 pass
This abstract class defines a weighting scheme to aggregate updates from agents.
20 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 21 ''' 22 Constructs an aggregation scheme using the list of clients' `datasets`. 23 24 Parameters 25 ---------- 26 datasets: dict[str, list[utils.FederatedSubset]] 27 List of local subsets where each one corresponds to a client 28 ''' 29 30 self.datasets_ = datasets 31 self.weights_ = None
Constructs an aggregation scheme using the list of clients' datasets.
Parameters
- datasets (dict[str, list[utils.FederatedSubset]]): List of local subsets where each one corresponds to a client
33 @abstractmethod 34 def weights(self) -> dict[str, list[float]]: 35 ''' 36 Computes aggregation weights. 37 38 Returns 39 ------- 40 dict[str, list[float]] 41 Aggregation weights 42 ''' 43 44 pass
Computes aggregation weights.
Returns
- dict[str, list[float]]: Aggregation weights
47class UniformWeightingScheme(WeightingScheme): 48 ''' 49 This class defines a uniform weighting scheme to aggregate updates from agents. 50 ''' 51 52 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 53 ''' 54 Constructs an aggregation scheme using the list of clients' `datasets`. 55 56 Parameters 57 ---------- 58 datasets: dict[str, list[utils.FederatedSubset]] 59 List of local subsets where each one corresponds to a client 60 ''' 61 62 super().__init__(datasets) 63 64 def weights(self) -> dict[str, list[float]]: 65 ''' 66 Computes uniform aggregation weights. 67 68 Returns 69 ------- 70 dict[str, list[float]] 71 Uniform aggregation weights 72 ''' 73 74 if self.weights_ is None: 75 self.weights_ = { 76 group: [ 1 / len(subsets) for _ in subsets ] 77 for group, subsets in self.datasets_.items() 78 } 79 80 return self.weights_
This class defines a uniform weighting scheme to aggregate updates from agents.
52 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 53 ''' 54 Constructs an aggregation scheme using the list of clients' `datasets`. 55 56 Parameters 57 ---------- 58 datasets: dict[str, list[utils.FederatedSubset]] 59 List of local subsets where each one corresponds to a client 60 ''' 61 62 super().__init__(datasets)
Constructs an aggregation scheme using the list of clients' datasets.
Parameters
- datasets (dict[str, list[utils.FederatedSubset]]): List of local subsets where each one corresponds to a client
64 def weights(self) -> dict[str, list[float]]: 65 ''' 66 Computes uniform aggregation weights. 67 68 Returns 69 ------- 70 dict[str, list[float]] 71 Uniform aggregation weights 72 ''' 73 74 if self.weights_ is None: 75 self.weights_ = { 76 group: [ 1 / len(subsets) for _ in subsets ] 77 for group, subsets in self.datasets_.items() 78 } 79 80 return self.weights_
Computes uniform aggregation weights.
Returns
- dict[str, list[float]]: Uniform aggregation weights
Inherited Members
83class AdjancencyWeightingScheme(WeightingScheme): 84 ''' 85 This class defines a custom weighting scheme to aggregate updates from agents based on 86 mutual statistical similarities computed as the negative logarithm of client misalignments. 87 88 Note 89 ---- 90 Each weight corresponds to the degree of each agent as a node in the graph representation of 91 the federated network. 92 ''' 93 94 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 95 ''' 96 Constructs an aggregation scheme using the list of clients' `datasets`. 97 98 Parameters 99 ---------- 100 datasets: dict[str, list[utils.FederatedSubset]] 101 List of local subsets where each one corresponds to a client 102 ''' 103 104 super().__init__(datasets) 105 106 self.adjacency_matrices_ = None 107 108 def weights(self) -> dict[str, list[float]]: 109 ''' 110 Computes aggregation weights as clients' degrees in the graph representation of the network. 111 112 Returns 113 ------- 114 dict[str, list[float]] 115 Aggregation weights as clients' degrees (adjacency matrix) 116 ''' 117 118 if self.weights_ is None or self.adjacency_matrices_ is None: 119 self.adjacency_matrices_ = {} 120 self.weights_ = {} 121 122 for group, subsets in self.datasets_.items(): 123 self.adjacency_matrices_[group], self.weights_[group] = self.adjacency_matrix_(subsets) 124 125 return self.weights_ 126 127 def adjacencies(self) -> dict[str, torch.Tensor]: 128 ''' 129 Computes the adjacency matrices for the training network (training clients) and testing network (testing clients). 
130 131 Returns 132 ------- 133 dict[str, torch.Tensor] 134 Adjacency matrices for training clients ('training') and testing clients ('testing') 135 ''' 136 137 if self.weights_ is None or self.adjacency_matrices_ is None: 138 self.weights() 139 140 return self.adjacency_matrices_ 141 142 def adjacency_matrix_(self, subsets: list[utils.FederatedSubset]) -> tuple[torch.Tensor, list[float]]: 143 ''' 144 Computes the adjacency matrix and related aggregation weights on a network of clients where each client corresponds to one subset in `subsets`. 145 146 Parameters 147 --------- 148 subsets: list[utils.FederatedSubset] 149 List of clients' subsets 150 151 Returns 152 ------- 153 tuple[torch.Tensor, list[float]] 154 Tuple containing the adjacency matrix built on statistical similarities (0) and clients' aggregation weights as degrees in the related graph (1) 155 ''' 156 157 n_subsets = len(subsets) 158 messages = [] 159 160 adjacency_matrix = torch.zeros(size = (n_subsets, n_subsets)) 161 162 for subset in subsets: 163 messages.append(self.message_(subset)) 164 165 for i in range(n_subsets): 166 for j in range(i + 1, n_subsets): 167 # client misalignment for couple of clients (i, j) 168 misalignment = (1 - torch.dot(messages[i], messages[j])) / 2 169 # connection weight in the adjacency matrix as the negative logarithm of the misalignment 170 adjacency_matrix[i][j] = adjacency_matrix[j][i] = -torch.log(misalignment) 171 172 # aggregation weights are computed as node degrees from the adjacency matrix of the graph 173 weights = torch.sum(adjacency_matrix, dim = 1).tolist() 174 175 return adjacency_matrix, weights 176 177 def message_(self, subset: utils.FederatedSubset) -> torch.Tensor: 178 ''' 179 Computes the statistically-significant message on a client's local subset. 180 181 Parameters 182 ---------- 183 subset: utils.FederatedSubset 184 Subset of samples of a specific client 185 186 Returns 187 ------- 188 torch.Tensor 189 Unitary vector message, i.e. 
normalized first principal component 190 191 Note 192 ---- 193 The message is computed as the first principal component on the client's dataset. 194 ''' 195 196 if isinstance(subset.data, torch.Tensor): 197 data = subset.data.view(len(subset), -1) 198 else: 199 data = subset.data.reshape(len(subset), -1) 200 data = torch.tensor(data) 201 202 message = torch.zeros(size = (data.shape[1],)) 203 # the statistically-significant message is computed as the first 204 # principal component of the local dataset using the unscaled and 205 # uncentered principal component analysis 206 x = data.reshape(len(subset.data), -1) / subset.normalization 207 _, _, vh = torch.linalg.svd(x, full_matrices = False) 208 component = vh[0] 209 rank = len(component) 210 # the message is normalized as a unitary vector 211 message[:rank] = component 212 message /= message.norm() 213 214 return message
This class defines a custom weighting scheme to aggregate updates from agents based on mutual statistical similarities computed as the negative logarithm of client misalignments.
Note
Each weight corresponds to the degree of each agent as a node in the graph representation of the federated network.
94 def __init__(self, datasets: dict[str, list[utils.FederatedSubset]]): 95 ''' 96 Constructs an aggregation scheme using the list of clients' `datasets`. 97 98 Parameters 99 ---------- 100 datasets: dict[str, list[utils.FederatedSubset]] 101 List of local subsets where each one corresponds to a client 102 ''' 103 104 super().__init__(datasets) 105 106 self.adjacency_matrices_ = None
Constructs an aggregation scheme using the list of clients' datasets.
Parameters
- datasets (dict[str, list[utils.FederatedSubset]]): List of local subsets where each one corresponds to a client
108 def weights(self) -> dict[str, list[float]]: 109 ''' 110 Computes aggregation weights as clients' degrees in the graph representation of the network. 111 112 Returns 113 ------- 114 dict[str, list[float]] 115 Aggregation weights as clients' degrees (adjacency matrix) 116 ''' 117 118 if self.weights_ is None or self.adjacency_matrices_ is None: 119 self.adjacency_matrices_ = {} 120 self.weights_ = {} 121 122 for group, subsets in self.datasets_.items(): 123 self.adjacency_matrices_[group], self.weights_[group] = self.adjacency_matrix_(subsets) 124 125 return self.weights_
Computes aggregation weights as clients' degrees in the graph representation of the network.
Returns
- dict[str, list[float]]: Aggregation weights as clients' degrees (adjacency matrix)
127 def adjacencies(self) -> dict[str, torch.Tensor]: 128 ''' 129 Computes the adjacency matrices for the training network (training clients) and testing network (testing clients). 130 131 Returns 132 ------- 133 dict[str, torch.Tensor] 134 Adjacency matrices for training clients ('training') and testing clients ('testing') 135 ''' 136 137 if self.weights_ is None or self.adjacency_matrices_ is None: 138 self.weights() 139 140 return self.adjacency_matrices_
Computes the adjacency matrices for the training network (training clients) and testing network (testing clients).
Returns
- dict[str, torch.Tensor]: Adjacency matrices for training clients ('training') and testing clients ('testing')
142 def adjacency_matrix_(self, subsets: list[utils.FederatedSubset]) -> tuple[torch.Tensor, list[float]]: 143 ''' 144 Computes the adjacency matrix and related aggregation weights on a network of clients where each client corresponds to one subset in `subsets`. 145 146 Parameters 147 --------- 148 subsets: list[utils.FederatedSubset] 149 List of clients' subsets 150 151 Returns 152 ------- 153 tuple[torch.Tensor, list[float]] 154 Tuple containing the adjacency matrix built on statistical similarities (0) and clients' aggregation weights as degrees in the related graph (1) 155 ''' 156 157 n_subsets = len(subsets) 158 messages = [] 159 160 adjacency_matrix = torch.zeros(size = (n_subsets, n_subsets)) 161 162 for subset in subsets: 163 messages.append(self.message_(subset)) 164 165 for i in range(n_subsets): 166 for j in range(i + 1, n_subsets): 167 # client misalignment for couple of clients (i, j) 168 misalignment = (1 - torch.dot(messages[i], messages[j])) / 2 169 # connection weight in the adjacency matrix as the negative logarithm of the misalignment 170 adjacency_matrix[i][j] = adjacency_matrix[j][i] = -torch.log(misalignment) 171 172 # aggregation weights are computed as node degrees from the adjacency matrix of the graph 173 weights = torch.sum(adjacency_matrix, dim = 1).tolist() 174 175 return adjacency_matrix, weights
Computes the adjacency matrix and related aggregation weights on a network of clients where each client corresponds to one subset in subsets.
Parameters
- subsets (list[utils.FederatedSubset]): List of clients' subsets
Returns
- tuple[torch.Tensor, list[float]]: Tuple containing the adjacency matrix built on statistical similarities (0) and clients' aggregation weights as degrees in the related graph (1)
177 def message_(self, subset: utils.FederatedSubset) -> torch.Tensor: 178 ''' 179 Computes the statistically-significant message on a client's local subset. 180 181 Parameters 182 ---------- 183 subset: utils.FederatedSubset 184 Subset of samples of a specific client 185 186 Returns 187 ------- 188 torch.Tensor 189 Unitary vector message, i.e. normalized first principal component 190 191 Note 192 ---- 193 The message is computed as the first principal component on the client's dataset. 194 ''' 195 196 if isinstance(subset.data, torch.Tensor): 197 data = subset.data.view(len(subset), -1) 198 else: 199 data = subset.data.reshape(len(subset), -1) 200 data = torch.tensor(data) 201 202 message = torch.zeros(size = (data.shape[1],)) 203 # the statistically-significant message is computed as the first 204 # principal component of the local dataset using the unscaled and 205 # uncentered principal component analysis 206 x = data.reshape(len(subset.data), -1) / subset.normalization 207 _, _, vh = torch.linalg.svd(x, full_matrices = False) 208 component = vh[0] 209 rank = len(component) 210 # the message is normalized as a unitary vector 211 message[:rank] = component 212 message /= message.norm() 213 214 return message
Computes the statistically-significant message on a client's local subset.
Parameters
- subset (utils.FederatedSubset): Subset of samples of a specific client
Returns
- torch.Tensor: Unitary vector message, i.e. normalized first principal component
Note
The message is computed as the first principal component on the client's dataset.
Inherited Members
216class Logger: 217 ''' 218 This class allows to log simulation metrics on a JSON file. 219 ''' 220 221 def __init__(self, directory: str, simulation: dict, convergence: Callable, enable: bool = True): 222 ''' 223 Constructs a logger instance. 224 225 Parameters 226 ---------- 227 directory: str 228 Directory where to save the produced JSON file 229 simulation: dict 230 Simulation metadata 231 convergence: Callable 232 Boolean predicate that is run at each `log(...)` call to detect if the model converged 233 enable: bool 234 Flag to enable logging, `True` by default 235 ''' 236 237 self.directory: str = directory 238 self.simulation: dict = simulation 239 self.metrics: list[dict] = [] 240 self.convergence_time: int = numpy.inf 241 self.execution_time = time.perf_counter() 242 self.convergence = convergence 243 self.enable = enable 244 245 def log(self, values: dict): 246 ''' 247 Adds new logged data to the buffer and checks whether convergence has been reached. 248 249 Parameters 250 ---------- 251 values: dict 252 Dictionary of logged values 253 ''' 254 255 self.metrics.append(values) 256 257 if numpy.isfinite(self.convergence_time): 258 return 259 260 converged = self.convergence(values) 261 262 if converged is not None: 263 self.convergence_time = converged 264 265 def flush(self): 266 ''' 267 Flushes the buffer's content to the output JSON file. 
268 ''' 269 270 if not self.enable: 271 return 272 273 seconds = math.floor(time.perf_counter() - self.execution_time) 274 275 if seconds < 60: 276 self.execution_time = '{:02}s'.format(seconds) 277 elif seconds < 3600: 278 self.execution_time = '{:02}m{:02}s'.format(seconds // 60, seconds % 60) 279 elif seconds < 86400: 280 self.execution_time = '{:02}h{:02}m'.format(seconds // 3600, (seconds % 3600) // 60) 281 else: 282 self.execution_time = '{}d{:02}h'.format(seconds // 86400, (seconds % 86400) // 3600) 283 284 if not os.path.exists(self.directory): 285 os.mkdir(self.directory) 286 287 data = { **self.simulation, 'execution_time': self.execution_time, 'convergence_time': self.convergence_time, 'metrics': self.metrics } 288 289 with open(os.path.join(self.directory, '{}.json'.format(hashlib.sha1(str(self.simulation.items()).encode()).hexdigest())), 'w', encoding = 'utf8') as file: 290 json.dump(data, file, indent = 4) 291 292 @staticmethod 293 def default(): 294 ''' 295 Constructs a default fake logger. 296 297 Returns 298 ------- 299 Logger 300 Fake logger 301 ''' 302 303 return Logger('', '', lambda _: (), False)
This class allows to log simulation metrics on a JSON file.
221 def __init__(self, directory: str, simulation: dict, convergence: Callable, enable: bool = True): 222 ''' 223 Constructs a logger instance. 224 225 Parameters 226 ---------- 227 directory: str 228 Directory where to save the produced JSON file 229 simulation: dict 230 Simulation metadata 231 convergence: Callable 232 Boolean predicate that is run at each `log(...)` call to detect if the model converged 233 enable: bool 234 Flag to enable logging, `True` by default 235 ''' 236 237 self.directory: str = directory 238 self.simulation: dict = simulation 239 self.metrics: list[dict] = [] 240 self.convergence_time: int = numpy.inf 241 self.execution_time = time.perf_counter() 242 self.convergence = convergence 243 self.enable = enable
Constructs a logger instance.
Parameters
- directory (str): Directory where to save the produced JSON file
- simulation (dict): Simulation metadata
- convergence (Callable): Boolean predicate that is run at each `log(...)` call to detect if the model converged
- enable (bool): Flag to enable logging, `True` by default
245 def log(self, values: dict): 246 ''' 247 Adds new logged data to the buffer and checks whether convergence has been reached. 248 249 Parameters 250 ---------- 251 values: dict 252 Dictionary of logged values 253 ''' 254 255 self.metrics.append(values) 256 257 if numpy.isfinite(self.convergence_time): 258 return 259 260 converged = self.convergence(values) 261 262 if converged is not None: 263 self.convergence_time = converged
Adds new logged data to the buffer and checks whether convergence has been reached.
Parameters
- values (dict): Dictionary of logged values
265 def flush(self): 266 ''' 267 Flushes the buffer's content to the output JSON file. 268 ''' 269 270 if not self.enable: 271 return 272 273 seconds = math.floor(time.perf_counter() - self.execution_time) 274 275 if seconds < 60: 276 self.execution_time = '{:02}s'.format(seconds) 277 elif seconds < 3600: 278 self.execution_time = '{:02}m{:02}s'.format(seconds // 60, seconds % 60) 279 elif seconds < 86400: 280 self.execution_time = '{:02}h{:02}m'.format(seconds // 3600, (seconds % 3600) // 60) 281 else: 282 self.execution_time = '{}d{:02}h'.format(seconds // 86400, (seconds % 86400) // 3600) 283 284 if not os.path.exists(self.directory): 285 os.mkdir(self.directory) 286 287 data = { **self.simulation, 'execution_time': self.execution_time, 'convergence_time': self.convergence_time, 'metrics': self.metrics } 288 289 with open(os.path.join(self.directory, '{}.json'.format(hashlib.sha1(str(self.simulation.items()).encode()).hexdigest())), 'w', encoding = 'utf8') as file: 290 json.dump(data, file, indent = 4)
Flushes the buffer's content to the output JSON file.