fedbox.models.nn
import torch
from torch import nn, optim
from torch.utils import data


class NeuralNetwork(nn.Module):
    """One-hidden-layer fully connected network with a ReLU nonlinearity.

    Paired with a loss criterion, this model yields a nonconvex training
    objective.
    """

    def __init__(self, n_inputs: int, n_classes: int, n_hidden: int = 128):
        """Build the flatten stage and the two linear layers.

        :param n_inputs: number of input features after flattening.
        :param n_classes: number of output logits.
        :param n_hidden: width of the hidden layer (defaults to 128).
        """
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(n_inputs, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Flatten ``x``, apply linear -> ReLU -> linear, and return logits."""
        hidden = torch.relu(self.linear1(self.flatten(x)))
        return self.linear2(hidden)
class
NeuralNetwork(torch.nn.modules.module.Module):
class NeuralNetwork(nn.Module):
    """Feed-forward classifier: one hidden layer followed by ReLU.

    Combined with a loss criterion, this network defines a nonconvex
    loss objective.
    """

    def __init__(self, n_inputs: int, n_classes: int, n_hidden: int = 128):
        """Create the input-to-hidden and hidden-to-output layers.

        :param n_inputs: flattened input feature count.
        :param n_classes: number of classes (size of the logit vector).
        :param n_hidden: hidden-layer width; 128 unless overridden.
        """
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(n_inputs, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute logits for ``x``; no softmax is applied."""
        flat = self.flatten(x)
        activated = nn.functional.relu(self.linear1(flat))
        return self.linear2(activated)
Basic neural network with one hidden layer and ReLU activation. The combination of this model and the loss criterion constitutes a nonconvex loss objective.
NeuralNetwork(n_inputs: int, n_classes: int, n_hidden: int = 128)
14 def __init__(self, n_inputs: int, n_classes: int, n_hidden: int = 128): 15 super().__init__() 16 17 self.flatten = nn.Flatten() 18 self.linear1 = nn.Linear(n_inputs, n_hidden) 19 self.linear2 = nn.Linear(n_hidden, n_classes)
Initialize internal Module state, shared by both nn.Module and ScriptModule.
def
forward(self, x: torch.Tensor) -> torch.Tensor:
21 def forward(self, x: torch.Tensor) -> torch.Tensor: 22 x = self.flatten(x) 23 x = self.linear1(x) 24 x = nn.functional.relu(x) 25 return self.linear2(x)
Define the computation performed at every call.
Should be overridden by all subclasses.
Although the recipe for forward pass needs to be defined within
this function, one should call the Module instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
Inherited Members
- torch.nn.modules.module.Module
- dump_patches
- training
- call_super_init
- register_buffer
- register_parameter
- add_module
- register_module
- get_submodule
- get_parameter
- get_buffer
- get_extra_state
- set_extra_state
- apply
- cuda
- ipu
- xpu
- cpu
- type
- float
- double
- half
- bfloat16
- to_empty
- to
- register_full_backward_pre_hook
- register_backward_hook
- register_full_backward_hook
- register_forward_pre_hook
- register_forward_hook
- register_state_dict_pre_hook
- state_dict
- register_load_state_dict_post_hook
- load_state_dict
- parameters
- named_parameters
- buffers
- named_buffers
- children
- named_children
- modules
- named_modules
- train
- eval
- requires_grad_
- zero_grad
- extra_repr
- compile