ezmsg.learn.process.mlp_old

Classes

class MLPProcessor(*args, **kwargs)

Bases: BaseAdaptiveTransformer[MLPSettings, AxisArray, AxisArray, MLPState]

save_checkpoint(path)

Save the current model state to a checkpoint file.

Parameters:

path (str) – Path where the checkpoint will be saved

Return type:

None
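
A minimal save/restore sketch (the settings values and the settings= keyword are illustrative assumptions about how the processor is constructed, not taken from this page):

    from ezmsg.learn.process.mlp_old import MLPProcessor, MLPSettings

    proc = MLPProcessor(settings=MLPSettings(hidden_channels=[64, 32]))
    # ... adapt the model with partial_fit for a while ...
    proc.save_checkpoint("mlp_checkpoint.pt")  # hypothetical path

To reload the saved weights, pass the same path as checkpoint_path in MLPSettings when constructing a new processor.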

partial_fit(message)

Incrementally update the model from a single labeled sample.

Parameters:

message (SampleMessage) – Training sample used for the incremental update

Return type:

None
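
A sketch of one adaptive update. The SampleMessage layout (a trigger whose value carries the label, plus a sample AxisArray) is assumed from ezmsg.sigproc.sampler; the data are hypothetical:

    import numpy as np
    from ezmsg.util.messages.axisarray import AxisArray
    from ezmsg.sigproc.sampler import SampleMessage, SampleTriggerMessage

    sample = AxisArray(
        data=np.random.randn(10, 8),  # 10 time samples x 8 channels
        dims=["time", "ch"],
    )
    msg = SampleMessage(
        trigger=SampleTriggerMessage(value=1),  # assumed: label rides on trigger.value
        sample=sample,
    )
    proc.partial_fit(msg)  # one incremental update; returns None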

class MLPSettings(hidden_channels, norm_layer=None, activation_layer=torch.nn.ReLU, inplace=None, bias=True, dropout=0.0, single_precision=True, learning_rate=0.001, scheduler_gamma=0.999, checkpoint_path=None)

Bases: Settings

hidden_channels: list[int]

List of the hidden channel dimensions.

norm_layer: Callable[..., Module] | None = None

Norm layer that will be stacked on top of the linear layer. If None, this layer won't be used.

activation_layer

Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If None, this layer won't be used.

alias of ReLU

inplace: bool | None = None

Parameter for the activation layer, which can optionally perform the operation in-place. Default is None, which uses the respective default values of the activation_layer and the Dropout layer.

bias: bool = True

Whether to use bias in the linear layer.

dropout: float = 0.0

The probability for the dropout layer.

single_precision: bool = True

Whether to run the model in single precision (float32).

learning_rate: float = 0.001

Learning rate for the optimizer.

scheduler_gamma: float = 0.999

Learning-rate scheduler decay rate. Set to 0.0 to disable the scheduler.

checkpoint_path: str | None = None

Path to a checkpoint file containing model weights. If None, the model will be initialized with random weights.
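
A construction sketch; every value below is illustrative rather than a recommended default:

    import torch
    from ezmsg.learn.process.mlp_old import MLPSettings

    settings = MLPSettings(
        hidden_channels=[128, 64, 16],   # three hidden layers
        norm_layer=torch.nn.LayerNorm,   # stacked on top of each linear layer
        activation_layer=torch.nn.GELU,  # replaces the default ReLU
        dropout=0.1,                     # dropout probability
        learning_rate=1e-3,              # optimizer learning rate
        scheduler_gamma=0.999,           # LR decay per step; 0.0 disables
        checkpoint_path=None,            # None -> random initialization
    )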

class MLPState

Bases: object

model: MLP | None = None

optimizer: Optimizer | None = None

scheduler: LRScheduler | None = None

template: AxisArray | None = None

device: object | None = None
class MLPUnit(*args, settings=None, **kwargs)

Bases: BaseAdaptiveTransformerUnit[MLPSettings, AxisArray, AxisArray, MLPProcessor]

Parameters:

settings (Settings | None)

SETTINGS

alias of MLPSettings
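
A wiring sketch for running the unit in an ezmsg pipeline. The source and sink units are placeholders, and the INPUT_SIGNAL/OUTPUT_SIGNAL stream names are assumed from the ezmsg-sigproc base unit classes rather than documented here:

    from ezmsg.learn.process.mlp_old import MLPUnit, MLPSettings

    mlp = MLPUnit(settings=MLPSettings(hidden_channels=[64, 32]))

    # import ezmsg.core as ez
    # ez.run(
    #     components={"SOURCE": source, "MLP": mlp, "SINK": sink},
    #     connections=(
    #         (source.OUTPUT_SIGNAL, mlp.INPUT_SIGNAL),
    #         (mlp.OUTPUT_SIGNAL, sink.INPUT_SIGNAL),
    #     ),
    # )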
