ezmsg.learn.dim_reduce.adaptive_decomp#

Classes

class AdaptiveDecompSettings(axis='!time', n_components=2)[source]#

Bases: Settings

Parameters:
  • axis (str)

  • n_components (int)

axis: str = '!time'#
n_components: int = 2#
__init__(axis='!time', n_components=2)#
Parameters:
  • axis (str)

  • n_components (int)

Return type:

None

class AdaptiveDecompState[source]#

Bases: object

template: AxisArray | None = None#
axis_groups: tuple[str, list[str], list[str]] | None = None#
estimator: Any = None#

class AdaptiveDecompTransformer(*args, **kwargs)[source]#

Bases: BaseAdaptiveTransformer[AdaptiveDecompSettings, AxisArray, AxisArray, AdaptiveDecompState], Generic[EstimatorType]

Base class for adaptive decomposition transformers. See IncrementalPCATransformer and MiniBatchNMFTransformer for concrete implementations.

Note that for these classes, adaptation is not automatic. The user must call partial_fit on the transformer. For automated adaptation, see IncrementalDecompTransformer.
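
For example, a minimal sketch of manual adaptation (assuming, as with other ezmsg-sigproc processors, that the transformer accepts its settings via the settings keyword and is callable on AxisArray messages; the channel count, sampling rate, and the AxisArray.TimeAxis helper are illustrative assumptions):

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.learn.dim_reduce.adaptive_decomp import (
    IncrementalPCASettings,
    IncrementalPCATransformer,
)

# Illustrative chunk: 100 time samples x 32 channels.
msg = AxisArray(
    data=np.random.randn(100, 32),
    dims=["time", "ch"],
    axes={"time": AxisArray.TimeAxis(fs=100.0)},  # TimeAxis helper assumed; adjust to your ezmsg-util version
)

proc = IncrementalPCATransformer(
    settings=IncrementalPCASettings(axis="!time", n_components=2)
)

proc.partial_fit(msg)  # adaptation is explicit: update the estimator on this chunk
out = proc(msg)        # then project the chunk onto the learned components
print(out.data.shape)  # 2 components per time sample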

__init__(*args, **kwargs)[source]#
classmethod get_message_type(dir)[source]#
Return type:

Type[AxisArray]

Parameters:

dir (str)

classmethod get_estimator_type()[source]#
Return type:

Type[TypeVar(EstimatorType, bound=Union[IncrementalPCA, MiniBatchNMF])]

partial_fit(message)[source]#
Return type:

None

Parameters:

message (AxisArray)

class BaseAdaptiveDecompUnit(*args, settings=None, **kwargs)[source]#

Bases: BaseAdaptiveTransformerUnit[SettingsType, AxisArray, AxisArray, TransformerType], Generic[SettingsType, TransformerType]

Parameters:

settings (Settings | None)

INPUT_SAMPLE = InputStream(AxisArray)#
async on_sample(msg)[source]#
Return type:

None

Parameters:

msg (AxisArray)
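
A sketch of wiring one of the concrete units into an ezmsg graph. Only INPUT_SAMPLE is documented above; the INPUT_SIGNAL/OUTPUT_SIGNAL stream names, the RandomSource stand-in, and passing settings positionally are assumptions based on common ezmsg and ezmsg-sigproc conventions:

import asyncio

import numpy as np
import ezmsg.core as ez
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.learn.dim_reduce.adaptive_decomp import (
    IncrementalPCASettings,
    IncrementalPCAUnit,
)

class RandomSource(ez.Unit):
    # Hypothetical stand-in for whatever produces AxisArray chunks in your system.
    OUTPUT_SIGNAL = ez.OutputStream(AxisArray)

    @ez.publisher(OUTPUT_SIGNAL)
    async def generate(self):
        while True:
            msg = AxisArray(data=np.random.randn(100, 32), dims=["time", "ch"])
            yield self.OUTPUT_SIGNAL, msg
            await asyncio.sleep(1.0)

class DecompPipeline(ez.Collection):
    SOURCE = RandomSource()
    DECOMP = IncrementalPCAUnit(IncrementalPCASettings(n_components=2))

    def network(self) -> ez.NetworkDefinition:
        return (
            # Signal path: each chunk is projected onto the current components.
            (self.SOURCE.OUTPUT_SIGNAL, self.DECOMP.INPUT_SIGNAL),
            # Adaptation path: chunks arriving here trigger partial_fit via on_sample.
            (self.SOURCE.OUTPUT_SIGNAL, self.DECOMP.INPUT_SAMPLE),
        )

if __name__ == "__main__":
    ez.run(DecompPipeline())  # blocks until interrupted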

class IncrementalPCASettings(axis='!time', n_components=2, whiten=False, batch_size=None)[source]#

Bases: AdaptiveDecompSettings

Parameters:
  • axis (str)

  • n_components (int)

  • whiten (bool)

  • batch_size (int | None)

whiten: bool = False#
batch_size: Optional[int] = None#
__init__(axis='!time', n_components=2, whiten=False, batch_size=None)#
Parameters:
  • axis (str)

  • n_components (int)

  • whiten (bool)

  • batch_size (int | None)

Return type:

None

class IncrementalPCATransformer(*args, **kwargs)[source]#

Bases: AdaptiveDecompTransformer[IncrementalPCA]
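
A sketch of incremental fitting over several chunks before projecting new data. Accessing the wrapped estimator through .state and passing settings via the settings keyword follow common ezmsg-sigproc conventions and are assumptions here; the data shapes are illustrative:

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.learn.dim_reduce.adaptive_decomp import (
    IncrementalPCASettings,
    IncrementalPCATransformer,
)

proc = IncrementalPCATransformer(
    settings=IncrementalPCASettings(axis="!time", n_components=4, whiten=True)
)

# Feed several chunks; each partial_fit call refines the wrapped sklearn IncrementalPCA.
# Axes are omitted for brevity; real messages typically carry a time axis as in the sketch above.
for _ in range(10):
    chunk = AxisArray(data=np.random.randn(200, 32), dims=["time", "ch"])
    proc.partial_fit(chunk)

# Peek at the underlying sklearn estimator (assumes the transformer exposes its state as .state).
print(proc.state.estimator.explained_variance_ratio_)

# Project a new chunk onto the 4 learned components.
out = proc(AxisArray(data=np.random.randn(50, 32), dims=["time", "ch"]))
print(out.data.shape)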

class IncrementalPCAUnit(*args, settings=None, **kwargs)[source]#

Bases: BaseAdaptiveDecompUnit[IncrementalPCASettings, IncrementalPCATransformer]

Parameters:

settings (Settings | None)

SETTINGS#

alias of IncrementalPCASettings

class MiniBatchNMFSettings(axis='!time', n_components=2, init='random', batch_size=1024, beta_loss='frobenius', tol=0.0001, max_no_improvement=None, max_iter=200, alpha_W=0.0, alpha_H='same', l1_ratio=0.0, forget_factor=0.7)[source]#

Bases: AdaptiveDecompSettings

Parameters:
  • axis (str)

  • n_components (int)

  • init (str | None)

  • batch_size (int)

  • beta_loss (str | float)

  • tol (float)

  • max_no_improvement (int | None)

  • max_iter (int)

  • alpha_W (float)

  • alpha_H (float | str)

  • l1_ratio (float)

  • forget_factor (float)

init: Optional[str] = 'random'#

One of ‘random’, ‘nndsvd’, ‘nndsvda’, ‘nndsvdar’, ‘custom’, or None.

batch_size: int = 1024#

batch_size is used only when doing a full fit (i.e., a reset), and otherwise as the exponent applied to forget_factor, so a very small batch_size causes the model to update more slowly. It is better to set batch_size larger than the expected chunk size and to control the learning rate with forget_factor instead (see the sketch after MiniBatchNMFTransformer below).

beta_loss: Union[str, float] = 'frobenius'#

One of ‘frobenius’, ‘kullback-leibler’, or ‘itakura-saito’. Note that values other than ‘frobenius’ (or 2) and ‘kullback-leibler’ (or 1) lead to significantly slower fits. For beta_loss <= 0 (or ‘itakura-saito’), the input matrix X cannot contain zeros.

tol: float = 0.0001#
max_no_improvement: Optional[int] = None#
max_iter: int = 200#
alpha_W: float = 0.0#
alpha_H: Union[float, str] = 'same'#
l1_ratio: float = 0.0#
forget_factor: float = 0.7#
__init__(axis='!time', n_components=2, init='random', batch_size=1024, beta_loss='frobenius', tol=0.0001, max_no_improvement=None, max_iter=200, alpha_W=0.0, alpha_H='same', l1_ratio=0.0, forget_factor=0.7)#
Parameters:
  • axis (str)

  • n_components (int)

  • init (str | None)

  • batch_size (int)

  • beta_loss (str | float)

  • tol (float)

  • max_no_improvement (int | None)

  • max_iter (int)

  • alpha_W (float)

  • alpha_H (float | str)

  • l1_ratio (float)

  • forget_factor (float)

Return type:

None

class MiniBatchNMFTransformer(*args, **kwargs)[source]#

Bases: AdaptiveDecompTransformer[MiniBatchNMF]
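
A sketch that follows the batch_size guidance above: keep batch_size above the expected chunk size and steer adaptation with forget_factor instead. NMF requires nonnegative input (e.g., spectral power rather than raw voltage). The settings keyword and callable usage are the same assumptions as in the earlier sketches:

import numpy as np
from ezmsg.util.messages.axisarray import AxisArray
from ezmsg.learn.dim_reduce.adaptive_decomp import (
    MiniBatchNMFSettings,
    MiniBatchNMFTransformer,
)

proc = MiniBatchNMFTransformer(
    settings=MiniBatchNMFSettings(
        axis="!time",
        n_components=3,
        batch_size=2048,    # larger than any expected chunk
        forget_factor=0.9,  # controls how quickly past chunks are down-weighted
    )
)

# Nonnegative chunks; axes omitted for brevity.
for _ in range(5):
    chunk = AxisArray(data=np.abs(np.random.randn(128, 32)), dims=["time", "ch"])
    proc.partial_fit(chunk)

out = proc(AxisArray(data=np.abs(np.random.randn(128, 32)), dims=["time", "ch"]))
print(out.data.shape)  # 3 components per time sample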

class MiniBatchNMFUnit(*args, settings=None, **kwargs)[source]#

Bases: BaseAdaptiveDecompUnit[MiniBatchNMFSettings, MiniBatchNMFTransformer]

Parameters:

settings (Settings | None)

SETTINGS#

alias of MiniBatchNMFSettings
