Skip to content

Commit e99af17

Browse files
committed
adjusted docstrings for documentation
1 parent c91175b commit e99af17

8 files changed

Lines changed: 1428 additions & 3 deletions

File tree

mambular/models/fttransformer.py

Lines changed: 303 additions & 0 deletions
Large diffs are not rendered by default.

mambular/models/mambular.py

Lines changed: 339 additions & 0 deletions
Large diffs are not rendered by default.

mambular/models/mlp.py

Lines changed: 237 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,252 @@
66

77

88
class MLPRegressor(SklearnBaseRegressor):
    """
    Multi-Layer Perceptron regressor. This class extends the SklearnBaseRegressor class and uses the MLP model
    with the default MLP configuration.

    The accepted arguments to the MLPRegressor class include both the attributes in the DefaultMLPConfig dataclass
    and the parameters for the Preprocessor class.

    Parameters
    ----------
    lr : float, default=1e-04
        Learning rate for the optimizer.
    lr_patience : int, default=10
        Number of epochs with no improvement after which learning rate will be reduced.
    weight_decay : float, default=1e-06
        Weight decay (L2 penalty) for the optimizer.
    lr_factor : float, default=0.1
        Factor by which the learning rate will be reduced.
    layer_sizes : list, default=(128, 128, 32)
        Sizes of the layers in the MLP.
    activation : callable, default=nn.SELU()
        Activation function for the MLP layers.
    skip_layers : bool, default=False
        Whether to skip layers in the MLP.
    dropout : float, default=0.5
        Dropout rate for regularization.
    norm : str, default=None
        Normalization method to be used, if any.
    use_glu : bool, default=False
        Whether to use Gated Linear Units (GLU) in the MLP.
    skip_connections : bool, default=False
        Whether to use skip connections in the MLP.
    batch_norm : bool, default=False
        Whether to use batch normalization in the MLP layers.
    layer_norm : bool, default=False
        Whether to use layer normalization in the MLP layers.
    n_bins : int, default=50
        The number of bins to use for numerical feature binning. This parameter is relevant
        only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    numerical_preprocessing : str, default="ple"
        The preprocessing strategy for numerical features. Valid options are
        'binning', 'one_hot', 'standardization', and 'normalization'.
    use_decision_tree_bins : bool, default=False
        If True, uses decision tree regression/classification to determine
        optimal bin edges for numerical feature binning. This parameter is
        relevant only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    binning_strategy : str, default="uniform"
        Defines the strategy for binning numerical features. Options include 'uniform',
        'quantile', or other sklearn-compatible strategies.
    cat_cutoff : float or int, default=0.03
        Indicates the cutoff after which integer values are treated as categorical.
        If float, it's treated as a percentage. If int, it's the maximum number of
        unique values for a column to be considered categorical.
    treat_all_integers_as_numerical : bool, default=False
        If True, all integer columns will be treated as numerical, regardless
        of their unique value count or proportion.
    degree : int, default=3
        The degree of the polynomial features to be used in preprocessing.
    knots : int, default=12
        The number of knots to be used in spline transformations.

    Notes
    -----
    - The accepted arguments to the MLPRegressor class are the same as the attributes in the DefaultMLPConfig dataclass.
    - MLPRegressor uses SklearnBaseRegressor as the parent class. The methods for fitting, predicting, and evaluating the model are inherited from the parent class. Please refer to the parent class for more information.

    See Also
    --------
    mambular.models.SklearnBaseRegressor : The parent class for MLPRegressor.

    Examples
    --------
    >>> from mambular.models import MLPRegressor
    >>> model = MLPRegressor(layer_sizes=[128, 128, 64], activation=nn.ReLU())
    >>> model.fit(X_train, y_train)
    >>> preds = model.predict(X_test)
    >>> model.evaluate(X_test, y_test)
    """

    def __init__(self, **kwargs):
        # Delegate all configuration to the shared sklearn-style base class,
        # pairing the MLP network with its default config dataclass.
        super().__init__(model=MLP, config=DefaultMLPConfig, **kwargs)
1189

1290

1391
class MLPClassifier(SklearnBaseClassifier):
    """
    Multi-Layer Perceptron classifier. This class extends the SklearnBaseClassifier class and uses the MLP model
    with the default MLP configuration.

    The accepted arguments to the MLPClassifier class include both the attributes in the DefaultMLPConfig dataclass
    and the parameters for the Preprocessor class.

    Parameters
    ----------
    lr : float, default=1e-04
        Learning rate for the optimizer.
    lr_patience : int, default=10
        Number of epochs with no improvement after which learning rate will be reduced.
    weight_decay : float, default=1e-06
        Weight decay (L2 penalty) for the optimizer.
    lr_factor : float, default=0.1
        Factor by which the learning rate will be reduced.
    layer_sizes : list, default=(128, 128, 32)
        Sizes of the layers in the MLP.
    activation : callable, default=nn.SELU()
        Activation function for the MLP layers.
    skip_layers : bool, default=False
        Whether to skip layers in the MLP.
    dropout : float, default=0.5
        Dropout rate for regularization.
    norm : str, default=None
        Normalization method to be used, if any.
    use_glu : bool, default=False
        Whether to use Gated Linear Units (GLU) in the MLP.
    skip_connections : bool, default=False
        Whether to use skip connections in the MLP.
    batch_norm : bool, default=False
        Whether to use batch normalization in the MLP layers.
    layer_norm : bool, default=False
        Whether to use layer normalization in the MLP layers.
    n_bins : int, default=50
        The number of bins to use for numerical feature binning. This parameter is relevant
        only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    numerical_preprocessing : str, default="ple"
        The preprocessing strategy for numerical features. Valid options are
        'binning', 'one_hot', 'standardization', and 'normalization'.
    use_decision_tree_bins : bool, default=False
        If True, uses decision tree regression/classification to determine
        optimal bin edges for numerical feature binning. This parameter is
        relevant only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    binning_strategy : str, default="uniform"
        Defines the strategy for binning numerical features. Options include 'uniform',
        'quantile', or other sklearn-compatible strategies.
    cat_cutoff : float or int, default=0.03
        Indicates the cutoff after which integer values are treated as categorical.
        If float, it's treated as a percentage. If int, it's the maximum number of
        unique values for a column to be considered categorical.
    treat_all_integers_as_numerical : bool, default=False
        If True, all integer columns will be treated as numerical, regardless
        of their unique value count or proportion.
    degree : int, default=3
        The degree of the polynomial features to be used in preprocessing.
    knots : int, default=12
        The number of knots to be used in spline transformations.

    Notes
    -----
    - The accepted arguments to the MLPClassifier class are the same as the attributes in the DefaultMLPConfig dataclass.
    - MLPClassifier uses SklearnBaseClassifier as the parent class. The methods for fitting, predicting, and evaluating the model are inherited from the parent class. Please refer to the parent class for more information.

    See Also
    --------
    mambular.models.SklearnBaseClassifier : The parent class for MLPClassifier.

    Examples
    --------
    >>> from mambular.models import MLPClassifier
    >>> model = MLPClassifier(layer_sizes=[128, 128, 64], activation=nn.ReLU())
    >>> model.fit(X_train, y_train)
    >>> preds = model.predict(X_test)
    >>> model.evaluate(X_test, y_test)
    """

    def __init__(self, **kwargs):
        # Delegate all configuration to the shared sklearn-style base class,
        # pairing the MLP network with its default config dataclass.
        super().__init__(model=MLP, config=DefaultMLPConfig, **kwargs)
16172

17173

18174
class MLPLSS(SklearnBaseLSS):
    """
    Multi-Layer Perceptron for distributional regression. This class extends the SklearnBaseLSS class and uses the MLP model
    with the default MLP configuration.

    The accepted arguments to the MLPLSS class include both the attributes in the DefaultMLPConfig dataclass
    and the parameters for the Preprocessor class.

    Parameters
    ----------
    lr : float, default=1e-04
        Learning rate for the optimizer.
    lr_patience : int, default=10
        Number of epochs with no improvement after which learning rate will be reduced.
    weight_decay : float, default=1e-06
        Weight decay (L2 penalty) for the optimizer.
    lr_factor : float, default=0.1
        Factor by which the learning rate will be reduced.
    layer_sizes : list, default=(128, 128, 32)
        Sizes of the layers in the MLP.
    activation : callable, default=nn.SELU()
        Activation function for the MLP layers.
    skip_layers : bool, default=False
        Whether to skip layers in the MLP.
    dropout : float, default=0.5
        Dropout rate for regularization.
    norm : str, default=None
        Normalization method to be used, if any.
    use_glu : bool, default=False
        Whether to use Gated Linear Units (GLU) in the MLP.
    skip_connections : bool, default=False
        Whether to use skip connections in the MLP.
    batch_norm : bool, default=False
        Whether to use batch normalization in the MLP layers.
    layer_norm : bool, default=False
        Whether to use layer normalization in the MLP layers.
    n_bins : int, default=50
        The number of bins to use for numerical feature binning. This parameter is relevant
        only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    numerical_preprocessing : str, default="ple"
        The preprocessing strategy for numerical features. Valid options are
        'binning', 'one_hot', 'standardization', and 'normalization'.
    use_decision_tree_bins : bool, default=False
        If True, uses decision tree regression/classification to determine
        optimal bin edges for numerical feature binning. This parameter is
        relevant only if `numerical_preprocessing` is set to 'binning' or 'one_hot'.
    binning_strategy : str, default="uniform"
        Defines the strategy for binning numerical features. Options include 'uniform',
        'quantile', or other sklearn-compatible strategies.
    task : str, default="regression"
        Indicates the type of machine learning task ('regression' or 'classification'). This can
        influence certain preprocessing behaviors, especially when using decision-tree-based binning
        or 'ple' (piecewise linear encoding).
    cat_cutoff : float or int, default=0.03
        Indicates the cutoff after which integer values are treated as categorical.
        If float, it's treated as a percentage. If int, it's the maximum number of
        unique values for a column to be considered categorical.
    treat_all_integers_as_numerical : bool, default=False
        If True, all integer columns will be treated as numerical, regardless
        of their unique value count or proportion.
    degree : int, default=3
        The degree of the polynomial features to be used in preprocessing.
    knots : int, default=12
        The number of knots to be used in spline transformations.

    Notes
    -----
    - The accepted arguments to the MLPLSS class are the same as the attributes in the DefaultMLPConfig dataclass.
    - MLPLSS uses SklearnBaseLSS as the parent class. The methods for fitting, predicting, and evaluating the model are inherited from the parent class. Please refer to the parent class for more information.

    See Also
    --------
    mambular.models.SklearnBaseLSS : The parent class for MLPLSS.

    Examples
    --------
    >>> from mambular.models import MLPLSS
    >>> model = MLPLSS(layer_sizes=[128, 128, 64], activation=nn.ReLU())
    >>> model.fit(X_train, y_train)
    >>> preds = model.predict(X_test)
    >>> model.evaluate(X_test, y_test)
    """

    def __init__(self, **kwargs):
        # Delegate all configuration to the shared sklearn-style base class,
        # pairing the MLP network with its default config dataclass.
        super().__init__(model=MLP, config=DefaultMLPConfig, **kwargs)

0 commit comments

Comments
 (0)