
Commit

sherstpasha committed Feb 1, 2024
1 parent b230aa2 commit 164e741
Showing 3 changed files with 223 additions and 34 deletions.
41 changes: 9 additions & 32 deletions src/test.py
@@ -1,34 +1,11 @@
from sklearn.preprocessing import minmax_scale
import numpy as np

from thefittest.classifiers._mlpeaclassifier import MLPClassifierEA2
from thefittest.optimizers import SHADE
from thefittest.utils.random import random_weighted_sample
from sklearn.utils.estimator_checks import check_estimator

model = MLPClassifierEA2(iters=500, pop_size=250,
                         hidden_layers=(5,),
                         activation="relu",
                         weights_optimizer=SHADE,
                         weights_optimizer_args={"show_progress_each": 50})


def scale_data(data):
    # Min-max scaling that returns all ones when the data are constant,
    # unlike sklearn's minmax_scale, which returns zeros in that case.
    data_copy = data.copy()
    max_value = data_copy.max()
    min_value = data_copy.min()
    if max_value == min_value:
        scaled_data = np.ones_like(data_copy, dtype=np.float64)
    else:
        scaled_data = ((data_copy - min_value) / (max_value - min_value)).astype(np.float64)
    return scaled_data


# Example data: a constant vector, the degenerate case for min-max scaling
example_data = np.random.uniform(-100, 100, size=10).astype(np.float64) * 0 + 1

# Scale the data with the local scale_data function and sklearn's minmax_scale
scaled_data = scale_data(example_data)
scaled_data2 = minmax_scale(example_data)

# Display original and scaled data
print("Original Data:", example_data)
print("Scaled Data:", scaled_data)
print("Scaled Data 2:", scaled_data2)

print(random_weighted_sample(scaled_data, 10, False))
print(random_weighted_sample(scaled_data2, 10, False))

print(check_estimator(model))
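For reference, check_estimator runs scikit-learn's full API-compliance suite against the instance: cloning, refitting, parameter immutability, and input validation. A quicker hand-rolled smoke test might look like the sketch below; the iris data, the tiny iters/pop_size values, and the net_ attribute inspected at the end are illustrative assumptions, not part of this commit.

# Hedged smoke-test sketch (assumes the attributes set in MLPClassifierEA2.fit)
import numpy as np
from sklearn.datasets import load_iris
from thefittest.classifiers._mlpeaclassifier import MLPClassifierEA2
from thefittest.optimizers import SHADE

X, y = load_iris(return_X_y=True)
clf = MLPClassifierEA2(iters=25, pop_size=25, hidden_layers=(5,),
                       activation="relu", weights_optimizer=SHADE)
clf.fit(X, y)
print(clf.get_optimizer())      # the fitted weights optimizer
print(len(clf.net_._weights))   # number of evolved weights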
157 changes: 157 additions & 0 deletions src/thefittest/base/_model.py
@@ -1,11 +1,25 @@
from __future__ import annotations

from abc import ABCMeta, abstractmethod

from typing import Any
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union

from sklearn.base import BaseEstimator

import numpy as np
from numpy.typing import NDArray

from ..base import Net
from ..base._net import ACTIV_NAME_INV
from ..optimizers import DifferentialEvolution
from ..optimizers import GeneticAlgorithm
from ..optimizers import GeneticProgramming
from ..optimizers import SHADE
from ..optimizers import SHAGA
from ..optimizers import SelfCGA
from ..optimizers import SelfCGP
from ..optimizers import jDE
from ..utils.transformations import GrayCode  # assumed location of GrayCode in this package

# Alias for the optimizer classes accepted by "weights_optimizer"; mirrors the
# alias used in the classifiers module.
weights_type_optimizer_alias = Union[
    Type[DifferentialEvolution],
    Type[GeneticAlgorithm],
    Type[GeneticProgramming],
    Type[jDE],
    Type[SelfCGA],
    Type[SelfCGP],
    Type[SHADE],
    Type[SHAGA],
]

class Model:
    def _fit(
@@ -34,3 +48,146 @@ def fit(

    def predict(self, X: NDArray[np.float64]) -> NDArray[Union[np.float64, np.int64]]:
        return self._predict(X)


class BaseMLPEA(BaseEstimator, metaclass=ABCMeta):

    @abstractmethod
    def __init__(
        self,
        *,
        iters: int,
        pop_size: int,
        hidden_layers: Tuple[int, ...],
        activation: str = "sigma",
        offset: bool = True,
        weights_optimizer: weights_type_optimizer_alias = SHADE,
        weights_optimizer_args: Optional[dict[str, Any]] = None,
    ):
        self.iters = iters
        self.pop_size = pop_size
        self.hidden_layers = hidden_layers
        self.activation = activation
        self.offset = offset
        self.weights_optimizer = weights_optimizer
        self.weights_optimizer_args = weights_optimizer_args

    def _define_net(self: BaseMLPEA, n_inputs: int, n_outputs: int) -> Net:
        start = 0
        end = n_inputs
        inputs_id = set(range(start, end))

        net = Net(inputs=inputs_id)

        for n_layer in self.hidden_layers:
            start = end
            end = end + n_layer
            inputs_id = {(n_inputs - 1)}  # the offset (bias) input is wired into every layer
            hidden_id = set(range(start, end))
            activs = dict(zip(hidden_id, [ACTIV_NAME_INV[self.activation]] * len(hidden_id)))

            if self.offset:
                layer_net = Net(inputs=inputs_id) > Net(hidden_layers=[hidden_id], activs=activs)
            else:
                layer_net = Net(hidden_layers=[hidden_id], activs=activs)

            net = net > layer_net

        start = end
        end = end + n_outputs
        inputs_id = {(n_inputs - 1)}
        output_id = set(range(start, end))
        activs = dict(zip(output_id, [ACTIV_NAME_INV["softmax"]] * len(output_id)))

        if self.offset:
            layer_net = Net(inputs=inputs_id) > Net(outputs=output_id, activs=activs)
        else:
            layer_net = Net(outputs=output_id, activs=activs)

        net = net > layer_net
        net._offset = self.offset
        return net

    def _train_net(
        self: BaseMLPEA,
        net: Net,
        X_train: NDArray[np.float64],
        y_train: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        if self.weights_optimizer_args is not None:
            assert (
                "iters" not in self.weights_optimizer_args.keys()
                and "pop_size" not in self.weights_optimizer_args.keys()
            ), """Do not set "iters" or "pop_size" in "weights_optimizer_args".
            Use the estimator's own arguments instead."""
            for arg in (
                "fitness_function",
                "left",
                "right",
                "str_len",
                "genotype_to_phenotype",
                "minimization",
            ):
                assert (
                    arg not in self.weights_optimizer_args.keys()
                ), f"""Do not set "{arg}" in "weights_optimizer_args".
                It is defined automatically."""
            weights_optimizer_args = self.weights_optimizer_args.copy()
        else:
            weights_optimizer_args = {}

        weights_optimizer_args["iters"] = self.iters
        weights_optimizer_args["pop_size"] = self.pop_size
        left: NDArray[np.float64] = np.full(
            shape=len(net._weights), fill_value=-10, dtype=np.float64
        )
        right: NDArray[np.float64] = np.full(
            shape=len(net._weights), fill_value=10, dtype=np.float64
        )
        initial_population: Union[
            NDArray[np.float64], NDArray[np.byte]
        ] = DifferentialEvolution.float_population(weights_optimizer_args["pop_size"], left, right)
        initial_population[0] = net._weights.copy()

        # "fitness_function" is the module-level loss that evaluates a weight
        # vector on (X_train, y_train); the concrete estimator's module defines it.
        weights_optimizer_args["fitness_function"] = fitness_function
        weights_optimizer_args["fitness_function_args"] = {
            "net": net,
            "X": X_train,
            "targets": y_train,
        }

        if self.weights_optimizer in (SHADE, DifferentialEvolution, jDE):
            weights_optimizer_args["left"] = left
            weights_optimizer_args["right"] = right
        else:
            genotype_to_phenotype = GrayCode().fit(
                left_border=-10.0,
                right_border=10.0,
                num_variables=len(net._weights),
                bits_per_variable=16,
            )
            weights_optimizer_args["str_len"] = np.sum(genotype_to_phenotype._bits_per_variable)
            weights_optimizer_args["genotype_to_phenotype"] = genotype_to_phenotype.transform

        weights_optimizer_args["minimization"] = True
        optimizer = self.weights_optimizer(**weights_optimizer_args)
        optimizer.fit()

        # Store the fitted optimizer under a trailing-underscore attribute so the
        # "weights_optimizer" constructor parameter stays untouched (sklearn convention).
        self.weights_optimizer_ = optimizer

        phenotype = optimizer.get_fittest()["phenotype"]

        return phenotype

    def get_optimizer(
        self: BaseMLPEA,
    ) -> Union[
        DifferentialEvolution,
        GeneticAlgorithm,
        GeneticProgramming,
        jDE,
        SelfCGA,
        SelfCGP,
        SHADE,
        SHAGA,
    ]:
        return self.weights_optimizer_
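Sizing note for the bit-string branch above: with len(net._weights) variables at 16 bits each, "str_len" comes out to 16 × n_weights, and each 16-bit Gray-coded chunk decodes to one float in [-10, 10]. The standalone sketch below illustrates that decoding on random bits; it mirrors the idea behind GrayCode.fit/transform but is not thefittest's implementation.

import numpy as np

def gray_to_float(bits, left=-10.0, right=10.0):
    # Gray -> plain binary via cumulative XOR, then fixed-point scaling.
    binary = np.bitwise_xor.accumulate(bits)
    integer = binary.dot(1 << np.arange(len(bits))[::-1])
    return left + (right - left) * integer / (2 ** len(bits) - 1)

rng = np.random.default_rng(0)
n_weights, bits_per_variable = 4, 16
str_len = n_weights * bits_per_variable        # what "str_len" is set to
genotype = rng.integers(0, 2, size=str_len)
weights = [gray_to_float(chunk)
           for chunk in genotype.reshape(n_weights, bits_per_variable)]
print(weights)                                 # four floats in [-10, 10]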
59 changes: 57 additions & 2 deletions src/thefittest/classifiers/_mlpeaclassifier.py
@@ -9,6 +9,9 @@
import numpy as np
from numpy.typing import NDArray

from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils.multiclass import check_classification_targets

from ..base import Net
from ..base._model import Model
from ..base._net import ACTIV_NAME_INV
@@ -162,8 +165,12 @@ def _train_net(
            parts: NDArray[np.int64] = np.full(
                shape=len(net._weights), fill_value=16, dtype=np.int64
            )
            genotype_to_phenotype = GrayCode().fit(
                left_border=-10.0,
                right_border=10.0,
                num_variables=len(net._weights),
                bits_per_variable=16,
            )
            weights_optimizer_args["str_len"] = np.sum(genotype_to_phenotype._bits_per_variable)
            weights_optimizer_args["genotype_to_phenotype"] = genotype_to_phenotype.transform

@@ -197,6 +204,8 @@ def get_net(self: MLPEAClassifier) -> Net:
    def _fit(
        self: MLPEAClassifier, X: NDArray[np.float64], y: NDArray[Union[np.float64, np.int64]]
    ) -> MLPEAClassifier:

        if self._offset:
            X = np.hstack([X, np.ones((X.shape[0], 1))])
@@ -220,3 +229,49 @@ def _predict(
        output = self._net.forward(X)[0]
        y_pred = np.argmax(output, axis=1)
        return y_pred


from thefittest.base._model import BaseMLPEA
from sklearn.base import ClassifierMixin


class MLPClassifierEA2(ClassifierMixin, BaseMLPEA):
    def __init__(
        self,
        *,
        iters: int,
        pop_size: int,
        hidden_layers: Tuple[int, ...],
        activation: str = "sigma",
        offset: bool = True,
        weights_optimizer: weights_type_optimizer_alias = SHADE,
        weights_optimizer_args: Optional[dict[str, Any]] = None,
    ):

        super().__init__(
            iters=iters,
            pop_size=pop_size,
            hidden_layers=hidden_layers,
            activation=activation,
            offset=offset,
            weights_optimizer=weights_optimizer,
            weights_optimizer_args=weights_optimizer_args,
        )

    def fit(
        self: MLPClassifierEA2, X: NDArray[np.float64], y: NDArray[np.int64]
    ) -> MLPClassifierEA2:
        X, y = check_X_y(X, y)
        check_classification_targets(y)

        if self.offset:
            X = np.hstack([X, np.ones((X.shape[0], 1))])

        n_inputs: int = X.shape[1]
        # Map arbitrary class labels to indices 0..K-1 so the one-hot lookup
        # below works for any label values, not only 0..K-1.
        self.classes_, y_index = np.unique(y, return_inverse=True)
        n_outputs: int = len(self.classes_)
        eye: NDArray[np.float64] = np.eye(n_outputs, dtype=np.float64)
        proba: NDArray[np.float64] = eye[y_index]

        # Fitted attributes carry a trailing underscore (sklearn convention).
        self.net_ = self._define_net(n_inputs, n_outputs)
        self.net_._weights = self._train_net(self.net_, X, proba)
        return self
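The one-hot step in fit is a row lookup into an identity matrix: np.unique(..., return_inverse=True) maps arbitrary labels to indices 0..K-1, and eye[y_index] selects the matching one-hot rows, which is why fit works for label sets like {3, 7, 9} and not just 0..K-1. A tiny standalone illustration:

import numpy as np

y = np.array([3, 7, 3, 9])                # arbitrary class labels
classes, y_index = np.unique(y, return_inverse=True)
print(classes)                            # [3 7 9]
print(y_index)                            # [0 1 0 2]
eye = np.eye(len(classes), dtype=np.float64)
print(eye[y_index])                       # one-hot targets, shape (4, 3)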
