refactor hypertraining.py to improve model layer handling and response plotting; adjust data settings for batch processing
@@ -245,18 +245,18 @@ class HyperTraining:
         dtype = getattr(torch, dtype)
         afunc = trial.suggest_categorical_optional("model_activation_func", self.model_settings.model_activation_func)
         # T0 = trial.suggest_float_optional("T0", self.model_settings.satabsT0, log=True)
 
         layers = []
         last_dim = input_dim
         n_nodes = last_dim
         for i in range(n_layers):
             if hidden_dim_override := self.model_settings.overrides.get(f"n_hidden_nodes_{i}", False):
-                hidden_dim = trial.suggest_int_optional(f"model_hidden_dim_{i}", hidden_dim_override, force=True)
+                hidden_dim = trial.suggest_int_optional(f"model_hidden_dim_{i}", hidden_dim_override)
             else:
                 hidden_dim = trial.suggest_int_optional(
                     f"model_hidden_dim_{i}",
                     self.model_settings.n_hidden_nodes,
                     # step=2,
                 )
             layers.append(util.complexNN.SemiUnitaryLayer(last_dim, hidden_dim, dtype=dtype))
             last_dim = hidden_dim
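Note: suggest_int_optional is not part of Optuna's Trial API; it appears to be a project-specific wrapper that accepts either a fixed value or a search range. A minimal sketch of the assumed behavior, including the force flag dropped above (names and semantics are assumptions, not the project's actual helper):

import optuna

def suggest_int_optional(trial: optuna.trial.Trial, name, value, force=False):
    # Assumed behavior: a fixed int is recorded as a user attribute
    # instead of being sampled, unless force=True pushes it through
    # the sampler anyway; a (low, high) pair falls back to suggest_int.
    if isinstance(value, int) and not force:
        trial.set_user_attr(name, value)
        return value
    low, high = (value, value) if isinstance(value, int) else value
    return trial.suggest_int(name, low, high)

Recording fixed values in trial.user_attrs would also explain why build_title further down searches both trial.params and trial.user_attrs.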
@@ -642,6 +642,7 @@ class HyperTraining:
 
         if show:
             plt.show()
         return fig
 
+    def _plot_model_response_head(self, *signals, labels=None, sps=None, title_append="", subtitle="", show=True):
         if not hasattr(labels, "__iter__") or isinstance(labels, (str, type(None))):
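The hasattr/isinstance guard on labels is needed because strings are themselves iterable: a single label like "response" must be broadcast to all signals rather than split into characters. A standalone sketch of that normalization pattern (normalize_labels is illustrative, not a function from this file):

def normalize_labels(labels, n_signals):
    # Treat str and None as one shared (possibly missing) label for all
    # signals; genuine sequences pass through unchanged.
    if not hasattr(labels, "__iter__") or isinstance(labels, (str, type(None))):
        labels = [labels] * n_signals
    return list(labels)

assert normalize_labels("response", 2) == ["response", "response"]
assert normalize_labels(["in", "out"], 2) == ["in", "out"]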
@@ -684,7 +685,7 @@ class HyperTraining:
     ):
         data_settings_backup = copy.deepcopy(self.data_settings)
         pytorch_settings_backup = copy.deepcopy(self.pytorch_settings)
-        self.data_settings.drop_first = 100
+        self.data_settings.drop_first = 100*128
         self.data_settings.shuffle = False
         self.data_settings.train_split = 1.0
         self.pytorch_settings.batchsize = (
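The change from 100 to 100*128 suggests drop_first counts individual samples while the intent is to discard whole batches; assuming a batch size of 128, the new value skips the first 100 batches. An illustration (the numbers are assumptions, not taken from the settings files):

import numpy as np

batchsize = 128               # assumed to match pytorch_settings.batchsize
drop_first = 100 * batchsize  # 12_800 samples == 100 full batches
samples = np.arange(20_000)
kept = samples[drop_first:]   # discard the leading transient
assert kept[0] == 12_800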
@@ -739,11 +740,15 @@ class HyperTraining:
     @staticmethod
     def build_title(trial: optuna.trial.Trial):
         title_append = f"for trial {trial.number}"
-        model_n_layers = util.misc.multi_getattr((trial.params, trial.user_attrs), "model_n_layers", 0)
-        model_hidden_dims = [
+        model_n_hidden_layers = util.misc.multi_getattr((trial.params, trial.user_attrs), "model_n_hidden_layers", 0)
+        input_dim = util.misc.multi_getattr((trial.params, trial.user_attrs), "model_input_dim", 0)
+        model_dims = [
             util.misc.multi_getattr((trial.params, trial.user_attrs), f"model_hidden_dim_{i}", 0)
-            for i in range(model_n_layers)
+            for i in range(model_n_hidden_layers)
         ]
+        model_dims.insert(0, input_dim)
+        model_dims.append(2)
+        model_dims = [str(dim) for dim in model_dims]
         model_activation_func = util.misc.multi_getattr(
             (trial.params, trial.user_attrs),
             "model_activation_func",
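util.misc.multi_getattr is also project-specific; from its use here it looks a key up across several dict-likes and falls back to a default (trial.params and trial.user_attrs are both plain dicts in Optuna). A sketch of that assumed behavior:

def multi_getattr(sources, name, default=None):
    # Return the first value found for `name` across the given mappings.
    for source in sources:
        if name in source:
            return source[name]
    return default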
@@ -752,7 +757,7 @@ class HyperTraining:
         model_dtype = util.misc.multi_getattr((trial.params, trial.user_attrs), "model_dtype", "unknown dtype")
 
         subtitle = (
-            f"{model_n_layers} layers à ({', '.join(model_hidden_dims)}) units, {model_activation_func}, {model_dtype}"
+            f"{model_n_hidden_layers+2} layers à ({', '.join(model_dims)}) units, {model_activation_func}, {model_dtype}"
         )
 
         return title_append, subtitle
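Worked example of the new subtitle: the hidden dims now get the input dimension prepended and the fixed 2-unit output appended, so the reported layer count is model_n_hidden_layers + 2. With hypothetical trial values (3 hidden layers, input_dim 32, hidden dims 64/64/32):

model_n_hidden_layers = 3
input_dim = 32
model_dims = [64, 64, 32]          # hypothetical model_hidden_dim_{i} values
model_dims.insert(0, input_dim)
model_dims.append(2)               # fixed output layer, as in the diff
model_dims = [str(dim) for dim in model_dims]
subtitle = f"{model_n_hidden_layers + 2} layers à ({', '.join(model_dims)}) units"
assert subtitle == "5 layers à (32, 64, 64, 32, 2) units"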