Skip to content

Commit

Permalink
Cleanup 2023 09 25 (#10)
Browse files Browse the repository at this point in the history
* chore: stripped unused code pieces

* chore: removed unused class Model in autoencoder_fastai2022 module

* feat: refactored gradient / parameter / activation tracking using the class ChildSearch in the search.py module

* chore: updated nbs to use new Telemetry interface

* chore: moved class ModelTelemetry and utilities to telemetry.py

* chore: updated nb ModelTelemetry imports

* chore: 0.1.2

* chore: rm requirements.txt

* feat: added bumpver and make bump-patch and make bump-minor for versioning

* chore: refactored ActivationsHistory etc by creating parent class History and inheriting

* chore: minor renamings of telemetry parameters for consistency

* feat: added unittests for telemetry

* chore: renamed telemetry parameters in nb
  • Loading branch information
eschmidt42 authored Sep 30, 2023
1 parent ae8a4bc commit eaa7a4d
Show file tree
Hide file tree
Showing 17 changed files with 993 additions and 1,406 deletions.
16 changes: 16 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ help:
@echo "compile-upgrade : upgrade the environment requirements."
@echo "update : pip install new requirements into the virtual environment."
@echo "test : run pytests."
@echo "bump-patch : bump the patch version."
@echo "bump-minor : bump the minor version."

# create a virtual environment
.PHONY: venv
Expand Down Expand Up @@ -89,3 +91,17 @@ update:
test:
source .venv/bin/activate && \
pytest -vx .

# ==============================================================================
# bump version
# ==============================================================================

.PHONY: bump-patch
bump-patch:
source .venv/bin/activate && \
bumpver --patch

.PHONY: bump-minor
bump-minor:
source .venv/bin/activate && \
bumpver --minor
93 changes: 30 additions & 63 deletions nbs/cnn_autoencoder_fastai2022.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,20 @@
"\n",
"import random_neural_net_models.autoencoder_fastai2022 as ae\n",
"import random_neural_net_models.convolution_lecun1990 as conv_lecun1990\n",
"import random_neural_net_models.telemetry as telemetry\n",
"\n",
"sns.set_theme()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DO_OVERFITTING_ONLY = True"
]
},
{
"cell_type": "code",
"execution_count": null,
Expand Down Expand Up @@ -204,55 +214,6 @@
"## overfitting"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def check_module_name_is_activation(name: str) -> bool:\n",
" return re.match(r\".*act\\d$\", name) is not None\n",
"\n",
"\n",
"print(\n",
" check_module_name_is_activation(\"act1\"),\n",
" check_module_name_is_activation(\"blub_act1\"),\n",
" check_module_name_is_activation(\"blub\"),\n",
" check_module_name_is_activation(\"act1_bla\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def check_module_name_grad_relevant(name: str) -> bool:\n",
" return (\n",
" name\n",
" not in [\n",
" \"add_dim\",\n",
" \"add_padding\",\n",
" \"rm_dim\",\n",
" \"rm_padding\",\n",
" \"conv2flat\",\n",
" \"flat2conv\",\n",
" \"encoder\",\n",
" \"decoder\",\n",
" ]\n",
" ) and re.match(r\".*act\\d$\", name) is None\n",
"\n",
"\n",
"print(\n",
" check_module_name_grad_relevant(\"rm_dim\"),\n",
" check_module_name_grad_relevant(\"encoder\"),\n",
" check_module_name_grad_relevant(\"decoder\"),\n",
" check_module_name_grad_relevant(\"dec_bn3\"),\n",
" check_module_name_grad_relevant(\"dec_act3\"),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand All @@ -267,12 +228,12 @@
"outputs": [],
"source": [
"model = ae.CNNAutoEncoder()\n",
"model = conv_lecun1990.ModelTelemetry(\n",
"model = telemetry.ModelTelemetry(\n",
" model,\n",
" func_is_act=check_module_name_is_activation,\n",
" func_is_grad_relevant=check_module_name_grad_relevant,\n",
" loss_names=(\"total\",),\n",
" sub_modules=(\"encoder\", \"decoder\"),\n",
" activation_name_patterns=(\".*act.*\",),\n",
" gradients_name_patterns=(\".*conv\\d$\",),\n",
" parameter_name_patterns=(\".*conv\\d$\",),\n",
")\n",
"model.double()\n",
"model.to(device);"
Expand Down Expand Up @@ -388,9 +349,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.draw_parameter_stats(\n",
" \"enc_conv1\", \"enc_conv2\", \"dec_deconv1\", \"dec_deconv2\"\n",
")"
"model.draw_parameter_stats()"
]
},
{
Expand Down Expand Up @@ -491,6 +450,16 @@
"model.clean_hooks()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if DO_OVERFITTING_ONLY:\n",
" raise SystemExit(\"Skipping training beyond overfitting.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand Down Expand Up @@ -542,15 +511,15 @@
"outputs": [],
"source": [
"model = ae.CNNAutoEncoder()\n",
"model = conv_lecun1990.ModelTelemetry(\n",
"model = telemetry.ModelTelemetry(\n",
" model,\n",
" func_is_act=check_module_name_is_activation,\n",
" func_is_grad_relevant=check_module_name_grad_relevant,\n",
" loss_names=(\"total\",),\n",
" gradients_every_n=100,\n",
" activations_every_n=100,\n",
" parameter_every_n=100,\n",
" sub_modules=(\"encoder\", \"decoder\"),\n",
" activation_name_patterns=(\".*act.*\",),\n",
" gradients_name_patterns=(\".*conv\\d\",),\n",
" parameter_name_patterns=(\".*conv\\d\",),\n",
")\n",
"model.double()\n",
"model.to(device);"
Expand Down Expand Up @@ -686,9 +655,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.draw_parameter_stats(\n",
" \"enc_conv1\", \"enc_conv2\", \"dec_deconv1\", \"dec_deconv2\"\n",
")"
"model.draw_parameter_stats()"
]
},
{
Expand Down
93 changes: 30 additions & 63 deletions nbs/cnn_autoencoder_fastai2022_fashion.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,20 @@
"\n",
"import random_neural_net_models.autoencoder_fastai2022 as ae\n",
"import random_neural_net_models.convolution_lecun1990 as conv_lecun1990\n",
"import random_neural_net_models.telemetry as telemetry\n",
"\n",
"sns.set_theme()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DO_OVERFITTING_ONLY = True"
]
},
{
"cell_type": "code",
"execution_count": null,
Expand Down Expand Up @@ -204,55 +214,6 @@
"## overfitting"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def check_module_name_is_activation(name: str) -> bool:\n",
" return re.match(r\".*act\\d$\", name) is not None\n",
"\n",
"\n",
"print(\n",
" check_module_name_is_activation(\"act1\"),\n",
" check_module_name_is_activation(\"blub_act1\"),\n",
" check_module_name_is_activation(\"blub\"),\n",
" check_module_name_is_activation(\"act1_bla\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def check_module_name_grad_relevant(name: str) -> bool:\n",
" return (\n",
" name\n",
" not in [\n",
" \"add_dim\",\n",
" \"add_padding\",\n",
" \"rm_dim\",\n",
" \"rm_padding\",\n",
" \"conv2flat\",\n",
" \"flat2conv\",\n",
" \"encoder\",\n",
" \"decoder\",\n",
" ]\n",
" ) and re.match(r\".*act\\d$\", name) is None\n",
"\n",
"\n",
"print(\n",
" check_module_name_grad_relevant(\"rm_dim\"),\n",
" check_module_name_grad_relevant(\"encoder\"),\n",
" check_module_name_grad_relevant(\"decoder\"),\n",
" check_module_name_grad_relevant(\"dec_bn3\"),\n",
" check_module_name_grad_relevant(\"dec_act3\"),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand All @@ -267,12 +228,12 @@
"outputs": [],
"source": [
"model = ae.CNNAutoEncoder()\n",
"model = conv_lecun1990.ModelTelemetry(\n",
"model = telemetry.ModelTelemetry(\n",
" model,\n",
" func_is_act=check_module_name_is_activation,\n",
" func_is_grad_relevant=check_module_name_grad_relevant,\n",
" loss_names=(\"total\",),\n",
" sub_modules=(\"encoder\", \"decoder\"),\n",
" activation_name_patterns=(\".*act.*\",),\n",
" gradients_name_patterns=(\".*conv\\d$\",),\n",
" parameter_name_patterns=(\".*conv\\d$\",),\n",
")\n",
"model.double()\n",
"model.to(device);"
Expand Down Expand Up @@ -388,9 +349,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.draw_parameter_stats(\n",
" \"enc_conv1\", \"enc_conv2\", \"dec_deconv1\", \"dec_deconv2\"\n",
")"
"model.draw_parameter_stats()"
]
},
{
Expand Down Expand Up @@ -491,6 +450,16 @@
"model.clean_hooks()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if DO_OVERFITTING_ONLY:\n",
" raise SystemExit(\"Skipping training beyond overfitting.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand Down Expand Up @@ -549,15 +518,15 @@
"outputs": [],
"source": [
"model = ae.CNNAutoEncoder()\n",
"model = conv_lecun1990.ModelTelemetry(\n",
"model = telemetry.ModelTelemetry(\n",
" model,\n",
" func_is_act=check_module_name_is_activation,\n",
" func_is_grad_relevant=check_module_name_grad_relevant,\n",
" loss_names=(\"total\",),\n",
" gradients_every_n=100,\n",
" activations_every_n=100,\n",
" parameter_every_n=100,\n",
" sub_modules=(\"encoder\", \"decoder\"),\n",
" activation_name_patterns=(\".*act.*\",),\n",
" gradients_name_patterns=(\".*conv\\d\",),\n",
" parameter_name_patterns=(\".*conv\\d\",),\n",
")\n",
"model.double()\n",
"model.to(device);"
Expand Down Expand Up @@ -693,9 +662,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.draw_parameter_stats(\n",
" \"enc_conv1\", \"enc_conv2\", \"dec_deconv1\", \"dec_deconv2\"\n",
")"
"model.draw_parameter_stats()"
]
},
{
Expand Down
Loading

0 comments on commit eaa7a4d

Please sign in to comment.