Skip to content

Commit

Permalink
final results ready for publishing
Browse files Browse the repository at this point in the history
  • Loading branch information
davidsteinar committed Oct 7, 2018
1 parent dc98606 commit 8cb29a7
Show file tree
Hide file tree
Showing 74 changed files with 7,477 additions and 2,543 deletions.
844 changes: 844 additions & 0 deletions choose_threshold.ipynb

Large diffs are not rendered by default.

1,030 changes: 0 additions & 1,030 deletions data/concrete.txt

This file was deleted.

707 changes: 0 additions & 707 deletions data/loaco2.csv

This file was deleted.

611 changes: 611 additions & 0 deletions find_damage_rugl.ipynb

Large diffs are not rendered by default.

14 changes: 9 additions & 5 deletions notebooks/testset_error_distribution.ipynb
Expand Up @@ -119,11 +119,15 @@
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/dsteinar/miniconda3/lib/python3.6/site-packages/torch/serialization.py:333: UserWarning: Couldn't retrieve source code for container of type Model. It won't be checked for correctness upon loading.\n",
" \"type \" + container_type.__name__ + \". It won't be checked \"\n"
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: '../results/trained_autoencoder_correct.pt'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-4-49b5fb759409>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'../results/trained_autoencoder_correct.pt'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'cpu'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mloss_criterion\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMSELoss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreduce\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda3/lib/python3.6/site-packages/torch/serialization.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(f, map_location, pickle_module)\u001b[0m\n\u001b[1;32m 299\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mversion_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m3\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpathlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 300\u001b[0m \u001b[0mnew_fd\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 301\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'rb'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 302\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_load\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpickle_module\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '../results/trained_autoencoder_correct.pt'"
]
}
],
Expand Down
2 changes: 1 addition & 1 deletion notebooks/z24-data-explore.ipynb
Expand Up @@ -857,7 +857,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
"version": "3.7.0"
}
},
"nbformat": 4,
Expand Down
78 changes: 78 additions & 0 deletions results/autoencoder_full_orthogonal.log
@@ -0,0 +1,78 @@
###Starting script###
torch version: 0.4.0
Number CUDA Devices: 1
batch size: 1024
epochs: 50
Model(
(h1): Linear(in_features=753, out_features=512, bias=True)
(h2): Linear(in_features=512, out_features=256, bias=True)
(z): Linear(in_features=256, out_features=128, bias=True)
(h4): Linear(in_features=128, out_features=256, bias=True)
(h5): Linear(in_features=256, out_features=512, bias=True)
(h6): Linear(in_features=512, out_features=700, bias=True)
)
1073980
{'dropout_p': 0.1, 'learning_rate': 0.0001, 'weight_init': <function orthogonal_ at 0x7f333012ad08>, 'hidden_size1': 512, 'hidden_size2': 256, 'z_size': 128}
Epoch: 0
train loss: 1.3198069856885006
validation loss: 0.558456106178211
Epoch: 1
train loss: 0.9861347221346907
validation loss: 0.5091420509518451
Epoch: 2
train loss: 0.8863865082711992
validation loss: 0.4833347361597708
Epoch: 3
train loss: 0.8200794545329572
validation loss: 0.4704232626449207
Epoch: 4
train loss: 0.7788513942858797
validation loss: 0.4596094799970713
Epoch: 5
train loss: 0.749287265726529
validation loss: 0.4501727740575053
###Starting script###
torch version: 0.4.0
Number CUDA Devices: 1
batch size: 1024
epochs: 10
Model(
(h1): Linear(in_features=1453, out_features=512, bias=True)
(h2): Linear(in_features=512, out_features=256, bias=True)
(z): Linear(in_features=256, out_features=128, bias=True)
(h4): Linear(in_features=128, out_features=256, bias=True)
(h5): Linear(in_features=256, out_features=512, bias=True)
(h6): Linear(in_features=512, out_features=1400, bias=True)
)
1791480
{'dropout_p': 0.1, 'learning_rate': 0.0001, 'weight_init': <function orthogonal_ at 0x7fdbd646ed08>, 'hidden_size1': 512, 'hidden_size2': 256, 'z_size': 128}
Epoch: 0
train loss: 1.4884252085255971
validation loss: 0.6099970621147236
Epoch: 1
train loss: 1.0927252587524876
validation loss: 0.5750192816876158
Epoch: 2
train loss: 1.0507070927246325
validation loss: 0.5455901472345938
Epoch: 3
train loss: 1.0248937394092048
validation loss: 0.5294280034894682
Epoch: 4
train loss: 1.001809555011383
validation loss: 0.515722020077303
Epoch: 5
train loss: 0.9811841305885194
validation loss: 0.5057342650942536
Epoch: 6
train loss: 0.9590787479141052
validation loss: 0.4985074367971486
Epoch: 7
train loss: 0.9381276072421367
validation loss: 0.49097703907340134
Epoch: 8
train loss: 0.9231405416503549
validation loss: 0.4890101517978977
Epoch: 9
train loss: 0.9197049505900645
validation loss: 0.48701937147710894
45 changes: 45 additions & 0 deletions results/autoencoder_orthogonal100.log
@@ -0,0 +1,45 @@
###Starting script###
torch version: 0.4.0
Number CUDA Devices: 1
batch size: 1024
epochs: 10
Model(
(h1): Linear(in_features=753, out_features=512, bias=True)
(h2): Linear(in_features=512, out_features=256, bias=True)
(z): Linear(in_features=256, out_features=128, bias=True)
(h4): Linear(in_features=128, out_features=256, bias=True)
(h5): Linear(in_features=256, out_features=512, bias=True)
(h6): Linear(in_features=512, out_features=700, bias=True)
)
1073980
{'dropout_p': 0.1, 'learning_rate': 0.0001, 'weight_init': <function orthogonal_ at 0x7fe37f67fd08>, 'hidden_size1': 512, 'hidden_size2': 256, 'z_size': 128}
Epoch: 0
train loss: 1.3358571395675611
validation loss: 0.5798759653236921
Epoch: 1
train loss: 1.0068298294729154
validation loss: 0.5185036406526146
Epoch: 2
train loss: 0.9056229343219129
validation loss: 0.4944814006102425
Epoch: 3
train loss: 0.8325434875590723
validation loss: 0.47947885217498754
Epoch: 4
train loss: 0.7917809133775522
validation loss: 0.46133572218806057
Epoch: 5
train loss: 0.7629912252517266
validation loss: 0.4559700830981041
Epoch: 6
train loss: 0.7400819475849739
validation loss: 0.4440603829715951
Epoch: 7
train loss: 0.7182545222304947
validation loss: 0.43812326487067316
Epoch: 8
train loss: 0.7033397049075713
validation loss: 0.4353056059156069
Epoch: 9
train loss: 0.6999109750188136
validation loss: 0.435301588744098
45 changes: 45 additions & 0 deletions results/autoencoder_orthogonal500_lr001.log
@@ -0,0 +1,45 @@
###Starting script###
torch version: 0.4.0
Number CUDA Devices: 1
batch size: 1024
epochs: 10
Model(
(h1): Linear(in_features=3553, out_features=512, bias=True)
(h2): Linear(in_features=512, out_features=256, bias=True)
(z): Linear(in_features=256, out_features=128, bias=True)
(h4): Linear(in_features=128, out_features=256, bias=True)
(h5): Linear(in_features=256, out_features=512, bias=True)
(h6): Linear(in_features=512, out_features=3500, bias=True)
)
3943980
{'dropout_p': 0.1, 'learning_rate': 0.001, 'weight_init': <function orthogonal_ at 0x7f483339bd08>, 'hidden_size1': 512, 'hidden_size2': 256, 'z_size': 128}
Epoch: 0
train loss: 1.2955295446898545
validation loss: 0.5972153081705696
Epoch: 1
train loss: 1.0771710993743104
validation loss: 0.5656329549456898
Epoch: 2
train loss: 1.0453982466230543
validation loss: 0.5478053587831949
Epoch: 3
train loss: 1.028692602707624
validation loss: 0.5393279070132657
Epoch: 4
train loss: 1.0182944227758433
validation loss: 0.5366644602857138
Epoch: 5
train loss: 1.0099200473816616
validation loss: 0.5332107117301539
Epoch: 6
train loss: 1.002841280357025
validation loss: 0.5287646327363817
Epoch: 7
train loss: 0.9889586468313403
validation loss: 0.5252069997160058
Epoch: 8
train loss: 0.9735605827034461
validation loss: 0.5212440446019173
Epoch: 9
train loss: 0.9678559675308167
validation loss: 0.5215851792379429

0 comments on commit 8cb29a7

Please sign in to comment.