From 40b798ce276dc40ae7cbf4c13b25780f4cc3b919 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=92i=D0=BA=D1=82=D0=BE=D1=80i=D1=8F=20=D0=A1=D0=BA?=
 =?UTF-8?q?=D0=BE=D1=80i=D0=BA?=
Date: Tue, 31 Oct 2023 23:50:53 +0200
Subject: [PATCH] Update CI

---
 .github/workflows/ci.yml | 1 +
 README.md                | 8 ++++----
 config.yml               | 4 ++--
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2a4b676..b1858b5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -18,6 +18,7 @@ jobs:
     - name: Install dependencies
       run: |
         pip install -r requirements.txt
+        pip install tensorflow-cpu
       working-directory: .
 
     - name: Run Pylint
diff --git a/README.md b/README.md
index d7ad3bf..c7de35c 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ Implemented approach is pretty simple.
 * Undersampling
 * Augmentation
 * BatchNormalization
-* Dice + BSE Loss
+* Dice + BCE Loss
 
 ### Technologies
 * Python 3.9
@@ -69,8 +69,8 @@ To verify that your code adheres to python standards run linting as shown below:
 
 ## Featured Notebooks
 
-* [EDA Notebook](link)
-* [Kaggle Notebook](link)
+* [EDA Notebook](https://github.com/ViiSkor/SatelliteImgOfShips/blob/master/notebooks/airbus-eda.ipynb)
+* [Kaggle Notebook](https://github.com/ViiSkor/SatelliteImgOfShips/blob/master/notebooks/kaggle-notebook.ipynb)
 
 ## Project Contents
 
@@ -83,7 +83,7 @@
 │
 ├── notebooks           <- Notebooks for analysis and testing
 │   ├── airbus-eda      <- Notebooks for EDA
-│   └── Kaggle_Notebook <- Notebooks for Kaggle notebook that was used to run this project and inference testing.
+│   └── kaggle-notebook <- Notebooks for Kaggle notebook that was used to run this project and inference testing.
 │
 ├── core                <- Code for use in this project.
 │   ├── data            <- Example python package - place shared code in such a package
diff --git a/config.yml b/config.yml
index 83eb5a6..19ac02e 100644
--- a/config.yml
+++ b/config.yml
@@ -15,12 +15,12 @@ preprocessing:
 train:
   learning_rate: 0.01
   batch_size: 64
-  epochs: 10
+  epochs: 70
   shuffle: True
   do_augmentation: True
   reduceLROnPlat:
     factor: 0.33
-    patience: 2
+    patience: 10
     min_delta: 0.0001
     min_lr: 0.00000001
     cooldown: 0
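
Note (not part of the patch): the changes above touch the "Dice + BCE Loss" item in the README and the `train.reduceLROnPlat` block in config.yml. The following is a minimal, hypothetical Python/TensorFlow sketch of how such a config block and a combined Dice + BCE loss could be wired up; the function names `dice_bce_loss` and `build_callbacks`, and the exact wiring, are assumptions for illustration and are not taken from the repository.

# Hypothetical sketch: wiring config.yml's `train.reduceLROnPlat` section into a
# Keras ReduceLROnPlateau callback and defining a Dice + BCE loss.
# Names and wiring are illustrative assumptions, not the project's actual code.
import tensorflow as tf
import yaml


def dice_bce_loss(y_true, y_pred, smooth=1.0):
    """Binary cross-entropy plus soft Dice loss for binary segmentation masks."""
    y_true = tf.cast(y_true, tf.float32)
    # Pixel-wise BCE, averaged over the batch.
    bce = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
    # Soft Dice coefficient over the flattened masks.
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    dice = (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth
    )
    return bce + (1.0 - dice)


def build_callbacks(cfg_path="config.yml"):
    """Map the `train.reduceLROnPlat` block onto Keras's ReduceLROnPlateau."""
    with open(cfg_path) as f:
        train_cfg = yaml.safe_load(f)["train"]
    plat = train_cfg["reduceLROnPlat"]
    return [
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss",
            factor=plat["factor"],      # 0.33 in this patch
            patience=plat["patience"],  # raised from 2 to 10 above
            min_delta=plat["min_delta"],
            min_lr=plat["min_lr"],
            cooldown=plat["cooldown"],
            verbose=1,
        )
    ]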