diff --git a/11_deep_learning/01-Image-Restoration.ipynb b/11_deep_learning/01-Image-Restoration.ipynb
index 0364a8a..9c7d378 100644
--- a/11_deep_learning/01-Image-Restoration.ipynb
+++ b/11_deep_learning/01-Image-Restoration.ipynb
@@ -16,7 +16,7 @@
     "\n",
     ">conda create -n 'dl-biapol' python=3.7 \n",
     "conda activate dl-biapol \n",
-    "pip install tensorflow-gpu=2.4.1 keras=2.3.1 n2v jupyter scikit-image gputools\n",
+    "pip install tensorflow-gpu==2.4.1 keras==2.3.1 n2v jupyter scikit-image gputools\n",
     "\n",
     "Finally open this notebook using `jupyter notebook`\n",
     "\n",
diff --git a/11_deep_learning/02-Image-Semantic-Segmentation.ipynb b/11_deep_learning/02-Image-Semantic-Segmentation.ipynb
index bf8a876..62a3b1e 100644
--- a/11_deep_learning/02-Image-Semantic-Segmentation.ipynb
+++ b/11_deep_learning/02-Image-Semantic-Segmentation.ipynb
@@ -2,15 +2,15 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "df043420",
+   "id": "61a163c8",
    "metadata": {},
    "source": [
-    "### This notebook is adapted from https://github.com/dl4mia/04_instance_segmentation/blob/main/1_semantic_segmentation_2D.ipynb"
+    "### This notebook is adapted from **https://github.com/dl4mia/04_instance_segmentation/blob/main/1_semantic_segmentation_2D.ipynb**"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "f42b61ff",
+   "id": "8d8bb293",
    "metadata": {},
    "source": [
     "In this notebook, we will perform pixel-wise segmentation or semantic segmentation on some microscopy images using a standard model architecture called the U-Net. \n",
@@ -24,7 +24,7 @@
     "\n",
     ">conda create -n 'dl-biapol' python=3.7 \n",
     "conda activate dl-biapol \n",
-    "pip install tensorflow-gpu=2.4.1 keras=2.3.1 n2v jupyter scikit-image gputools\n",
+    "pip install tensorflow-gpu==2.4.1 keras==2.3.1 n2v jupyter scikit-image gputools\n",
     "\n",
     "Finally open this notebook using `jupyter notebook`\n",
     "\n",
@@ -37,7 +37,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "3c078f0c",
+   "id": "aae6c93c",
    "metadata": {},
    "source": [
     "### Get Dependencies"
@@ -46,7 +46,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "88cf29aa",
+   "id": "787ee718",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -71,7 +71,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "22059655",
+   "id": "eaa7d193",
    "metadata": {},
    "source": [
     "### Data"
@@ -79,7 +79,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "62cdd825",
+   "id": "2f491c62",
    "metadata": {},
    "source": [
     "> First we download some sample images and corresponding masks"
@@ -88,7 +88,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "0b96ab62",
+   "id": "880568ef",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -103,7 +103,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "57af5782",
+   "id": "2f8365a6",
    "metadata": {},
    "source": [
     "> Next we load the data, generate from the annotation masks background/foreground/cell border masks, and crop out a central patch (this is just for simplicity, as it makes our life a bit easier when all images have the same shape)\n"
@@ -112,7 +112,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "652b1f9c",
+   "id": "b7eadac0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -140,7 +140,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "d92f9390",
+   "id": "5ddf35bf",
    "metadata": {},
    "source": [
     "
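
The segmentation notebook's markdown above describes a preprocessing step whose code cell is not shown in this diff: deriving background/foreground/cell-border masks from the annotation masks and cropping a central patch. Below is a minimal sketch of what such a step might look like; it is not the notebook's actual code, and the function names and the 256-pixel patch size are assumptions for illustration only. It uses numpy and scikit-image, which are already in the pip install line.

```python
# Illustrative sketch only (assumed helper names, assumed patch size), not the notebook's code.
import numpy as np
from skimage.segmentation import find_boundaries


def to_three_class_mask(labels: np.ndarray) -> np.ndarray:
    """Convert an instance-label annotation mask to 0=background, 1=foreground, 2=cell border."""
    border = find_boundaries(labels, mode="inner")   # pixels on the inner rim of each labelled cell
    mask = np.zeros(labels.shape, dtype=np.uint8)
    mask[labels > 0] = 1                             # every pixel inside a cell is foreground
    mask[border] = 2                                 # overwrite the rim with the border class
    return mask


def center_crop(arr: np.ndarray, size: int = 256) -> np.ndarray:
    """Crop a central (size x size) patch so all images end up with the same shape."""
    h, w = arr.shape[:2]
    top, left = (h - size) // 2, (w - size) // 2
    return arr[top:top + size, left:left + size]


if __name__ == "__main__":
    # Synthetic annotation mask with two "cells", just to show the shapes and classes produced.
    labels = np.zeros((512, 512), dtype=np.int32)
    labels[100:200, 120:220] = 1
    labels[300:380, 250:360] = 2
    three_class = center_crop(to_three_class_mask(labels))
    print(three_class.shape, np.unique(three_class))  # (256, 256) [0 1 2]
```

The extra border class is a common trick in U-Net training: predicting cell rims as a separate class helps later post-processing split touching cells that plain foreground/background segmentation would merge.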