diff --git a/README.md b/README.md index 3edc4994cf..fcc6f7a51e 100644 --- a/README.md +++ b/README.md @@ -69,10 +69,10 @@ segmentation model, a text sentiment model, a recommendation system, and a tabular model. For each of the applications, the code is much the same. -Read through the [Tutorials](https://docs.fast.ai/tutorial) to learn how -to train your own models on your own datasets. Use the navigation -sidebar to look through the fastai documentation. Every class, function, -and method is documented here. +Read through the [Tutorials](https://docs.fast.ai/tutorial.html) to +learn how to train your own models on your own datasets. Use the +navigation sidebar to look through the fastai documentation. Every +class, function, and method is documented here. To learn about the design and motivation of the library, read the [peer reviewed paper](https://www.mdpi.com/2078-2489/11/2/108/htm). diff --git a/fastai/_modidx.py b/fastai/_modidx.py index f686c19c28..fcc2cbf5ac 100644 --- a/fastai/_modidx.py +++ b/fastai/_modidx.py @@ -42,8 +42,7 @@ 'fastai.callback.captum': { 'fastai.callback.captum.CaptumInterpretation': 'https://docs.fast.ai/callback.captum.html#captuminterpretation', 'fastai.callback.captum.CaptumInterpretation.get_baseline_img': 'https://docs.fast.ai/callback.captum.html#captuminterpretation.get_baseline_img', 'fastai.callback.captum.CaptumInterpretation.insights': 'https://docs.fast.ai/callback.captum.html#captuminterpretation.insights', - 'fastai.callback.captum.CaptumInterpretation.visualize': 'https://docs.fast.ai/callback.captum.html#captuminterpretation.visualize', - 'fastai.callback.captum.json_clean': 'https://docs.fast.ai/callback.captum.html#json_clean'}, + 'fastai.callback.captum.CaptumInterpretation.visualize': 'https://docs.fast.ai/callback.captum.html#captuminterpretation.visualize'}, 'fastai.callback.comet': { 'fastai.callback.comet.CometCallback': 'https://docs.fast.ai/callback.comet.html#cometcallback', 'fastai.callback.comet.CometCallback.after_batch': 'https://docs.fast.ai/callback.comet.html#cometcallback.after_batch', 'fastai.callback.comet.CometCallback.after_epoch': 'https://docs.fast.ai/callback.comet.html#cometcallback.after_epoch', @@ -1286,6 +1285,7 @@ 'fastai.vision.core.BBoxLabeler.setups': 'https://docs.fast.ai/vision.core.html#bboxlabeler.setups', 'fastai.vision.core.BILINEAR': 'https://docs.fast.ai/vision.core.html#bilinear', 'fastai.vision.core.Image': 'https://docs.fast.ai/vision.core.html#image', + 'fastai.vision.core.Image.Image.__repr__': 'https://docs.fast.ai/vision.core.html#image.image.__repr__', 'fastai.vision.core.Image.Image.aspect': 'https://docs.fast.ai/vision.core.html#image.image.aspect', 'fastai.vision.core.Image.Image.n_px': 'https://docs.fast.ai/vision.core.html#image.image.n_px', 'fastai.vision.core.Image.Image.reshape': 'https://docs.fast.ai/vision.core.html#image.image.reshape', diff --git a/fastai/callback/captum.py b/fastai/callback/captum.py index 811d3607f1..43afb63e3d 100644 --- a/fastai/callback/captum.py +++ b/fastai/callback/captum.py @@ -6,7 +6,7 @@ from ..basics import * # %% auto 0 -__all__ = ['json_clean', 'CaptumInterpretation'] +__all__ = ['CaptumInterpretation'] # %% ../nbs/70c_callback.captum.ipynb 6 from ipykernel import jsonutil @@ -92,7 +92,7 @@ def _get_attributions(self,enc_data,metric,n_steps,nt_type,baseline_type,strides sliding_window_shapes=sliding_window_shapes, baselines=baseline) -# %% ../nbs/70c_callback.captum.ipynb 25 +# %% ../nbs/70c_callback.captum.ipynb 26 @patch def insights(x: 
CaptumInterpretation,inp_data,debug=True): _baseline_func= lambda o: o*0 diff --git a/fastai/callback/tensorboard.py b/fastai/callback/tensorboard.py index 2aaa507e13..44a48bc235 100644 --- a/fastai/callback/tensorboard.py +++ b/fastai/callback/tensorboard.py @@ -8,13 +8,13 @@ __all__ = ['TensorBoardBaseCallback', 'TensorBoardCallback', 'TensorBoardProjectorCallback', 'projector_word_embeddings', 'tensorboard_log'] -# %% ../nbs/70a_callback.tensorboard.ipynb 18 +# %% ../nbs/70a_callback.tensorboard.ipynb 19 import tensorboard from torch.utils.tensorboard import SummaryWriter from .fp16 import ModelToHalf from .hook import hook_output -# %% ../nbs/70a_callback.tensorboard.ipynb 19 +# %% ../nbs/70a_callback.tensorboard.ipynb 20 class TensorBoardBaseCallback(Callback): order = Recorder.order+1 "Base class for tensorboard callbacks" @@ -42,7 +42,7 @@ def __del__(self): self._remove() def _remove(self): if getattr(self, 'h', None): self.h.remove() -# %% ../nbs/70a_callback.tensorboard.ipynb 20 +# %% ../nbs/70a_callback.tensorboard.ipynb 22 class TensorBoardCallback(TensorBoardBaseCallback): "Saves model topology, losses & metrics for tensorboard and tensorboard projector during training" def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None): @@ -79,7 +79,7 @@ def after_epoch(self): def before_validate(self): if self.projector: self._setup_projector() -# %% ../nbs/70a_callback.tensorboard.ipynb 21 +# %% ../nbs/70a_callback.tensorboard.ipynb 24 class TensorBoardProjectorCallback(TensorBoardBaseCallback): "Extracts and exports image featuers for tensorboard projector during inference" def __init__(self, log_dir=None, layer=None): @@ -94,13 +94,13 @@ def before_fit(self): def before_validate(self): self._setup_projector() -# %% ../nbs/70a_callback.tensorboard.ipynb 22 +# %% ../nbs/70a_callback.tensorboard.ipynb 26 def _write_projector_embedding(learn, writer, feat): lbls = [learn.dl.vocab[l] for l in feat['lbl']] if getattr(learn.dl, 'vocab', None) else None vecs = feat['vec'].squeeze() writer.add_embedding(vecs, metadata=lbls, label_img=feat['img'], global_step=learn.train_iter) -# %% ../nbs/70a_callback.tensorboard.ipynb 23 +# %% ../nbs/70a_callback.tensorboard.ipynb 27 def _add_projector_features(learn, hook, feat): img = _normalize_for_projector(learn.x) first_epoch = True if learn.iter == 0 else False @@ -110,12 +110,12 @@ def _add_projector_features(learn, hook, feat): feat['lbl'] = learn.y if first_epoch else torch.cat((feat['lbl'], learn.y),0) return feat -# %% ../nbs/70a_callback.tensorboard.ipynb 24 +# %% ../nbs/70a_callback.tensorboard.ipynb 28 def _get_embeddings(model, layer): layer = model[0].encoder if layer == None else layer return layer.weight -# %% ../nbs/70a_callback.tensorboard.ipynb 25 +# %% ../nbs/70a_callback.tensorboard.ipynb 29 @typedispatch def _normalize_for_projector(x:TensorImage): # normalize tensor to be between 0-1 @@ -127,10 +127,10 @@ def _normalize_for_projector(x:TensorImage): img = img.view(*sz) return img -# %% ../nbs/70a_callback.tensorboard.ipynb 26 +# %% ../nbs/70a_callback.tensorboard.ipynb 30 from ..text.all import LMLearner, TextLearner -# %% ../nbs/70a_callback.tensorboard.ipynb 27 +# %% ../nbs/70a_callback.tensorboard.ipynb 31 def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None): "Extracts and exports word embeddings from language models embedding layers" if not layer: @@ -145,10 +145,10 @@ def projector_word_embeddings(learn=None, layer=None, vocab=None, 
limit=-1, star writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end]) writer.close() -# %% ../nbs/70a_callback.tensorboard.ipynb 28 +# %% ../nbs/70a_callback.tensorboard.ipynb 33 from ..vision.data import * -# %% ../nbs/70a_callback.tensorboard.ipynb 29 +# %% ../nbs/70a_callback.tensorboard.ipynb 34 @typedispatch def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step): fig,axs = get_grid(len(samples), return_fig=True) @@ -158,10 +158,10 @@ def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, ste for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)] writer.add_figure('Sample results', fig, step) -# %% ../nbs/70a_callback.tensorboard.ipynb 30 +# %% ../nbs/70a_callback.tensorboard.ipynb 35 from ..vision.core import TensorPoint,TensorBBox -# %% ../nbs/70a_callback.tensorboard.ipynb 31 +# %% ../nbs/70a_callback.tensorboard.ipynb 36 @typedispatch def tensorboard_log(x:TensorImage, y: TensorImageBase|TensorPoint|TensorBBox, samples, outs, writer, step): fig,axs = get_grid(len(samples), return_fig=True, double=True) diff --git a/fastai/callback/wandb.py b/fastai/callback/wandb.py index 92bec684d2..dc9a747c2a 100644 --- a/fastai/callback/wandb.py +++ b/fastai/callback/wandb.py @@ -157,7 +157,7 @@ def after_fit(self): self._wandb_step += 1 -# %% ../nbs/70_callback.wandb.ipynb 11 +# %% ../nbs/70_callback.wandb.ipynb 12 @patch def gather_args(self:Learner): "Gather config parameters accessible to the learner" @@ -185,7 +185,7 @@ def gather_args(self:Learner): args['dls.after_batch'] = f'{self.dls.after_batch}' return args -# %% ../nbs/70_callback.wandb.ipynb 12 +# %% ../nbs/70_callback.wandb.ipynb 14 def _make_plt(img): "Make plot to image resolution" # from https://stackoverflow.com/a/13714915 @@ -198,7 +198,7 @@ def _make_plt(img): fig.add_axes(ax) return fig, ax -# %% ../nbs/70_callback.wandb.ipynb 13 +# %% ../nbs/70_callback.wandb.ipynb 15 def _format_config_value(v): if isinstance(v, list): return [_format_config_value(item) for item in v] @@ -206,7 +206,7 @@ def _format_config_value(v): return {**_format_config(v.__stored_args__), '_name': v} return v -# %% ../nbs/70_callback.wandb.ipynb 14 +# %% ../nbs/70_callback.wandb.ipynb 16 def _format_config(config): "Format config parameters before logging them" for k,v in config.items(): @@ -216,12 +216,12 @@ def _format_config(config): config[k] = _format_config_value(v) return config -# %% ../nbs/70_callback.wandb.ipynb 15 +# %% ../nbs/70_callback.wandb.ipynb 17 def _format_metadata(metadata): "Format metadata associated to artifacts" for k,v in metadata.items(): metadata[k] = str(v) -# %% ../nbs/70_callback.wandb.ipynb 16 +# %% ../nbs/70_callback.wandb.ipynb 18 def log_dataset(path, name=None, metadata={}, description='raw dataset'): "Log dataset folder" # Check if wandb.init has been called in case datasets are logged manually @@ -240,7 +240,7 @@ def log_dataset(path, name=None, metadata={}, description='raw dataset'): else: artifact_dataset.add_file(str(p.resolve())) wandb.run.use_artifact(artifact_dataset) -# %% ../nbs/70_callback.wandb.ipynb 17 +# %% ../nbs/70_callback.wandb.ipynb 20 def log_model(path, name=None, metadata={}, description='trained model'): "Log model file" if wandb.run is None: @@ -255,7 +255,7 @@ def log_model(path, name=None, metadata={}, description='trained model'): fa.write(path.read_bytes()) wandb.run.log_artifact(artifact_model) -# %% ../nbs/70_callback.wandb.ipynb 18 +# %% ../nbs/70_callback.wandb.ipynb 22 @typedispatch def 
wandb_process(x:TensorImage, y, samples, outs, preds): "Process `sample` and `out` depending on the type of `x/y`" @@ -272,14 +272,14 @@ def wandb_process(x:TensorImage, y, samples, outs, preds): plt.close(fig) return {"Inputs":res_input, "Predictions":res_pred, "Ground_Truth":res_label} -# %% ../nbs/70_callback.wandb.ipynb 19 +# %% ../nbs/70_callback.wandb.ipynb 23 def _unlist(l): "get element of lists of lenght 1" if isinstance(l, (list, tuple)): if len(l) == 1: return l[0] else: return l -# %% ../nbs/70_callback.wandb.ipynb 20 +# %% ../nbs/70_callback.wandb.ipynb 24 @typedispatch def wandb_process(x:TensorImage, y:TensorCategory|TensorMultiCategory, samples, outs, preds): table = wandb.Table(columns=["Input image", "Ground_Truth", "Predictions"]) @@ -287,7 +287,7 @@ def wandb_process(x:TensorImage, y:TensorCategory|TensorMultiCategory, samples, table.add_data(wandb.Image(image.permute(1,2,0)), label, _unlist(pred_label)) return {"Prediction_Samples": table} -# %% ../nbs/70_callback.wandb.ipynb 21 +# %% ../nbs/70_callback.wandb.ipynb 25 @typedispatch def wandb_process(x:TensorImage, y:TensorMask, samples, outs, preds): res = [] @@ -305,18 +305,18 @@ def wandb_process(x:TensorImage, y:TensorMask, samples, outs, preds): ) return {"Prediction_Samples": table} -# %% ../nbs/70_callback.wandb.ipynb 22 +# %% ../nbs/70_callback.wandb.ipynb 26 @typedispatch def wandb_process(x:TensorText, y:TensorCategory|TensorMultiCategory, samples, outs, preds): data = [[s[0], s[1], o[0]] for s,o in zip(samples,outs)] return {"Prediction_Samples": wandb.Table(data=data, columns=["Text", "Target", "Prediction"])} -# %% ../nbs/70_callback.wandb.ipynb 23 +# %% ../nbs/70_callback.wandb.ipynb 27 @typedispatch def wandb_process(x:Tabular, y:Tabular, samples, outs, preds): df = x.all_cols for n in x.y_names: df[n+'_pred'] = y[n].values return {"Prediction_Samples": wandb.Table(dataframe=df)} -# %% ../nbs/70_callback.wandb.ipynb 27 +# %% ../nbs/70_callback.wandb.ipynb 32 _all_ = ['wandb_process'] diff --git a/fastai/vision/core.py b/fastai/vision/core.py index 9f8b4c264b..73387a7f85 100644 --- a/fastai/vision/core.py +++ b/fastai/vision/core.py @@ -6,6 +6,7 @@ from ..data.all import * from PIL import Image + try: BILINEAR,NEAREST = Image.Resampling.BILINEAR,Image.Resampling.NEAREST except AttributeError: from PIL.Image import BILINEAR,NEAREST @@ -21,6 +22,11 @@ # %% ../nbs/07_vision.core.ipynb 7 _all_ = ['Image','ToTensor'] +# %% ../nbs/07_vision.core.ipynb 8 +@patch +def __repr__(x:Image.Image): + return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (x.__class__.__module__, x.__class__.__name__, x.mode, x.size[0], x.size[1]) + # %% ../nbs/07_vision.core.ipynb 11 imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261]) diff --git a/fastai/vision/utils.py b/fastai/vision/utils.py index 15111bcdec..a0ddbf1f22 100644 --- a/fastai/vision/utils.py +++ b/fastai/vision/utils.py @@ -23,7 +23,7 @@ def _get_downloaded_image_filename(dest, name, suffix): return candidate_name -# %% ../nbs/09b_vision.utils.ipynb 8 +# %% ../nbs/09b_vision.utils.ipynb 7 def _download_image_inner(dest, inp, timeout=4, preserve_filename=False): i,url = inp url = url.split("?")[0] @@ -33,7 +33,7 @@ def _download_image_inner(dest, inp, timeout=4, preserve_filename=False): try: download_url(url, dest/f"{name}{suffix}", show_progress=False, timeout=timeout) except Exception as e: f"Couldn't download {url}." 
-# %% ../nbs/09b_vision.utils.ipynb 10 +# %% ../nbs/09b_vision.utils.ipynb 9 def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4, preserve_filename=False): "Download images listed in text file `url_file` to path `dest`, at most `max_pics`" if urls is None: urls = url_file.read_text().strip().split("\n")[:max_pics] @@ -42,7 +42,7 @@ def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, parallel(partial(_download_image_inner, dest, timeout=timeout, preserve_filename=preserve_filename), list(enumerate(urls)), n_workers=n_workers, threadpool=True) -# %% ../nbs/09b_vision.utils.ipynb 12 +# %% ../nbs/09b_vision.utils.ipynb 11 def resize_to(img, targ_sz, use_min=False): "Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)" w,h = img.size @@ -50,7 +50,7 @@ def resize_to(img, targ_sz, use_min=False): ratio = targ_sz/min_sz return int(w*ratio),int(h*ratio) -# %% ../nbs/09b_vision.utils.ipynb 14 +# %% ../nbs/09b_vision.utils.ipynb 13 def verify_image(fn): "Confirm that `fn` can be opened" try: @@ -60,12 +60,12 @@ def verify_image(fn): return True except: return False -# %% ../nbs/09b_vision.utils.ipynb 15 +# %% ../nbs/09b_vision.utils.ipynb 14 def verify_images(fns): "Find images in `fns` that can't be opened" return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o) -# %% ../nbs/09b_vision.utils.ipynb 16 +# %% ../nbs/09b_vision.utils.ipynb 15 def resize_image(file, dest, src='.', max_size=None, n_channels=3, ext=None, img_format=None, resample=BILINEAR, resume=False, **kwargs ): "Resize file to dest to max_size" @@ -89,7 +89,7 @@ def resize_image(file, dest, src='.', max_size=None, n_channels=3, ext=None, img.save(dest_fname, img_format, **kwargs) elif file != dest_fname : shutil.copy2(file, dest_fname) -# %% ../nbs/09b_vision.utils.ipynb 19 +# %% ../nbs/09b_vision.utils.ipynb 18 def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False, dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=BILINEAR, resume=None, **kwargs): diff --git a/nbs/07_vision.core.ipynb b/nbs/07_vision.core.ipynb index c9f3756ba7..2e4bebb8a0 100755 --- a/nbs/07_vision.core.ipynb +++ b/nbs/07_vision.core.ipynb @@ -33,6 +33,7 @@ "from fastai.data.all import *\n", "\n", "from PIL import Image\n", + "\n", "try: BILINEAR,NEAREST = Image.Resampling.BILINEAR,Image.Resampling.NEAREST\n", "except AttributeError: from PIL.Image import BILINEAR,NEAREST" ] @@ -82,8 +83,10 @@ "metadata": {}, "outputs": [], "source": [ - "#It didn't use to be necessary to add ToTensor in all but we don't have the encodes methods defined here otherwise.\n", - "#TODO: investigate" + "#|exporti\n", + "@patch\n", + "def __repr__(x:Image.Image):\n", + " return \"<%s.%s image mode=%s size=%dx%d at 0x%X>\" % (x.__class__.__module__, x.__class__.__name__, x.mode, x.size[0], x.size[1])" ] }, { @@ -91,6 +94,7 @@ "metadata": {}, "source": [ "# Core vision\n", + "\n", "> Basic image opening/processing functionality" ] }, @@ -1079,6 +1083,8 @@ "ToTensor:\n", "encodes: (PILMask,object) -> encodes\n", "(PILBase,object) -> encodes\n", + "(PILMask,object) -> encodes\n", + "(PILBase,object) -> encodes\n", "decodes: \n", "\n", "\n" diff --git a/nbs/70_callback.wandb.ipynb b/nbs/70_callback.wandb.ipynb index 4528cfa15b..a068872b1a 100644 --- a/nbs/70_callback.wandb.ipynb +++ b/nbs/70_callback.wandb.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "#| nbflags skip_exec" + "#| nbflags skip_showdoc skip_exec" ] }, 
{ @@ -238,6 +238,54 @@ " " ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### WandbCallback\n", + "\n", + "> WandbCallback (log:'str'=None, log_preds:'bool'=True,\n", + "> log_preds_every_epoch:'bool'=False,\n", + "> log_model:'bool'=False, model_name:'str'=None,\n", + "> log_dataset:'bool'=False, dataset_name:'str'=None,\n", + "> valid_dl:'TfmdDL'=None, n_preds:'int'=36,\n", + "> seed:'int'=12345, reorder=True)\n", + "\n", + "Saves model topology, losses & metrics\n", + "\n", + "| | **Type** | **Default** | **Details** |\n", + "| -- | -------- | ----------- | ----------- |\n", + "| log | str | None | What to log (can be `gradients`, `parameters`, `all` or None) |\n", + "| log_preds | bool | True | Whether to log model predictions on a `wandb.Table` |\n", + "| log_preds_every_epoch | bool | False | Whether to log predictions every epoch or at the end |\n", + "| log_model | bool | False | Whether to save the model checkpoint to a `wandb.Artifact` |\n", + "| model_name | str | None | The name of the `model_name` to save, overrides `SaveModelCallback` |\n", + "| log_dataset | bool | False | Whether to log the dataset to a `wandb.Artifact` |\n", + "| dataset_name | str | None | A name to log the dataset with |\n", + "| valid_dl | TfmdDL | None | If `log_preds=True`, then the samples will be drawn from `valid_dl` |\n", + "| n_preds | int | 36 | How many samples to log predictions |\n", + "| seed | int | 12345 | The seed of the samples drawn |\n", + "| reorder | bool | True | |" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(WandbCallback)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -290,6 +338,35 @@ " return args" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "#### Learner.gather_args\n", + "\n", + "> Learner.gather_args ()\n", + "\n", + "Gather config parameters accessible to the learner" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(Learner.gather_args)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -380,6 +457,35 @@ " wandb.run.use_artifact(artifact_dataset)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "#### log_dataset\n", + "\n", + "> log_dataset (path, name=None, metadata={}, description='rawdataset')\n", + "\n", + "Log dataset folder" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(log_dataset)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -402,6 +508,35 @@ " wandb.run.log_artifact(artifact_model)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "#### log_model\n", + "\n", + "> log_model (path, name=None, metadata={}, description='trainedmodel')\n", + "\n", + "Log model file" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(log_model)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -511,8 +646,13 @@ "cell_type": 
"markdown", "metadata": {}, "source": [ - "## Example of use:\n", - "\n", + "## Example of use:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "Once your have defined your `Learner`, before you call to `fit` or `fit_one_cycle`, you need to initialize wandb:\n", "```\n", "import wandb\n", diff --git a/nbs/70a_callback.tensorboard.ipynb b/nbs/70a_callback.tensorboard.ipynb index 48af1465e5..ec04450359 100644 --- a/nbs/70a_callback.tensorboard.ipynb +++ b/nbs/70a_callback.tensorboard.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "#| nbflags skip_exec" + "#| nbflags skip_showdoc skip_exec" ] }, { @@ -40,6 +40,15 @@ "from fastai.basics import *" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from nbdev import show_doc" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -228,6 +237,35 @@ " if getattr(self, 'h', None): self.h.remove()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### TensorBoardBaseCallback\n", + "\n", + "> TensorBoardBaseCallback ()\n", + "\n", + "Basic class handling tweaks of the training loop by changing a `Learner` in various events" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(TensorBoardBaseCallback)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -272,6 +310,36 @@ " if self.projector: self._setup_projector()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### TensorBoardCallback\n", + "\n", + "> TensorBoardCallback (log_dir=None, trace_model=True, log_preds=True,\n", + "> n_preds=9, projector=False, layer=None)\n", + "\n", + "Saves model topology, losses & metrics for tensorboard and tensorboard projector during training" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(TensorBoardCallback)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -294,6 +362,35 @@ " self._setup_projector()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### TensorBoardProjectorCallback\n", + "\n", + "> TensorBoardProjectorCallback (log_dir=None, layer=None)\n", + "\n", + "Extracts and exports image featuers for tensorboard projector during inference" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(TensorBoardProjectorCallback)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -387,6 +484,36 @@ " writer.close()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "#### projector_word_embeddings\n", + "\n", + "> projector_word_embeddings (learn=None, layer=None, vocab=None, limit=-1,\n", + "> start=0, log_dir=None)\n", + "\n", + "Extracts and exports word embeddings from language models embedding layers" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(projector_word_embeddings)" + ] + }, { "cell_type": "code", "execution_count": null, 
diff --git a/nbs/70b_callback.neptune.ipynb b/nbs/70b_callback.neptune.ipynb index 3cab9c7cbd..33c79fa4bd 100644 --- a/nbs/70b_callback.neptune.ipynb +++ b/nbs/70b_callback.neptune.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "#| nbflags skip_exec" + "#| nbflags skip_showdoc skip_exec" ] }, { @@ -226,6 +226,35 @@ " 'You can log more data to it, like this: `neptune.log_metric()`')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### NeptuneCallback\n", + "\n", + "> NeptuneCallback (log_model_weights=True, keep_experiment_running=False)\n", + "\n", + "Log losses, metrics, model weights, model architecture summary to neptune" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(NeptuneCallback)" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/nbs/70c_callback.captum.ipynb b/nbs/70c_callback.captum.ipynb index 3e612449f2..9127d64860 100644 --- a/nbs/70c_callback.captum.ipynb +++ b/nbs/70c_callback.captum.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "#| nbflags skip_exec\n", + "#| nbflags skip_showdoc skip_exec\n", "#|default_exp callback.captum" ] }, @@ -77,7 +77,7 @@ "metadata": {}, "outputs": [], "source": [ - "#|export\n", + "#|exporti\n", "# Dirty hack as json_clean doesn't support CategoryMap type\n", "_json_clean=jsonutil.json_clean\n", "def json_clean(o):\n", @@ -146,70 +146,7 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
epoch 0: train_loss 0.391742, valid_loss 0.161483, error_rate 0.059540, time 00:10 (fine_tune progress table; HTML markup elided)" - ], - "text/plain": [ - "<IPython.core.display.HTML object>" - ], - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "epoch 0: train_loss 0.156485, valid_loss 0.055611, error_rate 0.020298, time 00:10 (fine_tune progress table; HTML markup elided)
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "learn = vision_learner(dls, resnet34, metrics=error_rate)\n", "learn.fine_tune(1)" @@ -299,6 +236,37 @@ " baselines=baseline)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### CaptumInterpretation\n", + "\n", + "> CaptumInterpretation (learn, cmap_name='customblue', colors=None, N=256,\n", + "> methods=('original_image','heat_map'),\n", + "> signs=('all','positive'), outlier_perc=1)\n", + "\n", + "Captum Interpretation for Resnet" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(CaptumInterpretation)" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/nbs/70d_callback.comet.ipynb b/nbs/70d_callback.comet.ipynb index 7add9579c0..c6458fab33 100644 --- a/nbs/70d_callback.comet.ipynb +++ b/nbs/70d_callback.comet.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "#| nbflags skip_exec" + "#| nbflags skip_showdoc skip_exec" ] }, { @@ -226,6 +226,42 @@ " except:\n", " print(\"No neptune experiment to stop.\")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### CometCallback\n", + "\n", + "> CometCallback (project_name, log_model_weights=True)\n", + "\n", + "Log losses, metrics, model weights, model architecture summary to neptune" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show_doc(CometCallback)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/nbs/index.ipynb b/nbs/index.ipynb index 6f0742d74b..bfcb83e53e 100644 --- a/nbs/index.ipynb +++ b/nbs/index.ipynb @@ -78,7 +78,7 @@ "\n", "To see what's possible with fastai, take a look at the [Quick Start](https://docs.fast.ai/quick_start.html), which shows how to use around 5 lines of code to build an image classifier, an image segmentation model, a text sentiment model, a recommendation system, and a tabular model. For each of the applications, the code is much the same.\n", "\n", - "Read through the [Tutorials](https://docs.fast.ai/tutorial) to learn how to train your own models on your own datasets. Use the navigation sidebar to look through the fastai documentation. Every class, function, and method is documented here.\n", + "Read through the [Tutorials](https://docs.fast.ai/tutorial.html) to learn how to train your own models on your own datasets. Use the navigation sidebar to look through the fastai documentation. Every class, function, and method is documented here.\n", "\n", "To learn about the design and motivation of the library, read the [peer reviewed paper](https://www.mdpi.com/2078-2489/11/2/108/htm)." ] @@ -216,7 +216,7 @@ "split_at_heading": true }, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }