diff --git a/src/About/About.js b/src/About/About.js
index fe25683..e555c8b 100644
--- a/src/About/About.js
+++ b/src/About/About.js
@@ -4,6 +4,7 @@ import AccordionSummary from '@material-ui/core/AccordionSummary';
 import AccordionDetails from '@material-ui/core/AccordionDetails';
 import Container from '@material-ui/core/Container';
 import Grid from '@material-ui/core/Grid';
+import Link from '@material-ui/core/Link';
 import Paper from '@material-ui/core/Paper';
 import Typography from '@material-ui/core/Typography';
 import { makeStyles } from '@material-ui/core/styles';
@@ -85,11 +86,7 @@ export default function About() {
 (1) Data Annotation and Management, (2) Model Development, and (3) Deployment and Inference
-
+
@@ -103,11 +100,10 @@ export default function About() {
 id="panel-data-1-header" > DeepCell Label
- {/* I am an accordion */}
- DeepCell Label is our training data curation tool.
+ DeepCell Label is our training data curation tool. It provides an intuitive UI for users to create annotations from scratch or to correct model predictions, to facilitate the creation of large, high-quality datasets. DeepCell Label can be deployed locally or on the cloud.
@@ -128,14 +124,13 @@ export default function About() {
 id="panel-model-dev-1-header" > deepcell-tf
- {/* I am an accordion */}
- deepcell-tf is our core deep learning library.
+ deepcell-tf is our core deep learning library. Based on TensorFlow, it contains a suite of tools for building and training deep learning models. The library has been constructed in a modular fashion to make it easy to mix and match different model architectures, prediction tasks, and post-processing functions.
- For more information, check out the documentation.
+ For more information, check out the documentation.
@@ -154,21 +149,16 @@ export default function About() {
 id="panel-deployment-3-header" > kiosk-console
- {/* I am an accordion */}
- The kiosk-console is a turn-key cloud-based solution for deploying a scalable inference platform.
- The platform includes a simple drag-and-drop interface for segmenting a few images, and a robust API capable of affordably processing millions of images.
+ The kiosk-console is a turn-key cloud-based solution for deploying a scalable inference platform.
+ The platform includes a simple drag-and-drop interface for segmenting a few images, and a robust API capable of affordably processing millions of images.

- The platform comes out of the box with three distinct model types:
- • Segmentation: A nuclear prediction model for cell culture. The input to this model is a single nuclear image. The output of this model is a mask with the nuclear segmentation of each cell in the image.
- • Tracking: A live-cell tracking model. The input to this model is a time-lapse movie of a single nuclear channel. The output of this model is a segmentation mask for each frame in the time-lapse movie, with the cell ids linked across images such that the same cell always has the same label.
- • Multiplex: A multiplex imaging model. The input to this model is a 2-channel image consisting of a nuclear channel and a membrane or cytoplasm channel. The output of this model is a mask with the whole-cell segmentation of each cell in the image.
+ We use this platform to host DeepCell.org and currently deployed models. However, it is built with extensibility in mind, and it is easy to deploy your own models.
- To learn more about deploying your own instance of deepcell.org using the kiosk-console, read the docs.
+ To learn more about deploying your own instance of DeepCell.org using the kiosk-console, read the docs.
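The kiosk-console text above points at a "robust API" without showing what driving it looks like. Here is a minimal sketch of polling a submitted job until it resolves; the '/api/redis' route and the value[] layout are assumptions inferred from the src/Predict/Predict.js hunk later in this diff, not a documented contract.

// Sketch only: poll a hypothetical Kiosk status endpoint until the job
// finishes. Mirrors the statusCheck loop in src/Predict/Predict.js below.
import axios from 'axios';

async function waitForJob(redisHash, intervalMs = 3000) {
  for (;;) {
    // The endpoint path and payload shape here are assumptions.
    const response = await axios.post('/api/redis', { hash: redisHash });
    const status = response.data.value[0];
    if (status === 'done') {
      return response.data.value; // layout of the success payload is assumed
    }
    if (status === 'failed') {
      // Predict.js reads the failure reason from value[3].
      throw new Error(`Job Failed: ${response.data.value[3]}`);
    }
    await new Promise(resolve => setTimeout(resolve, intervalMs));
  }
}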
@@ -184,7 +174,7 @@ export default function About() {
- The kiosk-imagej-plugin enables ImageJ to segment images with a deployed DeepCell Kiosk model without leaving the application.
+ The kiosk-imagej-plugin enables ImageJ to segment images with a deployed DeepCell Kiosk model without leaving the application.
@@ -200,7 +190,7 @@ export default function About() {
- deepcell-applications contains a variety of trained deep learning models and post-processing functions for instance segmentation.
+ deepcell-applications contains a variety of trained deep learning models and post-processing functions for instance segmentation.
 Each model can be imported and run locally from a Docker image, Jupyter notebook, or custom script.
@@ -217,9 +207,9 @@ export default function About() {
- The ark repository is our integrated multiplex image analysis pipeline.
+ The ark repository is our integrated multiplex image analysis pipeline.
 The input is multiplexed image data from any platform.
- It runs the data through deepcell, extracts the counts of each marker in each cell, normalizes the data, and then creates a summary table with morphological information and marker intensity for every cell in each image.
+ It segments the data with Mesmer using the Kiosk, extracts the counts of each marker in each cell, normalizes the data, and then creates a summary table with morphological information and marker intensity for every cell in each image.
 It also provides an easy way to run some standard spatial analysis functions on your data.
diff --git a/src/App/App.js b/src/App/App.js
index 9392a05..c3f2c57 100644
--- a/src/App/App.js
+++ b/src/App/App.js
@@ -14,7 +14,7 @@ const Footer = lazy(() => import('../Footer/Footer'));
 const NavBar = lazy(() => import('../NavBar/NavBar'));
 const Landing = lazy(() => import('../Landing/Landing'));
 const Predict = lazy(() => import('../Predict/Predict'));
-const Data = lazy(() => import('../Data/Data'));
+// const Data = lazy(() => import('../Data/Data'));
 const NotFound = lazy(() => import('../NotFound/NotFound'));
 // If the mode is NOT production, then notify that we are in dev mode.
@@ -77,7 +77,6 @@ export default function App() {
-
diff --git a/src/Faq/Faq.js b/src/Faq/Faq.js
index 601c2c3..919aa34 100644
--- a/src/Faq/Faq.js
+++ b/src/Faq/Faq.js
@@ -34,120 +34,77 @@ export default function Faq() {
- What is DeepCell?
- DeepCell is a software ecosystem that enables deep learning based biological image analysis in the cloud.
- It consists of several software packages, including the deep learning library deepcell-tf, and a Kubernetes cloud deployment platform, the DeepCell Kiosk.
- How do I get started using DeepCell?
- You can use the Predict tab to upload image files (.tiff, .png, .jpg, etc.) and segment them with our pre-trained models. There is example data for uploading on the Data tab.
- Additionally, you can deploy your own DeepCell Kiosk by following the instructions on the Kiosk Documentation.
- The deployment comes with pre-trained models for a few common image processing tasks, including nuclear segmentation for 2D images and nuclear tracking for 3D tiff stacks.
- These models are hosted with TensorFlow Serving and are running on auto-scaling GPUs to minimize cost.
- The servable model files can be found in our public bucket.
- What is the DeepCell Kiosk?
- The DeepCell Kiosk is a turn-key cloud solution for large-scale image processing with deep learning models.
- It is a public software and fully extensible for custom image processing tasks.
- Documentation for the DeepCell Kiosk can be found here.
- This website is an implementation of the production branch of the DeepCell Kiosk, and is running on the Google Kubernetes Engine.
- What is deepcell-tf?
- deepcell-tf is a TensorFlow/Keras based Python library for training deep learning models for biological image analysis.
- All models hosted on DeepCell.org have been trained using this library.
- Documentation for the library can be found here.
+ If you’d like to use our pretrained models to segment your own data, you can use the predict page.
+ The predict page allows you to easily upload your images with a drag and drop interface, select the most appropriate model, and get predictions back, all without needing to install any software.

+ If you’d like to train your own models, check out deepcell-tf.
+ If you’d like to annotate your data, you can use the DeepCell Label tool, available via our website or from the GitHub repository.
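Since the rewritten answer above leans on the drag-and-drop uploader, a small illustrative check of the file types this FAQ mentions (.tiff, .png, .jpg) may help; the helper name and the exact extension list are invented here, not taken from the codebase.

// Illustrative helper (not from the repo): screens a dropped file by
// extension before upload, using the formats the FAQ mentions.
const SUPPORTED_EXTENSIONS = ['tif', 'tiff', 'png', 'jpg', 'jpeg'];

function isSupportedImage(filename) {
  const extension = filename.split('.').pop().toLowerCase();
  return SUPPORTED_EXTENSIONS.includes(extension);
}

// e.g. isSupportedImage('vectra_breast_cancer.tif') === true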
- Can I use my own models?
- Custom models must be exported for TensorFlow Serving and saved in a cloud bucket defined when deploying a new DeepCell Kiosk.
- TensorFlow Serving will read the bucket when it is starting up, and will load all exported models it finds.
- See here for a custom Python function for exporting models to TensorFlow Serving.
- The model can be exported directly to the cloud bucket with the appropriate protocol prefix (e.g. s3://bucket/model or gs://bucket/model)
+ What does this error message mean?
+ • Invalid image shape
+   The image provided is not compatible with the model.
+   Check that the channels of the input image match the expected model input, and that the dimensions of the image match the model (i.e. 2D images or 3D movies).
+ • Input only has X channels but channel Y was declared as an input channel.
+   An RGB channel was specified but is out of range for the input image.
+ • Input image is larger than the maximum supported image size of (M, N).
+   Your input image is too big! Try cropping the image and uploading the crops separately.
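The three errors above are all shape checks, so a hedged sketch of the equivalent validation may make them concrete. The limits and the image accessors here are hypothetical stand-ins; the real checks run server-side.

// Sketch only: mirrors the server-side checks described above with made-up
// limits. `image` is assumed to expose width, height, and a channel count.
const MAX_HEIGHT = 2048; // hypothetical value of M
const MAX_WIDTH = 2048;  // hypothetical value of N

function validateImage(image, selectedChannels) {
  if (image.width > MAX_WIDTH || image.height > MAX_HEIGHT) {
    throw new Error(
      `Input image is larger than the maximum supported image size of (${MAX_HEIGHT}, ${MAX_WIDTH}).`);
  }
  for (const channel of selectedChannels) {
    if (channel >= image.channels) {
      throw new Error(
        `Input only has ${image.channels} channels but channel ${channel} was declared as an input channel.`);
    }
  }
}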
- Can I add a custom job type?
+ Can I add my own models?
- This is a bit more involved and requires forked changes to the consumers. Check out our tutorial on building a custom job pipeline.
- The frontend places jobs in a Redis queue, and the consumers will perform all the work. New jobs will require a new job queue, which are listed in the dropdown list on the Predict page.
- Custom jobs will also require new consumer with any required pre- and post-processing steps to be defined and deployed in the DeepCell Kiosk with a custom helmfile for the new consumer.
+ Yes! deepcell.org is an instance of the kiosk-console which is fully extensible and serves models from a cloud bucket using TensorFlow Serving.
+ For more information on creating and customizing your own instance of the kiosk-console, please check out its docs.
- Where do I get data?
- We have uploaded some sample data both on the Data page.
- The prediction data is meant to be used with the pre-trained models while the training data is available for download for training new models.
- The training data is also available in deepcell.datasets which can be used directly within a Python environment.
- What types of data are supported?
+ Can you help me annotate my data?
- Standard image files are supported (ie. .png, .jpg) as well as .tiff files.
- Usually we expect around 1000 by 1000 pixel images with no more than 300 objects.
- Images should be 2D with the exception of data for tracking which should be a 3D stack.
- The training data is also available in deepcell.datasets which can be used directly within a Python environment.
+ Yes! Our training data was created using DeepCell Label, a tool for creating segmentation masks for images.
+ DeepCell Label is an open-source web application that can integrate with crowd-sourcing platforms.
+ If you have any questions or interest in collaborating on the data annotation process, please make a new Issue on the repository issue page.
@@ -156,19 +113,9 @@ export default function Faq() {
 Where can I get help?
- For an overview of the DeepCell ecocystem, please see our introductory docs.
+ For an overview of the DeepCell ecosystem, please see the About page and our introductory docs.
- Support for DeepCell Kiosk is available through our documentation and issues on Github
- Can you help me annotate my data?
- Yes! Our training data was created using DeepCell Label, a tool for creating segmentation masks for images.
- DeepCell Label is an open-source web application that can integrate with crowd-sourcing platforms like Figure Eight.
+ If you would like to report a bug or ask a question, please open a new issue on the issues page.
diff --git a/src/Footer/Footer.js b/src/Footer/Footer.js
index af4e6e5..a3db05d 100644
--- a/src/Footer/Footer.js
+++ b/src/Footer/Footer.js
@@ -1,5 +1,6 @@ import React from 'react';
 import { makeStyles } from '@material-ui/core/styles';
+import Link from '@material-ui/core/Link';
 import Typography from '@material-ui/core/Typography';
 const useStyles = makeStyles(theme => ({
@@ -17,10 +18,13 @@ export default function Footer() {
 const classes = useStyles();
 return (
+ © 2016-{currYear} The Van Valen Lab at the California Institute of Technology (Caltech). All rights reserved.
+ For any questions or collaboration requests, please reach out to info@deepcell.org.
); }
diff --git a/src/Landing/Landing.js b/src/Landing/Landing.js
index 04d3bcc..28ed58f 100644
--- a/src/Landing/Landing.js
+++ b/src/Landing/Landing.js
@@ -41,7 +41,9 @@ export default function Landing() {
 variant="contained"
 color="primary"
 className={classes.button}
-href="/data">
+href="https://datasets.deepcell.org"
+target="_blank"
+rel="noopener noreferrer">
 Data
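For context on the Landing.js change above: opening an external site with target="_blank" hands the new page a window.opener reference unless rel="noopener" is set, so the added rel attribute closes that off (noreferrer also suppresses the Referer header). A purely illustrative sketch of the same pattern factored into a reusable component, not something this PR adds:

// Illustrative only: a Button that always applies the safe external-link
// attributes used in the Landing.js change above.
import React from 'react';
import Button from '@material-ui/core/Button';

const ExternalButton = ({ href, children, ...props }) => (
  <Button href={href} target="_blank" rel="noopener noreferrer" {...props}>
    {children}
  </Button>
);

export default ExternalButton;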
diff --git a/src/NavBar/NavBar.js b/src/NavBar/NavBar.js
index 15bea7a..b184dd9 100644
--- a/src/NavBar/NavBar.js
+++ b/src/NavBar/NavBar.js
@@ -1,4 +1,5 @@ import React, { useState } from 'react';
+import { PropTypes } from 'prop-types';
 import { makeStyles } from '@material-ui/core/styles';
 import AppBar from '@material-ui/core/AppBar';
 import Button from '@material-ui/core/Button';
@@ -32,40 +33,48 @@ const useStyles = makeStyles(theme => ({
 } }));
+const MobileMenu = (props) => {
+  const classes = useStyles();
+  const { anchorEl, onClose } = props;
+
+  return (
+  );
+};
+
+MobileMenu.propTypes = {
+  anchorEl: PropTypes.any,
+  onClose: PropTypes.func,
+};
+
 export default function NavBar() {
 // const [anchorEl, setAnchorEl] = useState(null);
 const [mobileMoreAnchorEl, setMobileMoreAnchorEl] = useState(null);
 const classes = useStyles();
-const MobileMenu = () => {
-  return (
-  );
-};
-
 return (
@@ -80,27 +89,30 @@ export default function NavBar() {
- setMobileMoreAnchorEl(e.currentTarget)}>
+ setMobileMoreAnchorEl(e.currentTarget) }>
+ setMobileMoreAnchorEl(null)}
+ />
 ); }
diff --git a/src/Predict/Predict.js b/src/Predict/Predict.js
index 8128d80..e52f783 100644
--- a/src/Predict/Predict.js
+++ b/src/Predict/Predict.js
@@ -4,6 +4,7 @@ import Button from '@material-ui/core/Button';
 import Container from '@material-ui/core/Container';
 import Grid from '@material-ui/core/Grid';
 import LinearProgress from '@material-ui/core/LinearProgress';
+import Link from '@material-ui/core/Link';
 import Paper from '@material-ui/core/Paper';
 import Typography from '@material-ui/core/Typography';
 import axios from 'axios';
@@ -109,7 +110,13 @@ export default function Predict() {
 setStatus(response.data.value[0].split('-').join(' '));
 if (response.data.value[0] === 'failed') {
 clearInterval(statusCheck);
-setErrorText(`Job Failed: ${response.data.value[3]}`);
+// only show the full stack trace if NODE_ENV is not production
+let error = response.data.value[3];
+if (process.env.NODE_ENV === 'production') {
+  const lines = error.split('\n');
+  error = lines[lines.length - 1];
+}
+setErrorText(`Job Failed: ${error}`);
 expireRedisHash(redisHash, 3600);
 } else if (response.data.value[0] === 'done') {
 clearInterval(statusCheck);
+ {errorText}
+ See the FAQ for information on common errors.
 }
 {/* Submit button */}
 { !submitted &&
diff --git a/src/Predict/jobData.js b/src/Predict/jobData.js
index ebd3343..de576d0 100644
--- a/src/Predict/jobData.js
+++ b/src/Predict/jobData.js
@@ -13,7 +13,7 @@ const jobCards = {
 file: 'tiff_stack_examples/vectra_breast_cancer.tif',
 name: 'Mesmer',
 model: 'Mesmer performs whole-cell segmentation of multiplex tissue data.',
-inputs: 'A two-channel TIFF where the first channel is a nuclear marker and the second channel is a membrane marker.',
+inputs: 'An image containing both a nuclear marker and a membrane/cytoplasm marker taken at ~20X (0.5 μm per pixel).',
 thumbnail: 'thumbnails/breast_vectra.png',
 scaleEnabled: false,
 requiredChannels: ['nuclei', 'cytoplasm'],
@@ -32,7 +32,7 @@ const jobCards = {
 file: 'tiff_stack_examples/vectra_breast_cancer.tif',
 name: 'Mesmer',
 model: 'Mesmer performs whole-cell segmentation of multiplex tissue data.',
-inputs: 'A two-channel TIFF where the first channel is a nuclear marker and the second channel is a membrane marker.',
+inputs: 'An image containing both a nuclear marker and a membrane/cytoplasm marker taken at ~20X (0.5 μm per pixel).',
 thumbnail: 'thumbnails/breast_vectra.png',
 scaleEnabled: false,
 requiredChannels: ['nuclei', 'cytoplasm'],
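For anyone extending jobData.js, the card shape shown in the hunks above generalizes directly. A hedged sketch of what registering a new card might look like; every value here is invented for illustration, and only the field names come from the diff.

// Hypothetical example entry following the jobCards shape above; the model
// name, files, and channels are made up.
const exampleCard = {
  file: 'tiff_stack_examples/example_tissue.tif',
  name: 'ExampleModel',
  model: 'ExampleModel performs nuclear segmentation of tissue data.',
  inputs: 'A single-channel image of a nuclear marker.',
  thumbnail: 'thumbnails/example_tissue.png',
  scaleEnabled: false,
  requiredChannels: ['nuclei'],
};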