diff --git a/.github/workflows/cdci.yml b/.github/workflows/cdci.yml
index 538e6f5..13652a9 100644
--- a/.github/workflows/cdci.yml
+++ b/.github/workflows/cdci.yml
@@ -118,6 +118,14 @@ jobs:
echo "Error: One or more protected files have been modified."
exit 1
fi
+ - name: check streamlit report files for chatbot API
+ run: |
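+ # regenerate the chatbot example report and fail if tracked report files change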
+ vuegen -c docs/example_config_files/Chatbot_example_config.yaml -output_dir tests/report_examples/chat_bot
+ if git diff tests/report_examples | grep .; then
+ echo "Failed for the chatbot streamlit report"
+ echo "Error: One or more protected files have been modified."
+ exit 1
+ fi
- name: check for changes in report files
run: |
# write streamlit report to test folder
diff --git a/.gitignore b/.gitignore
index dafea3c..dcb46fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -116,7 +116,7 @@ cython_debug/
# Temporary files
logs/
vuegen/logs/
-./streamlit_report/
+streamlit_report/
!tests/report_examples
quarto_report/
output_docker/
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..5e73618
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "editor.rulers": [88, 100, 120]
+}
diff --git a/docs/README.md b/docs/README.md
index 8e11f08..2c090f5 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,10 +1,10 @@
# Docs creation
-In order to build the docs you need to
+In order to build the docs you need to:
- 1. Install sphinx and additional support packages
- 2. Build the package reference files
- 3. Run sphinx to create a local html version
+1. Install sphinx and additional support packages
+2. Build the package reference files
+3. Run sphinx to create a local html version
The documentation is build using readthedocs automatically.
@@ -18,12 +18,13 @@ poetry install --with docs
## Build docs using Sphinx command line tools
-Command to be run from `path/to/docs`, i.e. from within the `docs` package folder:
+Command to be run from `path/to/docs`, i.e. from within the `docs` package folder:
Options:
- - `--separate` to build separate pages for each (sub-)module
-```bash
+- `--separate` to build separate pages for each (sub-)module
+
+```bash
# pwd: docs
# apidoc
sphinx-apidoc --force --implicit-namespaces --module-first -o reference ../src/vuegen
@@ -38,4 +39,3 @@ The README is included in the `Overview` section of the docs. We created a [Pyth
Relative links are used in the main README, which need to be resolved when building. It's
possible to include the a `relative-docs` option if one uses `index.md` ([see docs](https://myst-parser.readthedocs.io/en/latest/faq/index.html#include-a-file-from-outside-the-docs-folder-like-readme-md)). This does not work
with `href` links, only native markdown links.
-
diff --git a/docs/example_config_files/Chatbot_example_config.yaml b/docs/example_config_files/Chatbot_example_config.yaml
index 86f9ad0..d9c4b41 100644
--- a/docs/example_config_files/Chatbot_example_config.yaml
+++ b/docs/example_config_files/Chatbot_example_config.yaml
@@ -1,7 +1,7 @@
report:
title: Chatbot example
description: >
- A chatbot exaple.
+ A chatbot example.
sections:
- title: ChatBot test
subsections:
diff --git a/docs/example_config_files/Earth_microbiome_vuegen_demo_notebook_config.yaml b/docs/example_config_files/Earth_microbiome_vuegen_demo_notebook_config.yaml
index 626d37e..4bc273f 100644
--- a/docs/example_config_files/Earth_microbiome_vuegen_demo_notebook_config.yaml
+++ b/docs/example_config_files/Earth_microbiome_vuegen_demo_notebook_config.yaml
@@ -1,16 +1,16 @@
report:
title: Earth Microbiome Vuegen Demo Notebook
- description: "The Earth Microbiome Project (EMP) is a systematic attempt to characterize\
- \ global microbial taxonomic and functional diversity for the benefit of the planet\
- \ and humankind. \n It aimed to sample the Earth\u2019s microbial communities\
- \ at an unprecedented scale in order to advance our understanding of the organizing\
- \ biogeographic principles that govern microbial community structure. \n The\
- \ EMP dataset is generated from samples that individual researchers have compiled\
- \ and contributed to the EMP. \n The result is both a reference database giving\
- \ global context to DNA sequence data and a framework for incorporating data from\
- \ future studies, fostering increasingly complete characterization of Earth\u2019\
- s microbial diversity.\n \n You can find more information about the Earth Microbiome\
- \ Project at https://earthmicrobiome.org/ and in the [original article](https://www.nature.com/articles/nature24621).\n"
+ description: >
+ The Earth Microbiome Project (EMP) is a systematic attempt to characterize global
+ microbial taxonomic and functional diversity for the benefit of the planet and humankind.
+ It aimed to sample the Earth’s microbial communities at an unprecedented scale in order to
+ advance our understanding of the organizing biogeographic principles that govern microbial
+ community structure. The EMP dataset is generated from samples that individual researchers
+ have compiled and contributed to the EMP. The result is both a reference database giving
+ global context to DNA sequence data and a framework for incorporating data from future
+ studies, fostering increasingly complete characterization of Earth’s microbial diversity.
+ You can find more information about the Earth Microbiome Project at https://earthmicrobiome.org/
+ and in the original article at https://www.nature.com/articles/nature24621.
graphical_abstract: https://raw.githubusercontent.com/ElDeveloper/cogs220/master/emp-logo.svg
logo: https://raw.githubusercontent.com/ElDeveloper/cogs220/master/emp-logo.svg
sections:
diff --git a/docs/vuegen_basic_case_study_configfile.md b/docs/vuegen_basic_case_study_configfile.md
index 2336459..6cb1d71 100644
--- a/docs/vuegen_basic_case_study_configfile.md
+++ b/docs/vuegen_basic_case_study_configfile.md
@@ -1,13 +1,13 @@
# Predefined Directory Case Study - Configuration File
-The [configuration file](https://github.com/Multiomics-Analytics-Group/vuegen/blob/main/docs/example_config_files/Basic_example_vuegen_demo_notebook_config.yaml) of the basic case study using a predefined directory is presented below:
+The [configuration file](https://github.com/Multiomics-Analytics-Group/vuegen/blob/main/docs/example_config_files/Basic_example_vuegen_demo_notebook_config.yaml) of the basic case study using a predefined directory is presented below:
```yaml
report:
title: Basic Example Vuegen Demo Notebook
description: A general description of the report.
- graphical_abstract: https://raw.githubusercontent.com/Multiomics-Analytics-Group/vuegen/main/docs/images/vuegen_logo.svg
- logo: https://raw.githubusercontent.com/Multiomics-Analytics-Group/vuegen/main/docs/images/vuegen_logo.svg
+ graphical_abstract: https://raw.githubusercontent.com/Multiomics-Analytics-Group/vuegen/main/docs/images/vuegen_logo.png
+ logo: https://raw.githubusercontent.com/Multiomics-Analytics-Group/vuegen/main/docs/images/vuegen_logo.png
sections:
- title: Plots
description: This section contains example plots.
diff --git a/docs/vuegen_earth_microbiome_case_study_configfile.md b/docs/vuegen_earth_microbiome_case_study_configfile.md
index 8f01857..318c50d 100644
--- a/docs/vuegen_earth_microbiome_case_study_configfile.md
+++ b/docs/vuegen_earth_microbiome_case_study_configfile.md
@@ -5,17 +5,17 @@ The [configuration file](https://github.com/Multiomics-Analytics-Group/vuegen/bl
```yaml
report:
title: Earth Microbiome Vuegen Demo Notebook
- description: "The Earth Microbiome Project (EMP) is a systematic attempt to characterize\
- \ global microbial taxonomic and functional diversity for the benefit of the planet\
- \ and humankind. \n It aimed to sample the Earth\u2019s microbial communities\
- \ at an unprecedented scale in order to advance our understanding of the organizing\
- \ biogeographic principles that govern microbial community structure. \n The\
- \ EMP dataset is generated from samples that individual researchers have compiled\
- \ and contributed to the EMP. \n The result is both a reference database giving\
- \ global context to DNA sequence data and a framework for incorporating data from\
- \ future studies, fostering increasingly complete characterization of Earth\u2019\
- s microbial diversity.\n \n You can find more information about the Earth Microbiome\
- \ Project at https://earthmicrobiome.org/ and in the [original article](https://www.nature.com/articles/nature24621).\n"
+ description: >
+ The Earth Microbiome Project (EMP) is a systematic attempt to characterize global
+ microbial taxonomic and functional diversity for the benefit of the planet and humankind.
+ It aimed to sample the Earth’s microbial communities at an unprecedented scale in order to
+ advance our understanding of the organizing biogeographic principles that govern microbial
+ community structure. The EMP dataset is generated from samples that individual researchers
+ have compiled and contributed to the EMP. The result is both a reference database giving
+ global context to DNA sequence data and a framework for incorporating data from future
+ studies, fostering increasingly complete characterization of Earth’s microbial diversity.
+ You can find more information about the Earth Microbiome Project at https://earthmicrobiome.org/
+ and in the original article at https://www.nature.com/articles/nature24621.
graphical_abstract: https://raw.githubusercontent.com/ElDeveloper/cogs220/master/emp-logo.svg
logo: https://raw.githubusercontent.com/ElDeveloper/cogs220/master/emp-logo.svg
sections:
@@ -128,8 +128,7 @@ sections:
component_type: PLOT
plot_type: STATIC
- title: Shanon entropy analysis
- description: This subsection contains the Shannon entropy analysis of the EMP
- dataset.
+ description: This subsection contains the Shannon entropy analysis of the EMP dataset.
components:
- title: Specificity of sequences and higher taxonomic groups for environment
file_path: https://raw.githubusercontent.com/biocore/emp/master/methods/images/figure4_entropy.png
diff --git a/pyproject.toml b/pyproject.toml
index 7a886d1..cff7df2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -74,3 +74,20 @@ vuegen = "vuegen.__main__:main"
[tool.isort]
profile = "black"
+
+[tool.jupytext]
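+# pair .ipynb notebooks with py:percent scripts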
+formats = "ipynb,py:percent"
+
+[tool.ruff]
+# Allow lines to be as long as:
+line-length = 88
+
+[tool.ruff.lint]
+# https://docs.astral.sh/ruff/tutorial/#rule-selection
+# 1. Enable flake8-bugbear (`B`) rules
+# 2. Enable pycodestyle (`E`) errors and (`W`) warnings
+# 3. Pyflakes (`F`) errors
+extend-select = ["E", "W", 'F', 'B']
+
+[tool.black]
+line-length = 88
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..6e63392
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[flake8]
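+# keep flake8's line length in sync with black and ruff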
+exclude = docs
+max-line-length = 88
+aggressive = 2
diff --git a/src/vuegen/__init__.py b/src/vuegen/__init__.py
index 5becc17..3e1f602 100644
--- a/src/vuegen/__init__.py
+++ b/src/vuegen/__init__.py
@@ -1 +1,6 @@
+"""VueGen automates the creation of reports from bioinformatics outputs,
+supporting formats like PDF, HTML, DOCX, ODT, PPTX, Reveal.js, Jupyter notebooks,
+and Streamlit web applications. Users simply provide a directory with output files
+and VueGen compiles them into a structured report."""
+
__version__ = "1.0.0"
diff --git a/src/vuegen/__main__.py b/src/vuegen/__main__.py
index be29fde..826375f 100644
--- a/src/vuegen/__main__.py
+++ b/src/vuegen/__main__.py
@@ -1,3 +1,5 @@
+"""Command-line interface for VueGen report generation."""
+
import sys
from pathlib import Path
diff --git a/src/vuegen/config_manager.py b/src/vuegen/config_manager.py
index d3a613b..d356e4f 100644
--- a/src/vuegen/config_manager.py
+++ b/src/vuegen/config_manager.py
@@ -1,3 +1,7 @@
+"""ConfigManage creates configuration files from folders and can create components
+for reports from YAML config files.
+"""
+
import json
import logging
import os
@@ -10,7 +14,8 @@
class ConfigManager:
"""
- Class for handling metadata of reports from YAML config file and creating report objects.
+ Class for handling metadata of reports from a YAML config file and creating
+ report objects.
"""
def __init__(self, logger: Optional[logging.Logger] = None, max_depth: int = 2):
@@ -20,10 +25,11 @@ def __init__(self, logger: Optional[logging.Logger] = None, max_depth: int = 2):
Parameters
----------
logger : logging.Logger, optional
- A logger instance for the class. If not provided, a default logger will be created.
+ A logger instance for the class.
+ If not provided, a default logger will be created.
max_depth : int, optional
- The maximum depth of the directory structure to consider when generating the report
- config from a directory.
+ The maximum depth of the directory structure to consider when generating
+ the report config from a directory.
The default is 2, which means it will include sections and subsections.
"""
if logger is None:
@@ -53,7 +59,8 @@ def _create_title_fromdir(self, file_dirname: str) -> str:
def _create_component_config_fromfile(self, file_path: Path) -> Dict[str, str]:
"""
- Infers a component config from a file, including component type, plot type, and additional fields.
+ Infers a component config from a file, including component type, plot type,
+ and additional fields.
Parameters
----------
@@ -144,13 +151,18 @@ def _create_component_config_fromfile(self, file_path: Path) -> Dict[str, str]:
else:
component_config["plot_type"] = r.PlotType.PLOTLY.value
except Exception as e:
- self.logger.warning(f"Could not parse JSON file {file_path}: {e}")
+ self.logger.warning(
+ "Could not parse JSON file %s: %s", file_path, e, exc_info=True
+ )
component_config["plot_type"] = "unknown"
elif file_ext == ".md":
component_config["component_type"] = r.ComponentType.MARKDOWN.value
else:
+ if not file_ext:
+ # hidden files starting with a dot have no suffix; use the full name
+ file_ext = file_path.name
self.logger.error(
- f"Unsupported file extension: {file_ext}. Skipping file: {file_path}"
+ "Unsupported file extension: %s. Skipping file: %s", file_ext, file_path
)
return None
@@ -158,7 +170,8 @@ def _create_component_config_fromfile(self, file_path: Path) -> Dict[str, str]:
def _sort_paths_by_numprefix(self, paths: List[Path]) -> List[Path]:
"""
- Sorts a list of Paths by numeric prefixes in their names, placing non-numeric items at the end.
+ Sorts a list of Paths by numeric prefixes in their names, placing non-numeric
+ items at the end.
Parameters
----------
@@ -239,7 +252,8 @@ def _create_subsect_config_fromdir(
continue
# components are added to subsection
# ! Alternatively, one could add (sub-)sections to the subsection
- # ? Then one could remove differentiation between sections and subsections
+ # ? Then one could remove differentiation between sections and
+ # ? subsections
nested_components = self._create_subsect_config_fromdir(file, level + 1)
components.extend(nested_components["components"])
@@ -298,7 +312,8 @@ def create_yamlconfig_fromdir(
self, base_dir: str
) -> Tuple[Dict[str, Union[str, List[Dict]]], Path]:
"""
- Generates a YAML-compatible config file from a directory. It also returns the resolved folder path.
+ Generates a YAML-compatible config file from a directory. It also returns the
+ resolved folder path.
Parameters
----------
@@ -361,7 +376,8 @@ def create_yamlconfig_fromdir(
def initialize_report(self, config: dict) -> tuple[r.Report, dict]:
"""
- Extracts report metadata from a YAML config file and returns a Report object and the raw metadata.
+ Extracts report metadata from a YAML config file and returns a Report object and
+ the raw metadata.
Parameters
----------
@@ -371,7 +387,8 @@ def initialize_report(self, config: dict) -> tuple[r.Report, dict]:
Returns
-------
report, config : tuple[Report, dict]
- A tuple containing the Report object created from the YAML config file and the raw metadata dictionary.
+ A tuple containing the Report object created from the YAML config file and
+ the raw metadata dictionary.
Raises
------
@@ -396,7 +413,9 @@ def initialize_report(self, config: dict) -> tuple[r.Report, dict]:
report.sections.append(section)
self.logger.info(
- f"Report '{report.title}' initialized with {len(report.sections)} sections."
+ "Report '%s' initialized with %d sections.",
+ report.title,
+ len(report.sections),
)
return report, config
@@ -472,7 +491,8 @@ def _create_component(self, component_data: dict) -> r.Component:
Returns
-------
Component
- A Component object (Plot, DataFrame, or Markdown) populated with the provided metadata.
+ A Component object (Plot, DataFrame, or Markdown) populated with the
+ provided metadata.
"""
# Determine the component type
component_type = assert_enum_value(
@@ -620,8 +640,10 @@ def _create_apicall_component(self, component_data: dict) -> r.APICall:
try:
parsed_body = json.loads(request_body)
except json.JSONDecodeError as e:
- self.logger.error(f"Failed to parse request_body JSON: {e}")
- raise ValueError(f"Invalid JSON in request_body: {e}")
+ self.logger.error(
+ "Failed to parse request_body JSON: %s", e, exc_info=True
+ )
+ raise ValueError("Invalid JSON in request_body.") from e
return r.APICall(
title=component_data["title"],
diff --git a/src/vuegen/constants.py b/src/vuegen/constants.py
new file mode 100644
index 0000000..72e27eb
--- /dev/null
+++ b/src/vuegen/constants.py
@@ -0,0 +1,11 @@
+"""Constants for the Vuegen project."""
+
+GITHUB_ORG_URL = "https://github.com/Multiomics-Analytics-Group"
+ORG = "Multiomics Network Analytics Group (MoNA)"
+GITHUB_ORG_URL_BRACKETS = "{https://github.com/Multiomics-Analytics-Group}"
+REPO_URL = "https://github.com/Multiomics-Analytics-Group/vuegen"
+LOGO_URL = (
+ "https://raw.githubusercontent.com/Multiomics-Analytics-Group/"
+ "vuegen/main/docs/images/vuegen_logo.svg"
+)
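+# default timeout in seconds for API requests made by report components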
+TIMEOUT: int = 60
diff --git a/src/vuegen/quarto_reportview.py b/src/vuegen/quarto_reportview.py
index 738993d..32b5599 100644
--- a/src/vuegen/quarto_reportview.py
+++ b/src/vuegen/quarto_reportview.py
@@ -1,3 +1,5 @@
+"""QuartoReportView class for generating Quarto reports."""
+
import os
import subprocess
import sys
@@ -9,6 +11,7 @@
from . import report as r
from . import table_utils
+from .constants import GITHUB_ORG_URL, GITHUB_ORG_URL_BRACKETS, LOGO_URL, ORG, REPO_URL
from .utils import create_folder, get_relative_file_path, is_url, sort_imports
@@ -52,13 +55,13 @@ def __init__(
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
self.report.logger.info("running in a PyInstaller bundle")
# self.BUNDLED_EXECUTION = True
- self.report.logger.debug(f"sys._MEIPASS: {sys._MEIPASS}")
+ self.report.logger.debug("sys._MEIPASS: %s", sys._MEIPASS)
else:
self.report.logger.info("running in a normal Python process")
self.report.logger.debug("env_vars (QuartoReport): %s", os.environ)
- self.report.logger.debug(f"PATH: {os.environ['PATH']}")
- self.report.logger.debug(f"sys.path: {sys.path}")
+ self.report.logger.debug("PATH: %s", os.environ["PATH"])
+ self.report.logger.debug("sys.path: %s", sys.path)
self.is_report_static = self.report_type in {
r.ReportType.PDF,
@@ -76,7 +79,8 @@ def __init__(
def generate_report(self, output_dir: Optional[Path] = None) -> None:
"""
- Generates the qmd file of the quarto report. It creates code for rendering each section and its subsections with all components.
+ Generates the qmd file of the quarto report. It creates code for rendering
+ each section and its subsections with all components.
Parameters
----------
@@ -88,24 +92,27 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
self.output_dir = Path(output_dir).resolve()
self.report.logger.debug(
- f"Generating '{self.report_type}' report in directory: '{self.output_dir}'"
+ "Generating '%s' report in directory: '%s'",
+ self.report_type,
+ self.output_dir,
)
# Create the output folder
if create_folder(self.output_dir, is_nested=True):
- self.report.logger.debug(f"Created output directory: '{self.output_dir}'")
+ self.report.logger.debug("Created output directory: '%s'", self.output_dir)
else:
self.report.logger.debug(
- f"Output directory already existed: '{self.output_dir}'"
+ "Output directory already existed: '%s'", self.output_dir
)
# Create the static folder
if create_folder(self.static_dir):
self.report.logger.info(
- f"Created output directory for static content: '{self.static_dir}'"
+ "Created output directory for static content: '%s'", self.static_dir
)
else:
self.report.logger.info(
- f"Output directory for static content already existed: '{self.static_dir}'"
+ "Output directory for static content already existed: '%s'",
+ self.static_dir,
)
try:
@@ -117,9 +124,8 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
# Create qmd content and imports for the report
qmd_content = []
- report_imports = (
- []
- ) # only one global import list for a single report (different to streamlit)
+ # only one global import list for a single report (different to streamlit)
+ report_imports = []
# Add description of the report
if self.report.description:
@@ -135,7 +141,9 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
self.report.logger.info("Starting to generate sections for the report.")
for section in self.report.sections:
self.report.logger.debug(
- f"Processing section: '{section.title}' - {len(section.subsections)} subsection(s)"
+ "Processing section: '%s' - %d subsection(s)",
+ section.title,
+ len(section.subsections),
)
# Add section header and description
qmd_content.append(f"# {section.title}")
@@ -143,8 +151,8 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
qmd_content.append(f"""{section.description}\n""")
# Add components of section to the report
- # ! description can be a Markdown component, but it is treated differently
- # ! It won't be added to the section content.
+ # ! description can be a Markdown component, but it is treated
+ # ! differently. It won't be added to the section content.
if section.components:
self.report.logger.debug(
"Adding components of section folder to the report."
@@ -167,10 +175,13 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
report_imports.extend(section_imports)
if section.subsections:
- # Iterate through subsections and integrate them into the section file
+ # Iterate through subsections and
+ # integrate them into the section file
for subsection in section.subsections:
self.report.logger.debug(
- f"Processing subsection: '{subsection.title}' - {len(subsection.components)} component(s)"
+ "Processing subsection: '%s' - %d component(s)",
+ subsection.title,
+ len(subsection.components),
)
# Generate content for the subsection
subsection_content, subsection_imports = (
@@ -185,7 +196,9 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
) # even easier as it's global
else:
self.report.logger.warning(
- f"No subsections found in section: '{section.title}'. To show content in the report, add subsections to the section."
+ "No subsections found in section: '%s'. To show content "
+ "in the report, add subsections to the section.",
+ section.title,
)
# Add globally set output folder
report_imports.append("from pathlib import Path")
@@ -208,7 +221,7 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
# Write the navigation and general content to a Python file
fname_qmd_report = self.output_dir / f"{self.BASE_DIR}.qmd"
- with open(fname_qmd_report, "w") as quarto_report:
+ with open(fname_qmd_report, "w", encoding="utf-8") as quarto_report:
quarto_report.write(yaml_header)
quarto_report.write(
f"""\n```{{python}}
@@ -218,12 +231,14 @@ def generate_report(self, output_dir: Optional[Path] = None) -> None:
)
quarto_report.write("\n".join(qmd_content))
self.report.logger.info(
- f"Created qmd script to render the app: {fname_qmd_report}"
+ "Created qmd script to render the app: %s", fname_qmd_report
)
except Exception as e:
self.report.logger.error(
- f"An error occurred while generating the report: {str(e)}"
+ "An error occurred while generating the report: %s",
+ e,
+ exc_info=True,
)
raise
@@ -243,7 +258,10 @@ def run_report(self, output_dir: Optional[Path] = None) -> None:
file_path_to_qmd = Path(self.output_dir) / f"{self.BASE_DIR}.qmd"
args = [self.quarto_path, "render", str(file_path_to_qmd)]
self.report.logger.info(
- f"Running '{self.report.title}' '{self.report_type}' report with {args!r}"
+ "Running '%s' '%s' report with %r",
+ self.report.title,
+ self.report_type,
+ args,
)
if (
self.report_type
@@ -281,25 +299,29 @@ def run_report(self, output_dir: Optional[Path] = None) -> None:
check=True,
)
self.report.logger.info(
- f"Converted '{self.report.title}' '{self.report_type}' report to Jupyter Notebook after execution"
+ "Converted '%s' '%s' report to Jupyter Notebook after execution",
+ self.report.title,
+ self.report_type,
)
self.report.logger.info(
- f"'{self.report.title}' '{self.report_type}' report rendered"
+ "'%s' '%s' report rendered",
+ self.report.title,
+ self.report_type,
)
except subprocess.CalledProcessError as e:
self.report.logger.error(
- f"Error running '{self.report.title}' {self.report_type} report: {str(e)}"
+ "Error running '%s' %s report: %s",
+ self.report.title,
+ self.report_type,
+ e,
+ exc_info=True,
)
raise
- # except FileNotFoundError as e:
- # self.report.logger.error(
- # f"Quarto is not installed. Please install Quarto to run the report: {str(e)}"
- # )
- # raise
def _create_yaml_header(self) -> str:
"""
- Creates a YAML header for the Quarto report based on the specified eport type and output format.
+ Creates a YAML header for the Quarto report based on the specified report type
+ and output format.
Returns
-------
@@ -307,122 +329,157 @@ def _create_yaml_header(self) -> str:
A formatted YAML header string customized for the specified output format.
"""
# Base YAML header with title
- yaml_header = f"""---
-title: {self.report.title}
-fig-align: center
-execute:
- echo: false
- output: asis
-jupyter: python3
-format:"""
-
+ yaml_header = textwrap.dedent(
+ f"""\
+ ---
+ title: {self.report.title}
+ fig-align: center
+ execute:
+ echo: false
+ output: asis
+ jupyter: python3
+ format:"""
+ )
# Define format-specific YAML configurations
+ # \u007b is { and \u007d is }
format_configs = {
- r.ReportType.HTML: """
- html:
- toc: true
- toc-location: left
- toc-depth: 3
- page-layout: full
- self-contained: true
-include-in-header:
- text: |
-
-include-after-body:
- text: |
- """,
- r.ReportType.PDF: """
- pdf:
- toc: false
- fig-align: center
- margin:
- - bottom=40mm
- include-in-header:
- text: |
- \\usepackage{scrlayer-scrpage}
- \\usepackage{hyperref}
- \\clearpairofpagestyles
- \\lofoot{This report was generated with \\href{https://github.com/Multiomics-Analytics-Group/vuegen}{VueGen} | \\copyright{} 2025 \\href{https://github.com/Multiomics-Analytics-Group}{Multiomics Network Analytics Group}}
- \\rofoot{\\pagemark}""",
- r.ReportType.DOCX: """
- docx:
- toc: false""",
- r.ReportType.ODT: """
- odt:
- toc: false""",
- r.ReportType.REVEALJS: """
- revealjs:
- toc: false
- smaller: true
- controls: true
- navigation-mode: vertical
- controls-layout: bottom-right
- output-file: quarto_report_revealjs.html
-include-in-header:
- text: |
-
-include-after-body:
- text: |
- """,
- r.ReportType.PPTX: """
- pptx:
- toc: false
- output: true""",
- r.ReportType.JUPYTER: """
- html:
- toc: true
- toc-location: left
- toc-depth: 3
- page-layout: full
- self-contained: true
-include-in-header:
- text: |
-
-include-after-body:
- text: |
- """,
+ r.ReportType.HTML: textwrap.dedent(
+ f"""
+ html:
+ toc: true
+ toc-location: left
+ toc-depth: 3
+ page-layout: full
+ self-contained: true
+ include-in-header:
+ text: |
+
+ include-after-body:
+ text: |
+ """
+ ),
+ # \u007b is { and \u007d is }
+ r.ReportType.PDF: textwrap.indent(
+ textwrap.dedent(
+ f"""
+ pdf:
+ toc: false
+ fig-align: center
+ margin:
+ - bottom=40mm
+ include-in-header:
+ text: |
+ \\usepackage{{scrlayer-scrpage}}
+ \\usepackage{{hyperref}}
+ \\clearpairofpagestyles
+ \\lofoot\u007bThis report was generated with
+ \\href{{{REPO_URL}}}{{VueGen}} | \\copyright{{}} 2025
+ \\href{GITHUB_ORG_URL_BRACKETS}\u007b{ORG}\u007d\u007d
+ \\rofoot{{\\pagemark}}"""
+ ),
+ " ",
+ ),
+ r.ReportType.DOCX: textwrap.indent(
+ textwrap.dedent(
+ """
+ docx:
+ toc: false"""
+ ),
+ " ",
+ ),
+ r.ReportType.ODT: textwrap.indent(
+ textwrap.dedent(
+ """
+ odt:
+ toc: false"""
+ ),
+ " ",
+ ),
+ r.ReportType.REVEALJS: textwrap.dedent(
+ f"""
+ revealjs:
+ toc: false
+ smaller: true
+ controls: true
+ navigation-mode: vertical
+ controls-layout: bottom-right
+ output-file: quarto_report_revealjs.html
+ include-in-header:
+ text: |
+
+ include-after-body:
+ text: |
+ """
+ ),
+ r.ReportType.PPTX: textwrap.indent(
+ textwrap.dedent(
+ """
+ pptx:
+ toc: false
+ output: true"""
+ ),
+ " ",
+ ),
+ r.ReportType.JUPYTER: textwrap.dedent(
+ f"""
+ html:
+ toc: true
+ toc-location: left
+ toc-depth: 3
+ page-layout: full
+ self-contained: true
+ include-in-header:
+ text: |
+
+ include-after-body:
+ text: |
+ """
+ ),
}
# Create a key based on the report type and format
key = self.report_type
@@ -455,7 +512,7 @@ def _combine_components(self, components: list[dict]) -> tuple[list, list]:
fct = self.components_fct_map.get(component.component_type, None)
if fct is None:
self.report.logger.warning(
- f"Unsupported component type '{component.component_type}' "
+ "Unsupported component type '%s'", component.component_type
)
elif (
component.component_type == r.ComponentType.MARKDOWN
@@ -480,8 +537,9 @@ def _generate_subsection(
is_report_revealjs,
) -> tuple[List[str], List[str]]:
"""
- Generate code to render components (plots, dataframes, markdown) in the given subsection,
- creating imports and content for the subsection based on the component type.
+ Generate code to render components (plots, dataframes, markdown) in the given
+ subsection, creating imports and content for the subsection based on the
+ component type.
Parameters
----------
@@ -516,7 +574,7 @@ def _generate_subsection(
subsection_content.append(":::\n")
self.report.logger.info(
- f"Generated content and imports for subsection: '{subsection.title}'"
+ "Generated content and imports for subsection: '%s'", subsection.title
)
return subsection_content, subsection_imports
@@ -544,7 +602,7 @@ def _generate_plot_content(self, plot) -> List[str]:
static_plot_path = (
Path(self.static_dir) / f"{plot.title.replace(' ', '_')}.png"
).resolve()
- self.report.logger.debug(f"Static plot path: {static_plot_path}")
+ self.report.logger.debug("Static plot path: %s", static_plot_path)
else:
html_plot_file = (
Path(self.static_dir) / f"{plot.title.replace(' ', '_')}.html"
@@ -559,25 +617,24 @@ def _generate_plot_content(self, plot) -> List[str]:
elif plot.plot_type == r.PlotType.PLOTLY:
plot_content.append(self._generate_plot_code(plot))
if self.is_report_static:
- plot_content.append(
- f"""fig_plotly.write_image("{static_plot_path.relative_to(self.output_dir).as_posix()}")\n```\n"""
- )
+ fpath = static_plot_path.relative_to(self.output_dir).as_posix()
+ plot_content.append(f"""fig_plotly.write_image("{fpath}")\n```\n""")
plot_content.append(self._generate_image_content(static_plot_path))
else:
plot_content.append("""fig_plotly.show()\n```\n""")
elif plot.plot_type == r.PlotType.ALTAIR:
plot_content.append(self._generate_plot_code(plot))
if self.is_report_static:
- plot_content.append(
- f"""fig_altair.save("{static_plot_path.relative_to(self.output_dir).as_posix()}")\n```\n"""
- )
+ fpath = static_plot_path.relative_to(self.output_dir).as_posix()
+ plot_content.append(f"""fig_altair.save("{fpath}")\n```\n""")
plot_content.append(self._generate_image_content(static_plot_path))
else:
plot_content.append("""fig_altair\n```\n""")
elif plot.plot_type == r.PlotType.INTERACTIVE_NETWORK:
networkx_graph = plot.read_network()
if isinstance(networkx_graph, tuple):
- # If network_data is a tuple, separate the network and html file path
+ # If network_data is a tuple,
+ # separate the network and html file path
networkx_graph, html_plot_file = networkx_graph
elif isinstance(networkx_graph, nx.Graph) and not self.is_report_static:
# Get the pyvis object and create html
@@ -598,10 +655,15 @@ def _generate_plot_content(self, plot) -> List[str]:
else:
plot_content.append(self._generate_plot_code(plot, html_plot_file))
else:
- self.report.logger.warning(f"Unsupported plot type: {plot.plot_type}")
+ self.report.logger.warning("Unsupported plot type: %s", plot.plot_type)
except Exception as e:
self.report.logger.error(
- f"Error generating content for '{plot.plot_type}' plot '{plot.id}' '{plot.title}': {str(e)}"
+ "Error generating content for '%s' plot '%s' '%s': %s",
+ plot.plot_type,
+ plot.id,
+ plot.title,
+ e,
+ exc_info=True,
)
raise
@@ -610,7 +672,7 @@ def _generate_plot_content(self, plot) -> List[str]:
plot_content.append(f">{plot.caption}\n")
self.report.logger.info(
- f"Successfully generated content for plot: '{plot.title}'"
+ "Successfully generated content for plot: '%s'", plot.title
)
return plot_content
@@ -630,41 +692,64 @@ def _generate_plot_code(self, plot, output_file="") -> str:
The generated plot code as a string.
"""
# Initialize plot code with common structure
- plot_code = f"""```{{python}}
-#| label: '{plot.title} {plot.id}'
-#| fig-cap: ""
-"""
+ plot_code = textwrap.dedent(
+ f"""
+ ```{{python}}
+ #| label: '{plot.title} {plot.id}'
+ #| fig-cap: ""
+ """
+ )
# If the file path is a URL, generate code to fetch content via requests
if is_url(plot.file_path):
- plot_code += f"""
-response = requests.get('{plot.file_path}')
-response.raise_for_status()
-plot_json = response.text\n"""
+ plot_code += textwrap.dedent(
+ f"""
+ response = requests.get('{plot.file_path}')
+ response.raise_for_status()
+ plot_json = response.text
+ """
+ )
else: # If it's a local file
plot_rel_path = get_relative_file_path(
plot.file_path, relative_to=self.output_dir
).as_posix()
- plot_code += f"""
+ plot_code += textwrap.dedent(
+ f"""
with open(report_dir /'{plot_rel_path}', 'r') as plot_file:
- plot_json = json.load(plot_file)\n"""
+ plot_json = json.load(plot_file)
+"""
+ )
# Add specific code for each visualization tool
if plot.plot_type == r.PlotType.PLOTLY:
- plot_code += """
-# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}\n
-# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]\n
-# Convert JSON to string
-plot_json_str = json.dumps(plot_json)\n
-# Create the plotly plot
-fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))\n"""
+ plot_code += textwrap.dedent(
+ """
+ # Keep only 'data' and 'layout' sections
+ plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
+ # Remove 'frame' section in 'data'
+ plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
+ # Convert JSON to string
+ plot_json_str = json.dumps(plot_json)
+ # Create the plotly plot
+ fig_plotly = pio.from_json(plot_json_str)
+ fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
+ """
+ )
elif plot.plot_type == r.PlotType.ALTAIR:
- plot_code += """
-# Convert JSON to string
-plot_json_str = json.dumps(plot_json)\n
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)\n"""
+ plot_code += textwrap.dedent(
+ """
+ # Convert JSON to string
+ plot_json_str = json.dumps(plot_json)
+
+ # Create the altair plot
+ fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
+ """
+ )
elif plot.plot_type == r.PlotType.INTERACTIVE_NETWORK:
# Generate the HTML embedding for interactive networks
if is_url(plot.file_path) and plot.file_path.endswith(".html"):
@@ -675,10 +760,15 @@ def _generate_plot_code(self, plot, output_file="") -> str:
)
# Embed the HTML file in an iframe
- plot_code = f"""
-
-
-
\n"""
+ plot_code = textwrap.dedent(
+ f"""
+
+
+
+ """
+ )
return plot_code
def _generate_dataframe_content(self, dataframe) -> List[str]:
@@ -718,7 +808,9 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
file_extension == fmt.value_with_dot for fmt in r.DataFrameFormat
):
self.report.logger.error(
- f"Unsupported file extension: {file_extension}. Supported extensions are: {', '.join(fmt.value for fmt in r.DataFrameFormat)}."
+ "Unsupported file extension: %s. Supported extensions are: %s.",
+ file_extension,
+ ", ".join(fmt.value for fmt in r.DataFrameFormat),
)
# Build the file path (URL or local file)
@@ -738,8 +830,9 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
if len(sheet_names) > 1:
# If there are multiple sheets, use the first one
self.report.logger.info(
- f"Multiple sheets found in the Excel file: {df_file_path}. "
- f"Sheets: {sheet_names}"
+ "Multiple sheets found in the Excel file: %s. Sheets: %s",
+ df_file_path,
+ sheet_names,
)
else:
sheet_names = None
@@ -773,7 +866,8 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
)
)
dataframe_content.append(
- f"df = pd.{read_function.__name__}(report_dir / '{df_file_path}', "
+ f"df = pd.{read_function.__name__}"
+ f"(report_dir / '{df_file_path}', "
f"sheet_name='{sheet_name}')\n"
)
# Display the dataframe
@@ -783,7 +877,10 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
except Exception as e:
self.report.logger.error(
- f"Error generating content for DataFrame: {dataframe.title}. Error: {str(e)}"
+ "Error generating content for DataFrame: %s. Error: %s",
+ dataframe.title,
+ e,
+ exc_info=True,
)
raise
# Add caption if available
@@ -792,7 +889,7 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
dataframe_content.append(f">{dataframe.caption}\n")
self.report.logger.info(
- f"Successfully generated content for DataFrame: '{dataframe.title}'"
+ "Successfully generated content for DataFrame: '%s'", dataframe.title
)
return dataframe_content
@@ -851,7 +948,10 @@ def _generate_markdown_content(self, markdown) -> List[str]:
except Exception as e:
self.report.logger.error(
- f"Error generating content for Markdown: {markdown.title}. Error: {str(e)}"
+ "Error generating content for Markdown: %s. Error: %s",
+ markdown.title,
+ e,
+ exc_info=True,
)
raise
@@ -860,13 +960,14 @@ def _generate_markdown_content(self, markdown) -> List[str]:
markdown_content.append(f">{markdown.caption}\n")
self.report.logger.info(
- f"Successfully generated content for Markdown: '{markdown.title}'"
+ "Successfully generated content for Markdown: '%s'", markdown.title
)
return markdown_content
def _show_dataframe(self, dataframe, suffix: Optional[str] = None) -> List[str]:
"""
- Appends either a static image or an interactive representation of a DataFrame to the content list.
+ Appends either a static image or an interactive representation of a DataFrame
+ to the content list.
Parameters
----------
@@ -902,7 +1003,8 @@ def _show_dataframe(self, dataframe, suffix: Optional[str] = None) -> List[str]:
else:
# Append code to display the DataFrame interactively
dataframe_content.append(
- """show(df, classes="display nowrap compact", lengthMenu=[3, 5, 10])\n```\n"""
+ 'show(df, classes="display nowrap compact", '
+ "lengthMenu=[3, 5, 10])\n```\n"
)
return dataframe_content
@@ -914,7 +1016,8 @@ def _generate_html_content(self, html) -> List[str]:
Parameters
----------
html : Html
- The HTML component to add to the report. This could be a local file path or a URL.
+ The HTML component to add to the report. This could be a local file path
+ or a URL.
Returns
-------
@@ -934,20 +1037,27 @@ def _generate_html_content(self, html) -> List[str]:
html_file_path = get_relative_file_path(
html.file_path, relative_to=self.output_dir
)
- iframe_code = f"""
-
-
-
\n"""
+ iframe_code = textwrap.dedent(
+ f"""
+
+
+
+ """
+ )
html_content.append(iframe_code)
except Exception as e:
self.report.logger.error(
- f"Error generating content for HTML: {html.title}. Error: {str(e)}"
+ "Error generating content for HTML: %s. Error: %s",
+ html.title,
+ e,
+ exc_info=True,
)
raise
self.report.logger.info(
- f"Successfully generated content for HTML: '{html.title}'"
+ "Successfully generated content for HTML: '%s'", html.title
)
return html_content
@@ -955,7 +1065,8 @@ def _generate_image_content(
self, image_path: str, alt_text: str = "", width: str = "90%"
) -> str:
"""
- Adds an image to the content list in an HTML format with a specified width and height.
+ Adds an image to the content list in an HTML format with a specified width
+ and height.
Parameters
----------
@@ -989,7 +1100,8 @@ def _generate_component_imports(self, component: r.Component) -> List[str]:
Parameters
----------
component : r.Component
- The component for which to generate the required imports. The component can be of type:
+ The component for which to generate the required imports.
+ The component can be of type:
- PLOT
- DATAFRAME
- MARKDOWN
diff --git a/src/vuegen/report.py b/src/vuegen/report.py
index 55aab67..b835e0f 100644
--- a/src/vuegen/report.py
+++ b/src/vuegen/report.py
@@ -1,3 +1,5 @@
+"""Contains all comonent classes and Report related base classes for VueGen."""
+
import logging
import os
from abc import ABC, abstractmethod
@@ -17,10 +19,14 @@
import requests
from pyvis.network import Network
+from vuegen.constants import TIMEOUT
+
from .utils import cyjs_to_networkx, fetch_file_stream, pyvishtml_to_networkx
class ReportType(StrEnum):
+ """Enum representing different types of reports that can be generated."""
+
STREAMLIT = auto()
HTML = auto()
PDF = auto()
@@ -32,6 +38,8 @@ class ReportType(StrEnum):
class ComponentType(StrEnum):
+ """Enum representing different types of components in a report subsection."""
+
PLOT = auto()
DATAFRAME = auto()
MARKDOWN = auto()
@@ -41,6 +49,8 @@ class ComponentType(StrEnum):
class PlotType(StrEnum):
+ """Enum representing different types of plots that can be generated."""
+
STATIC = auto()
PLOTLY = auto()
ALTAIR = auto()
@@ -48,6 +58,8 @@ class PlotType(StrEnum):
class NetworkFormat(StrEnum):
+ """Enum representing different formats for network graphs."""
+
GML = auto()
GRAPHML = auto()
GEXF = auto()
@@ -67,11 +79,15 @@ def value_with_dot(self):
class CSVNetworkFormat(StrEnum):
+ """Enum representing different formats for CSV network files."""
+
EDGELIST = auto()
ADJLIST = auto()
class DataFrameFormat(StrEnum):
+ """Enum representing different file formats for data in DataFrame format."""
+
CSV = auto()
TXT = auto()
PARQUET = auto()
@@ -87,15 +103,17 @@ def value_with_dot(self):
@dataclass
class Component:
"""
- Base class for different components in a report subsection. It encapsulates elements like
- plots, dataframes, markdown, or apicalls, providing a consistent structure for report generation.
+ Base class for different components in a report subsection. It encapsulates
+ elements like plots, dataframes, markdown, or apicalls, providing a consistent
+ structure for report generation.
Attributes
----------
_id_counter : ClassVar[int]
Class-level counter for unique IDs.
id : int
- Unique identifier for the component, assigned automatically when an object is created.
+ Unique identifier for the component, assigned automatically
+ when an object is created.
title : str
Title of the component.
component_type : ComponentType
@@ -103,7 +121,8 @@ class Component:
logger : logging.Logger
Logger object for tracking warnings, errors, and info messages.
file_path : Optional[str]
- Path to the file associated with the component (e.g., plot JSON file, image file, csv file, etc.).
+ Path to the file associated with the component
+ (e.g., plot JSON file, image file, csv file, etc.).
caption : Optional[str]
Caption providing additional context about the component (default: None).
"""
@@ -134,7 +153,8 @@ class Plot(Component):
plot_type : PlotType
The type of the plot (INTERACTIVE or STATIC).
csv_network_format : CSVNetworkFormat, optional
- The format of the CSV file for network plots (EDGELIST or ADJLIST) (default is None).
+ The format of the CSV file for network plots (EDGELIST or ADJLIST)
+ (default is None).
"""
def __init__(
@@ -188,7 +208,8 @@ def read_network(self) -> nx.Graph:
NetworkFormat.CYJS.value_with_dot: cyjs_to_networkx,
}
- # Handle .csv and .txt files with custom delimiters based on the text format (edgelist or adjlist)
+ # Handle .csv and .txt files with custom delimiters based on the text format
+ # (edgelist or adjlist)
try:
# Fetch the file stream (local or URL) using fetch_file_stream
file_stream = fetch_file_stream(self.file_path)
@@ -199,7 +220,9 @@ def read_network(self) -> nx.Graph:
# Check if the file extension matches any Enum value
if not any(file_extension == fmt.value_with_dot for fmt in NetworkFormat):
self.logger.error(
- f"Unsupported file extension: {file_extension}. Supported extensions are: {', '.join(fmt.value for fmt in NetworkFormat)}."
+ "Unsupported file extension: %s. Supported extensions are: %s",
+ file_extension,
+ ", ".join(fmt.value for fmt in NetworkFormat),
)
# Handle HTML files for pyvis interactive networks
@@ -207,7 +230,8 @@ def read_network(self) -> nx.Graph:
G = pyvishtml_to_networkx(file_stream)
return (G, self.file_path)
- # Handle CSV and TXT files with custom delimiters based on the text format (edgelist or adjlist)
+ # Handle CSV and TXT files with custom delimiters based on the text format
+ # (edgelist or adjlist)
if (
file_extension
in [NetworkFormat.CSV.value_with_dot, NetworkFormat.TXT.value_with_dot]
@@ -216,23 +240,31 @@ def read_network(self) -> nx.Graph:
delimiter = "," if file_extension == ".csv" else "\\t"
try:
df_net = pd.read_csv(file_stream, delimiter=delimiter)
- except pd.errors.ParserError:
+ except pd.errors.ParserError as e:
self.logger.error(
- f"Error parsing CSV/TXT file {self.file_path}. Please check the file format or delimiter."
+ "Error parsing CSV/TXT file %s. "
+ "Please check the file format or delimiter: %s.",
+ self.file_path,
+ e,
+ exc_info=True,
)
if self.csv_network_format == CSVNetworkFormat.EDGELIST:
- # Assert that "source" and "target" columns are present in the DataFrame
+ # Assert that "source" and "target" columns
+ # are present in the DataFrame
required_columns = {"source", "target"}
if not required_columns.issubset(df_net.columns):
missing_cols = ", ".join(
required_columns.difference(df_net.columns)
)
self.logger.error(
- f"CSV network file must contain 'source' and 'target' columns. Missing columns: {missing_cols}."
+ "CSV network file must contain 'source' and 'target'"
+ " columns. Missing columns: %s.",
+ missing_cols,
)
- # Use additional columns as edge attributes, excluding "source" and "target"
+ # Use additional columns as edge attributes,
+ # excluding "source" and "target"
edge_attributes = [
col for col in df_net.columns if col not in required_columns
]
@@ -251,30 +283,34 @@ def read_network(self) -> nx.Graph:
)
self.logger.info(
- f"Successfully read network from file: {self.file_path}."
+ "Successfully read network from file: %s.", self.file_path
)
return G
elif self.csv_network_format == CSVNetworkFormat.ADJLIST:
G = nx.from_pandas_adjacency(df_net)
self.logger.info(
- f"Successfully read network from file: {self.file_path}."
+ "Successfully read network from file: %s.", self.file_path
)
return G
else:
self.logger.error(
- f"Unsupported format for CSV/TXT file: {self.csv_network_format}."
+ "Unsupported format for CSV/TXT file: %s.",
+ self.csv_network_format,
)
- # Handle other formats using the mapping and return the NetworkX graph object from the specified network file
+ # Handle other formats using the mapping and return the NetworkX graph
+ # object from the specified network file
G = file_extension_map[file_extension](file_stream)
G = self._add_size_attribute(G)
- self.logger.info(f"Successfully read network from file: {self.file_path}.")
+ self.logger.info("Successfully read network from file: %s.", self.file_path)
return G
except Exception as e:
- self.logger.error(f"Error occurred while reading network file: {str(e)}.")
- raise RuntimeError(
- f"An error occurred while reading the network file: {str(e)}"
+ self.logger.error(
+ "Error occurred while reading network file: %s.", e, exc_info=True
)
+ raise RuntimeError(
+ "An error occurred while reading the network file."
+ ) from e
def save_network_image(
self, G: nx.Graph, output_file: str, format: str, dpi: int = 300
@@ -297,20 +333,25 @@ def save_network_image(
# Check if the output file path is valid
if not os.path.isdir(os.path.dirname(output_file)):
self.logger.error(
- f"Directory for saving image does not exist: {os.path.dirname(output_file)}."
+ "Directory for saving image does not exist: %s",
+ os.path.dirname(output_file),
)
raise FileNotFoundError(
- f"The directory for saving the file does not exist: {os.path.dirname(output_file)}."
+ "The directory for saving the file does not exist: "
+ f"{os.path.dirname(output_file)}."
)
# Validate image format
valid_formats = ["png", "jpg", "jpeg", "svg"]
if format.lower() not in valid_formats:
self.logger.error(
- f"Invalid image format: {format}. Supported formats are: {', '.join(valid_formats)}."
+ "Invalid image format: %s. Supported formats are: %s.",
+ format,
+ ", ".join(valid_formats),
)
raise ValueError(
- f"Invalid format: {format}. Supported formats are: {', '.join(valid_formats)}."
+ f"Invalid format: {format}."
+ f" Supported formats are: {', '.join(valid_formats)}."
)
try:
@@ -318,14 +359,15 @@ def save_network_image(
nx.draw(G, with_labels=False)
plt.savefig(output_file, format=format, dpi=dpi)
plt.clf()
- self.logger.info(f"Network image saved successfully at: {output_file}.")
+ self.logger.info("Network image saved successfully at: %s.", output_file)
except Exception as e:
- self.logger.error(f"Failed to save the network image: {str(e)}.")
- raise RuntimeError(f"Failed to save the network image: {str(e)}")
+ self.logger.error("Failed to save the network image: %s.", e, exc_info=True)
+ raise RuntimeError("Failed to save the network image.") from e
def create_and_save_pyvis_network(self, G: nx.Graph, output_file: str) -> Network:
"""
- Creates a PyVis network from a NetworkX graph object and saves it as an HTML file.
+ Creates a PyVis network from a NetworkX graph object and saves it as an HTML
+ file.
Parameters
----------
@@ -343,17 +385,19 @@ def create_and_save_pyvis_network(self, G: nx.Graph, output_file: str) -> Networ
# Check if the network object and output file path are valid
if not isinstance(G, nx.Graph):
self.logger.error(
- f"Provided object is not a valid NetworkX graph: {type(G)}."
+ "Provided object is not a valid NetworkX graph: %s.", type(G)
)
raise TypeError(
f"The provided object is not a valid NetworkX graph: {type(G)}."
)
if not os.path.isdir(os.path.dirname(output_file)):
self.logger.error(
- f"Directory for saving PyVis network does not exist: {os.path.dirname(output_file)}."
+ "Directory for saving PyVis network does not exist: %s.",
+ os.path.dirname(output_file),
)
raise FileNotFoundError(
- f"The directory for saving the file does not exist: {os.path.dirname(output_file)}."
+ "The directory for saving the file does not exist: "
+ f"{os.path.dirname(output_file)}."
)
try:
@@ -384,16 +428,19 @@ def create_and_save_pyvis_network(self, G: nx.Graph, output_file: str) -> Networ
# Save the network as an HTML file
net.save_graph(str(output_file))
- self.logger.info(f"PyVis network created and saved as: {output_file}.")
+ self.logger.info("PyVis network created and saved as: %s.", output_file)
return net
except Exception as e:
- self.logger.error(f"Failed to create and save PyVis network: {str(e)}.")
- raise RuntimeError(f"Failed to create and save the PyVis network: {str(e)}")
+ self.logger.error(
+ "Failed to create and save PyVis network: %s.", e, exc_info=True
+ )
+ raise RuntimeError("Failed to create and save the PyVis network.") from e
def _add_size_attribute(self, G: nx.Graph) -> nx.Graph:
"""
- Adds a 'size' attribute to the nodes of a NetworkX graph based on their degree centrality.
+ Adds a 'size' attribute to the nodes of a NetworkX graph
+ based on their degree centrality.
Parameters
----------
@@ -406,7 +453,7 @@ def _add_size_attribute(self, G: nx.Graph) -> nx.Graph:
A NetworkX graph object with the 'size' attribute added to the nodes.
"""
# Clean up edge attributes to avoid conflicts
- for u, v, data in G.edges(data=True):
+ for _, _, data in G.edges(data=True):
data.pop("source", None)
data.pop("target", None)
@@ -445,9 +492,11 @@ class DataFrame(Component):
Attributes
----------
file_format : DataFrameFormat
- The format of the file from which the DataFrame is loaded (e.g., CSV, TXT, PARQUET).
+ The format of the file from which the DataFrame is loaded
+ (e.g., CSV, TXT, PARQUET).
delimiter : Optional[str]
- The delimiter to use if the file is a delimited text format (e.g., ';', '\t', etc).
+ The delimiter to use if the file is a delimited text format
+ (e.g., ';', '\t', etc).
"""
def __init__(
@@ -530,7 +579,8 @@ class APICall(Component):
api_url : str
The URL of the API to interact with.
method : str
- HTTP method to use for the request ("GET", "POST", or "PUT"). The deafult is "GET".
+ HTTP method to use for the request ("GET", "POST", or "PUT").
+ The default is "GET".
headers : Optional[dict]
Headers to include in the API request (default is None).
params : Optional[dict]
@@ -588,9 +638,9 @@ def make_api_request(
else self.request_body
)
try:
- self.logger.info(f"Making {self.method} request to API: {self.api_url}")
- self.logger.debug(f"Headers: {self.headers}")
- self.logger.debug(f"Params: {self.params}")
+ self.logger.info("Making %s request to API: %s", self.method, self.api_url)
+ self.logger.debug("Headers: %s", self.headers)
+ self.logger.debug("Params: %s", self.params)
response = requests.request(
self.method,
@@ -603,26 +653,29 @@ def make_api_request(
if self.method in ["POST", "PUT", "PATCH"] and request_body_to_send
else None
),
+ timeout=TIMEOUT,
)
response.raise_for_status()
self.logger.info(
- f"Request successful with status code {response.status_code}."
+ "Request successful with status code %d.", response.status_code
)
return response.json()
except requests.exceptions.RequestException as e:
- self.logger.error(f"API request failed: {e}")
+ self.logger.error("API request failed: %s", e, exc_info=True)
return None
class ChatBot(Component):
"""
A component for creating a ChatBot that interacts with an API.
- This component uses an APICall instance to send requests to the chatbot API and receive responses.
+ This component uses an APICall instance to send requests
+ to the chatbot API and receive responses.
Attributes
----------
api_call : APICall
- An instance of the APICall class used to interact with the API for fetching chatbot responses.
+ An instance of the APICall class used to interact
+ with the API for fetching chatbot responses.
model : Optional[str]
The language model to use for the chatbot (default is None).
headers : Optional[dict]
@@ -670,7 +723,8 @@ class Subsection:
_id_counter : ClassVar[int]
Class-level counter for unique IDs.
id : int
- Unique identifier for the subsection, assigned automatically when an object is created.
+ Unique identifier for the subsection, assigned automatically
+ when an object is created.
title : str
Title of the subsection.
components : List[Component]
@@ -698,7 +752,8 @@ def _generate_id(cls) -> int:
return cls._id_counter
-# ? Section is a subclass of Subsection (adding subsections). Destinction might not be necessary
+# ? Section is a subclass of Subsection (adding subsections).
+# ? Distinction might not be necessary
@dataclass
class Section:
"""
@@ -709,7 +764,8 @@ class Section:
_id_counter : ClassVar[int]
Class-level counter for unique IDs.
id : int
- Unique identifier for the section, assigned automatically when an object is created.
+ Unique identifier for the section, assigned automatically
+ when an object is created.
title : str
Title of the section.
subsections : List[Subsection]
@@ -798,9 +854,9 @@ def generate_report(self, output_dir: str = "sections") -> None:
Parameters
----------
output_dir : str, optional
- The folder where the generated report files will be saved (default is 'sections').
+ The folder where the generated report files will be saved
+ (default is 'sections').
"""
- pass
@abstractmethod
def run_report(self, output_dir: str = "sections") -> None:
@@ -812,17 +868,17 @@ def run_report(self, output_dir: str = "sections") -> None:
output_dir : str, optional
The folder where the report was generated (default is 'sections').
"""
- pass
@abstractmethod
- def _generate_component_imports(self) -> str:
+ def _generate_component_imports(self, component: Component) -> str:
"""
Generate necessary imports for a component of the report.
Parameters
----------
- component : r.Component
- The component for which to generate the required imports. The component can be of type:
+ component : Component
+ The component for which to generate the required imports.
+ The component can be of type:
- PLOT
- DATAFRAME
- MARKDOWN
@@ -832,7 +888,6 @@ def _generate_component_imports(self) -> str:
str
A str of import statements for the component.
"""
- pass
class WebAppReportView(ReportView):
@@ -852,7 +907,8 @@ def _format_text(self, text: str, type: str, level: int, color: str) -> str:
type : str
The type of the text (e.g., 'header', 'paragraph').
level : int, optional
- If the text is a header, the level of the header (e.g., 1 for h1, 2 for h2, etc.).
+ If the text is a header, the level of the header
+ (e.g., 1 for h1, 2 for h2, etc.).
color : str, optional
The color of the header text.
@@ -861,7 +917,6 @@ def _format_text(self, text: str, type: str, level: int, color: str) -> str:
str
The formatted text string.
"""
- pass
@abstractmethod
def _generate_sections(self, output_dir: str) -> None:
@@ -877,15 +932,15 @@ def _generate_sections(self, output_dir: str) -> None:
-----
This method is intended to be used internally by the `generate_report` method.
"""
- pass
@abstractmethod
def _generate_subsection(
self, subsection: Subsection
) -> tuple[List[str], List[str]]:
"""
- Generate code to render components (plots, dataframes, markdown) in the given subsection,
- creating imports and content for the subsection based on the component type.
+ Generate code to render components (plots, dataframes, markdown) in the given
+ subsection, creating imports and content for the subsection based on
+ the component type.
Parameters
----------
@@ -898,4 +953,3 @@ def _generate_subsection(
- list of subsection content lines (List[str])
- list of imports for the subsection (List[str])
"""
- pass
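The repeated removal of `pass` in these hunks works because a docstring is itself a valid function body, including under `@abstractmethod`. A tiny sketch, with illustrative class and method names:

```python
from abc import ABC, abstractmethod


class ReportViewSketch(ABC):
    @abstractmethod
    def generate_report(self, output_dir: str = "sections") -> None:
        """Generate the report files in ``output_dir``."""
        # No `pass` needed: the docstring alone is a complete body.
```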
diff --git a/src/vuegen/report_generator.py b/src/vuegen/report_generator.py
index e0b7f56..a3f5db7 100644
--- a/src/vuegen/report_generator.py
+++ b/src/vuegen/report_generator.py
@@ -1,3 +1,5 @@
+"""Main API entry point for generating reports using VueGen."""
+
import logging
import shutil
import sys
@@ -28,23 +30,25 @@ def get_report(
report_type : str
The report type. It should be one of the values of the ReportType Enum.
logger : logging.Logger, optional
- A logger object to track warnings, errors, and info messages. If not provided, a default logger will be created.
+ A logger object to track warnings, errors, and info messages. If not provided,
+ a default logger will be created.
config_path : str, optional
Path to the YAML configuration file.
dir_path : str, optional
Path to the directory from which to generate the configuration file.
streamlit_autorun : bool, optional
- Whether to automatically run the Streamlit report after generation (default is False).
- quarto_checks : bool, optional
- Whether to perform checks for Quarto report generation for TeX and Chromium installation
+ Whether to automatically run the Streamlit report after generation
(default is False).
+ quarto_checks : bool, optional
+ Whether to perform checks for Quarto report generation for TeX and Chromium
+ installation (default is False).
output_dir : Path, optional
The directory where the report folder will be generated.
If not provided, the current directory will be used.
max_depth : int, optional
- The maximum depth of the directory structure to consider when generating the report.
- The default is 2, which means it will include sections and subsections. The parater
- is only used when 'dir_path' is used.
+ The maximum depth of the directory structure to consider when generating the
+ report. The default is 2, which means it will include sections and subsections.
+ The parameter is only used when 'dir_path' is used.
Raises
------
@@ -80,7 +84,7 @@ def get_report(
if dir_path:
# Generate configuration from the provided directory
- yaml_data, base_folder_path = config_manager.create_yamlconfig_fromdir(dir_path)
+ yaml_data, _ = config_manager.create_yamlconfig_fromdir(dir_path)
# yaml_data has under report a title created based on the directory name
config_path = write_yaml_config(yaml_data, output_dir)
logger.info("Configuration file generated at %s", config_path)
@@ -89,7 +93,7 @@ def get_report(
report_config = load_yaml_config(config_path)
# Load report object and metadata
- report, report_metadata = config_manager.initialize_report(report_config)
+ report, _ = config_manager.initialize_report(report_config)
# Validate and convert the report type to its enum value
report_type = assert_enum_value(ReportType, report_type, logger)
@@ -112,12 +116,12 @@ def get_report(
if shutil.which("quarto") is None and not hasattr(
sys, "_MEIPASS"
): # ? and not getattr(sys, "frozen", False)
- logger.error(
- "Quarto is not installed. Please install Quarto before generating this report type."
- )
- raise RuntimeError(
- "Quarto is not installed. Please install Quarto before generating this report type."
+ msg = (
+ "Quarto is not installed. Please install Quarto before generating this "
+ "report type."
)
+ logger.error(msg)
+ raise RuntimeError(msg)
report_dir = output_dir / "quarto_report"
static_files_dir = report_dir / "static"
quarto_report = QuartoReportView(
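For orientation, a hedged usage sketch of `get_report` as documented above; only the keyword names are taken from the docstring, while the config path and values are assumed examples:

```python
from pathlib import Path

from vuegen.report_generator import get_report

# Assumed example invocation; point config_path at an existing YAML config file.
get_report(
    report_type="streamlit",
    config_path="path/to/report_config.yaml",
    streamlit_autorun=False,
    output_dir=Path("."),
)
```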
diff --git a/src/vuegen/streamlit_reportview.py b/src/vuegen/streamlit_reportview.py
index 5ede75a..67023eb 100644
--- a/src/vuegen/streamlit_reportview.py
+++ b/src/vuegen/streamlit_reportview.py
@@ -1,3 +1,8 @@
+"""
+StreamlitReportView class for generating Streamlit reports
+based on a configuration file.
+"""
+
import os
import subprocess
import sys
@@ -20,7 +25,8 @@
def write_python_file(fpath: str, imports: list[str], contents: list[str]) -> None:
- with open(fpath, "w", encoding="utf8") as f:
+ """Write a Python file with the given imports and contents."""
+ with open(fpath, "w", encoding="utf-8") as f:
# Write imports at the top of the file
f.write("\n".join(imports) + "\n\n")
@@ -55,16 +61,18 @@ def __init__(
report_type : r.ReportType
Enum of report type as defined by the ReportType Enum.
streamlit_autorun : bool, optional
- Wheather streamlit should be started after report generation, by default False
+ Whether Streamlit should be started after report generation,
+ by default False
static_dir : str, optional
- The folder where the static files will be saved, by default STATIC_FILES_DIR.
+ The folder where the static files will be saved,
+ by default STATIC_FILES_DIR.
"""
super().__init__(report=report, report_type=report_type)
self.streamlit_autorun = streamlit_autorun
- self.BUNDLED_EXECUTION = False
+ self.bundled_execution = False
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
self.report.logger.info("running in a PyInstaller bundle")
- self.BUNDLED_EXECUTION = True
+ self.bundled_execution = True
else:
self.report.logger.info("running in a normal Python process")
@@ -81,31 +89,36 @@ def __init__(
def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
"""
- Generates the Streamlit report and creates Python files for each section and its subsections and plots.
+ Generates the Streamlit report and creates Python files for each section
+ and its subsections and plots.
Parameters
----------
output_dir : str, optional
- The folder where the generated report files will be saved (default is SECTIONS_DIR).
+ The folder where the generated report files will be saved
+ (default is SECTIONS_DIR).
"""
self.report.logger.debug(
- f"Generating '{self.report_type}' report in directory: '{output_dir}'"
+ "Generating '%s' report in directory: '%s'", self.report_type, output_dir
)
# Create the output folder
if create_folder(output_dir, is_nested=True):
- self.report.logger.info(f"Created output directory: '{output_dir}'")
+ self.report.logger.info("Created output directory: '%s'", output_dir)
else:
- self.report.logger.info(f"Output directory already existed: '{output_dir}'")
+ self.report.logger.info(
+ "Output directory already existed: '%s'", output_dir
+ )
# Create the static folder
if create_folder(self.static_dir):
self.report.logger.info(
- f"Created output directory for static content: '{self.static_dir}'"
+ "Created output directory for static content: '%s'", self.static_dir
)
else:
self.report.logger.info(
- f"Output directory for static content already existed: '{self.static_dir}'"
+ "Output directory for static content already existed: '%s'",
+ self.static_dir,
)
try:
@@ -117,8 +130,8 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
"""\
import os
import time
-
- import psutil
+
+ import psutil
import streamlit as st
"""
)
@@ -127,7 +140,10 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
report_manag_content.append(
textwrap.dedent(
f"""\
- st.set_page_config(layout="wide", page_title="{self.report.title}", page_icon="{self.report.logo}")
+ st.set_page_config(layout="wide",
+ page_title="{self.report.title}",
+ page_icon="{self.report.logo}"
+ )
st.logo("{self.report.logo}")
"""
)
@@ -136,7 +152,8 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
report_manag_content.append(
textwrap.dedent(
f"""\
- st.set_page_config(layout="wide", page_title="{self.report.title}")
+ st.set_page_config(layout="wide",
+ page_title="{self.report.title}")
"""
)
)
@@ -165,22 +182,29 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
if create_folder(section_dir_path):
self.report.logger.debug(
- f"Created section directory: {section_dir_path}"
+ "Created section directory: %s", section_dir_path
)
else:
self.report.logger.debug(
- f"Section directory already existed: {section_dir_path}"
+ "Section directory already existed: %s", section_dir_path
)
- # add an overview page to section of components exist
+ # add an overview page to the section for its components
+ # they will be written when the components are parsed
+ # using `_generate_sections`
if section.components:
+ _fname = (
+ f"0_overview_{make_valid_identifier(section.title).lower()}.py"
+ )
subsection_file_path = (
- Path(section_name_var)
- / f"0_overview_{make_valid_identifier(section.title).lower()}.py"
+ Path(section_name_var) / _fname
).as_posix() # Make sure it's Posix Paths
section.file_path = subsection_file_path
- # Create a Page object for each subsection and add it to the home page content
+ # Create a Page object for each subsection and
+ # add it to the home page content
report_manag_content.append(
- f"{section_name_var}_overview = st.Page('{subsection_file_path}', title='Overview {section.title}')"
+ f"{section_name_var}_overview = "
+ f"st.Page('{subsection_file_path}'"
+ f", title='Overview {section.title}')"
)
subsection_page_vars.append(f"{section_name_var}_overview")
@@ -188,25 +212,30 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
# ! could add a non-integer to ensure it's a valid identifier
subsection_name_var = make_valid_identifier(subsection.title)
if not subsection_name_var.isidentifier():
- self.report.logger.warning(
- f"Subsection name '{subsection_name_var}' is not a valid identifier."
+ msg = (
+ "Subsection name is not a valid Python identifier: "
+ f"{subsection_name_var}"
)
+ self.report.logger.error(msg)
raise ValueError(
- f"Subsection name is not a valid Python identifier: {subsection_name_var}"
+ msg,
)
subsection_file_path = (
Path(section_name_var) / f"{subsection_name_var}.py"
).as_posix() # Make sure it's Posix Paths
subsection.file_path = subsection_file_path
- # Create a Page object for each subsection and add it to the home page content
+ # Create a Page object for each subsection and
+ # add it to the home page content
report_manag_content.append(
- f"{subsection_name_var} = st.Page('{subsection_file_path}', title='{subsection.title}')"
+ f"{subsection_name_var} = st.Page('{subsection_file_path}', "
+ f"title='{subsection.title}')"
)
subsection_page_vars.append(subsection_name_var)
# Add all subsection Page objects to the corresponding section
report_manag_content.append(
- f"sections_pages['{section.title}'] = [{', '.join(subsection_page_vars)}]\n"
+ f"sections_pages['{section.title}'] = "
+ f"[{', '.join(subsection_page_vars)}]\n"
)
# Add navigation object to the home page content
@@ -214,9 +243,12 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
textwrap.dedent(
"""\
report_nav = st.navigation(sections_pages)
-
- # Following https://discuss.streamlit.io/t/close-streamlit-app-with-button-click/35132/5
- exit_app = st.sidebar.button("Shut Down App", icon=":material/power_off:", use_container_width=True)
+
+ # Following https://discuss.streamlit.io/t/\
+close-streamlit-app-with-button-click/35132/5
+ exit_app = st.sidebar.button("Shut Down App",
+ icon=":material/power_off:",
+ use_container_width=True)
if exit_app:
st.toast("Shutting down the app...")
time.sleep(1)
@@ -225,7 +257,7 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
p = psutil.Process(pid)
p.terminate()
-
+
report_nav.run()
"""
)
@@ -237,14 +269,16 @@ def generate_report(self, output_dir: str = SECTIONS_DIR) -> None:
) as nav_manager:
nav_manager.write("\n".join(report_manag_content))
self.report.logger.info(
- f"Created app navigation script: {self.REPORT_MANAG_SCRIPT}"
+ "Created app navigation script: %s", self.REPORT_MANAG_SCRIPT
)
# Create Python files for each section and its subsections and plots
self._generate_sections(output_dir=output_dir)
except Exception as e:
self.report.logger.error(
- f"An error occurred while generating the report: {str(e)}"
+ "An error occurred while generating the report: %s",
+ e,
+ exc_info=True,
)
raise
@@ -259,21 +293,22 @@ def run_report(self, output_dir: str = SECTIONS_DIR) -> None:
"""
if self.streamlit_autorun:
self.report.logger.info(
- f"Running '{self.report.title}' {self.report_type} report."
+ "Running '%s' %s report.", self.report.title, self.report_type
)
self.report.logger.debug(
- f"Running Streamlit report from directory: {output_dir}"
+ "Running Streamlit report from directory: %s", output_dir
)
- # ! using pyinstaller: vuegen main script as executable, not the Python Interpreter
+ # ! using pyinstaller: vuegen main script as executable,
+ # ! not the Python Interpreter
msg = f"{sys.executable = }"
self.report.logger.debug(msg)
try:
# ! streamlit command option is not known in packaged app
target_file = os.path.join(output_dir, self.REPORT_MANAG_SCRIPT)
self.report.logger.debug(
- f"Running Streamlit report from file: {target_file}"
+ "Running Streamlit report from file: %s", target_file
)
- if self.BUNDLED_EXECUTION:
+ if self.bundled_execution:
args = [
"streamlit",
"run",
@@ -292,22 +327,26 @@ def run_report(self, output_dir: str = SECTIONS_DIR) -> None:
except KeyboardInterrupt:
print("Streamlit process interrupted.")
except subprocess.CalledProcessError as e:
- self.report.logger.error(f"Error running Streamlit report: {str(e)}")
+ self.report.logger.error(
+ "Error running Streamlit report: %s", e, exc_info=True
+ )
raise
else:
# If autorun is False, print instructions for manual execution
self.report.logger.info(
- f"All the scripts to build the Streamlit app are available at {output_dir}"
+ "All the scripts to build the Streamlit app are available at %s",
+ output_dir,
)
self.report.logger.info(
"To run the Streamlit app, use the following command:"
)
self.report.logger.info(
- f"streamlit run {Path(output_dir) / self.REPORT_MANAG_SCRIPT}"
+ "streamlit run %s", Path(output_dir) / self.REPORT_MANAG_SCRIPT
)
msg = (
- f"\nAll the scripts to build the Streamlit app are available at: {output_dir}\n\n"
- f"To run the Streamlit app, use the following command:\n\n"
+ "\nAll the scripts to build the Streamlit app are available at: "
+ f"{output_dir}\n\n"
+ "To run the Streamlit app, use the following command:\n\n"
f"\tstreamlit run {Path(output_dir) / self.REPORT_MANAG_SCRIPT}"
)
print(msg)
@@ -330,7 +369,8 @@ def _format_text(
type : str
The type of the text (e.g., 'header', 'paragraph').
level : int, optional
- If the text is a header, the level of the header (e.g., 1 for h1, 2 for h2, etc.).
+ If the text is a header, the level of the header
+ (e.g., 1 for h1, 2 for h2, etc.).
color : str, optional
The color of the header text.
text_align : str, optional
@@ -345,8 +385,24 @@ def _format_text(
tag = f"h{level}"
elif type == "paragraph" or type == "caption":
tag = "p"
+ else:
+ raise ValueError(
+ f"Unsupported text type: {type}. Supported types are 'header', "
+ "'paragraph', and 'caption'."
+ )
- return f"""st.markdown('''<{tag} style='text-align: {text_align}; color: {color};'>{text}{tag}>''', unsafe_allow_html=True)"""
+ text = text.strip() # get rid of new lines
+
+ return textwrap.dedent(
+ f"""
+ st.markdown(
+ (
+ "<{tag} style='text-align: {text_align}; "
+ "color: {color};'>{text}{tag}>"
+ ),
+ unsafe_allow_html=True)
+ """
+ )
def _generate_home_section(
self,
@@ -369,10 +425,10 @@ def _generate_home_section(
# Create folder for the home page
home_dir_path = Path(output_dir) / "Home"
if create_folder(home_dir_path):
- self.report.logger.debug(f"Created home directory: {home_dir_path}")
+ self.report.logger.debug("Created home directory: %s", home_dir_path)
else:
self.report.logger.debug(
- f"Home directory already existed: {home_dir_path}"
+ "Home directory already existed: %s", home_dir_path
)
# Create the home page content
@@ -384,7 +440,8 @@ def _generate_home_section(
)
if self.report.graphical_abstract:
home_content.append(
- f"\nst.image('{self.report.graphical_abstract}', use_column_width=True)"
+ f"\nst.image('{self.report.graphical_abstract}', "
+ "use_column_width=True)"
)
# add components content to page (if any)
@@ -395,23 +452,29 @@ def _generate_home_section(
# Write the home page content to a Python file
home_page_path = Path(home_dir_path) / "Homepage.py"
- with open(home_page_path, "w") as home_page:
+ with open(home_page_path, "w", encoding="utf-8") as home_page:
home_page.write("\n".join(home_content))
- self.report.logger.info(f"Home page content written to '{home_page_path}'.")
+ self.report.logger.info(
+ "Home page content written to '%s'.", home_page_path
+ )
# Add the home page to the report manager content
report_manag_content.append(
- "homepage = st.Page('Home/Homepage.py', title='Homepage')" # ! here Posix Path is hardcoded
+ # ! here Posix Path is hardcoded
+ "homepage = st.Page('Home/Homepage.py', title='Homepage')"
)
report_manag_content.append("sections_pages['Home'] = [homepage]\n")
self.report.logger.info("Home page added to the report manager content.")
except Exception as e:
- self.report.logger.error(f"Error generating the home section: {str(e)}")
+ self.report.logger.error(
+ "Error generating the home section: %s", e, exc_info=True
+ )
raise
def _generate_sections(self, output_dir: str) -> None:
"""
- Generates Python files for each section in the report, including subsections and its components (plots, dataframes, markdown).
+ Generates Python files for each section in the report, including subsections
+ and its components (plots, dataframes, markdown).
Parameters
----------
@@ -422,7 +485,11 @@ def _generate_sections(self, output_dir: str) -> None:
try:
for section in self.report.sections:
self.report.logger.debug(
- f"Processing section '{section.id}': '{section.title}' - {len(section.subsections)} subsection(s)"
+ # Continue
+ "Processing section '%s': '%s' - %s subsection(s)",
+ section.id,
+ section.title,
+ len(section.subsections),
)
if section.components:
# add an section overview page
@@ -440,16 +507,20 @@ def _generate_sections(self, output_dir: str) -> None:
if not section.subsections:
self.report.logger.debug(
- f"No subsections found in section: '{section.title}'."
+ "No subsections found in section: '%s'.", section.title
)
continue
# Iterate through subsections and integrate them into the section file
- # subsection should have the subsection_file_path as file_path?
+ # ! subsection should have the subsection_file_path as file_path,
+ # ! which is set when parsing the config in the main generate_sections
+ # ! method
for subsection in section.subsections:
self.report.logger.debug(
- f"Processing subsection '{subsection.id}': '{subsection.title} -"
- f" {len(subsection.components)} component(s)'"
+ "Processing subsection '%s': '%s' - %s component(s)",
+ subsection.id,
+ subsection.title,
+ len(subsection.components),
)
try:
# Create subsection file
@@ -468,17 +539,22 @@ def _generate_sections(self, output_dir: str) -> None:
contents=subsection_content,
)
self.report.logger.info(
- f"Subsection file created: '{subsection_file_path}'"
+ "Subsection file created: '%s'", subsection_file_path
)
except Exception as subsection_error:
self.report.logger.error(
- f"Error processing subsection '{subsection.id}' '{subsection.title}' "
- f"in section '{section.id}' '{section.title}': {str(subsection_error)}"
+ "Error processing subsection '%s' '%s' "
+ "in section '%s' '%s': %s",
+ subsection.id,
+ subsection.title,
+ section.id,
+ section.title,
+ str(subsection_error),
)
raise
except Exception as e:
- self.report.logger.error(f"Error generating sections: {str(e)}")
+ self.report.logger.error("Error generating sections: %s", e, exc_info=True)
raise
def _combine_components(self, components: list[dict]) -> tuple[list, list, bool]:
@@ -497,7 +573,7 @@ def _combine_components(self, components: list[dict]) -> tuple[list, list, bool]
fct = self.components_fct_map.get(component.component_type, None)
if fct is None:
self.report.logger.warning(
- f"Unsupported component type '{component.component_type}' "
+ "Unsupported component type '%s' ", component.component_type
)
else:
if component.component_type == r.ComponentType.CHATBOT:
@@ -512,8 +588,9 @@ def _combine_components(self, components: list[dict]) -> tuple[list, list, bool]
def _generate_subsection(self, subsection) -> tuple[List[str], List[str]]:
"""
- Generate code to render components (plots, dataframes, markdown) in the given subsection,
- creating imports and content for the subsection based on the component type.
+ Generate code to render components (plots, dataframes, markdown) in the given
+ subsection, creating imports and content for the subsection based on the
+ component type.
Parameters
----------
@@ -549,13 +626,14 @@ def _generate_subsection(self, subsection) -> tuple[List[str], List[str]]:
subsection_content.append("st.markdown(footer, unsafe_allow_html=True)\n")
self.report.logger.info(
- f"Generated content and imports for subsection: '{subsection.title}'"
+ "Generated content and imports for subsection: '%s'", subsection.title
)
return subsection_content, subsection_imports
def _generate_plot_content(self, plot) -> List[str]:
"""
- Generate content for a plot component based on the plot type (static or interactive).
+ Generate content for a plot component based on the plot type
+ (static or interactive).
Parameters
----------
@@ -576,9 +654,14 @@ def _generate_plot_content(self, plot) -> List[str]:
# Add content for the different plot types
try:
if plot.plot_type == r.PlotType.STATIC:
- plot_rel_path = get_relative_file_path(plot.file_path)
+ # If the file path is a URL, keep the file path as is
+ if is_url(plot.file_path):
+ plot_file_path = plot.file_path
+ else: # If it's a local file
+ plot_file_path = get_relative_file_path(plot.file_path).as_posix()
plot_content.append(
- f"\nst.image('{plot_rel_path.as_posix()}', caption='{plot.caption}', use_column_width=True)\n"
+ f"\nst.image('{plot_file_path}', "
+ f" caption='{plot.caption}', use_column_width=True)\n"
)
elif plot.plot_type == r.PlotType.PLOTLY:
plot_content.append(self._generate_plot_code(plot))
@@ -587,10 +670,12 @@ def _generate_plot_content(self, plot) -> List[str]:
elif plot.plot_type == r.PlotType.INTERACTIVE_NETWORK:
networkx_graph = plot.read_network()
if isinstance(networkx_graph, tuple):
- # If network_data is a tuple, separate the network and html file path
+ # If network_data is a tuple, separate the network
+ # and html file path
networkx_graph, html_plot_file = networkx_graph
else:
- # Otherwise, create and save a new pyvis network from the netowrkx graph
+ # Otherwise,
+ # create and save a new pyvis network from the networkx graph
html_plot_file = (
Path(self.static_dir) / f"{plot.title.replace(' ', '_')}.html"
).resolve()
@@ -605,37 +690,59 @@ def _generate_plot_content(self, plot) -> List[str]:
# Determine whether the file path is a URL or a local file
if is_url(html_plot_file):
plot_content.append(
- f"""
-response = requests.get('{html_plot_file}')
-response.raise_for_status()
-html_content = response.text\n"""
+ textwrap.dedent(
+ f"""
+ response = requests.get('{html_plot_file}')
+ response.raise_for_status()
+ html_content = response.text
+ """
+ )
)
else:
+ fpath = Path(html_plot_file).resolve().relative_to(Path.cwd())
plot_content.append(
- f"""
-with open('{Path(html_plot_file).resolve().relative_to(Path.cwd())}', 'r') as html_file:
- html_content = html_file.read()\n"""
+ textwrap.dedent(
+ f"""
+ with open('{fpath}', 'r') as html_file:
+ html_content = html_file.read()
+ """
+ )
)
# Append the code for additional information (nodes and edges count)
plot_content.append(
- f"""
-st.markdown(f" Number of nodes: {num_nodes}
", unsafe_allow_html=True)
-st.markdown(f" Number of relationships: {num_edges}
", unsafe_allow_html=True)\n"""
+ textwrap.dedent(
+ f"""
+ st.markdown((" "
+ "Number of nodes: {num_nodes}
"),
+ unsafe_allow_html=True)
+ st.markdown((""
+ " Number of relationships: {num_edges}"
+ "
"),
+ unsafe_allow_html=True)
+ """
+ )
)
# Add the specific code for visualization
plot_content.append(self._generate_plot_code(plot))
else:
- self.report.logger.warning(f"Unsupported plot type: {plot.plot_type}")
+ self.report.logger.warning("Unsupported plot type: %s", plot.plot_type)
except Exception as e:
self.report.logger.error(
- f"Error generating content for '{plot.plot_type}' plot '{plot.id}' '{plot.title}': {str(e)}"
+ "Error generating content for '%s' plot '%s' '%s': %s",
+ plot.plot_type,
+ plot.id,
+ plot.title,
+ e,
+ exc_info=True,
)
raise
self.report.logger.info(
- f"Successfully generated content for plot '{plot.id}': '{plot.title}'"
+ "Successfully generated content for plot '%s': '%s'",
+ plot.id,
+ plot.title,
)
return plot_content
@@ -656,37 +763,51 @@ def _generate_plot_code(self, plot) -> str:
"""
# If the file path is a URL, generate code to fetch content via requests
if is_url(plot.file_path):
- plot_code = f"""
-response = requests.get('{plot.file_path}')
-response.raise_for_status()
-plot_json = json.loads(response.text)\n"""
+ plot_code = textwrap.dedent(
+ f"""
+ response = requests.get('{plot.file_path}')
+ response.raise_for_status()
+ plot_json = json.loads(response.text)\n"""
+ )
else: # If it's a local file
plot_rel_path = get_relative_file_path(plot.file_path)
- plot_code = f"""
-with open('{plot_rel_path.as_posix()}', 'r') as plot_file:
- plot_json = json.load(plot_file)\n"""
+ plot_code = textwrap.dedent(
+ f"""
+ with open('{plot_rel_path.as_posix()}', 'r') as plot_file:
+ plot_json = json.load(plot_file)\n"""
+ )
# Add specific code for each visualization tool
if plot.plot_type == r.PlotType.PLOTLY:
- plot_code += """
-# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
-# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-st.plotly_chart(plot_json, use_container_width=True)\n"""
+ plot_code += textwrap.dedent(
+ """
+ # Keep only 'data' and 'layout' sections
+ plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']}
+
+ # Remove 'frame' section in 'data'
+ plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])]
+ st.plotly_chart(plot_json, use_container_width=True)\n"""
+ )
elif plot.plot_type == r.PlotType.ALTAIR:
- plot_code += """
-altair_plot = alt.Chart.from_dict(plot_json)
-st.vega_lite_chart(json.loads(altair_plot.to_json()), use_container_width=True)\n"""
+ plot_code += textwrap.dedent(
+ """
+ altair_plot = alt.Chart.from_dict(plot_json)
+ st.vega_lite_chart(json.loads(altair_plot.to_json()),
+ use_container_width=True)\n"""
+ )
elif plot.plot_type == r.PlotType.INTERACTIVE_NETWORK:
- plot_code = """# Streamlit checkbox for controlling the layout
-control_layout = st.checkbox('Add panel to control layout', value=True)
-net_html_height = 1200 if control_layout else 630
-# Load HTML into HTML component for display on Streamlit
-st.components.v1.html(html_content, height=net_html_height)\n"""
+ plot_code = textwrap.dedent(
+ """\
+ # Streamlit checkbox for controlling the layout
+ control_layout = st.checkbox('Add panel to control layout', value=True)
+ net_html_height = 1200 if control_layout else 630
+ # Load HTML into HTML component for display on Streamlit
+ st.components.v1.html(html_content, height=net_html_height)\n"""
+ )
return plot_code
def _generate_dataframe_content(self, dataframe) -> List[str]:
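Several hunks rewrite inline code templates with `textwrap.dedent` so the templates can be indented to match the surrounding source. A small sketch of why that works; the JSON path is a placeholder:

```python
import textwrap

plot_rel_path = "static/example_plot.json"  # placeholder path for illustration

# dedent() strips the whitespace common to all non-empty lines, so the emitted
# snippet is flush-left even though the template is indented in the source.
plot_code = textwrap.dedent(
    f"""
    with open('{plot_rel_path}', 'r') as plot_file:
        plot_json = json.load(plot_file)
    """
)
print(plot_code)
```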
@@ -721,10 +842,13 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
file_extension == fmt.value_with_dot for fmt in r.DataFrameFormat
):
self.report.logger.error(
- f"Unsupported file extension: {file_extension}. Supported extensions are: {', '.join(fmt.value for fmt in r.DataFrameFormat)}."
+ "Unsupported file extension: %s. Supported extensions are: %s.",
+ file_extension,
+ ", ".join(fmt.value for fmt in r.DataFrameFormat),
)
# return [] # Skip execution if unsupported file extension
- # Should it not return here? Can we even call the method with an unsupported file extension?
+ # Should it not return here?
+ # Can we even call the method with an unsupported file extension?
# Build the file path (URL or local file)
if is_url(dataframe.file_path):
@@ -740,12 +864,14 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
sheet_names = table_utils.get_sheet_names(df_file_path.as_posix())
if len(sheet_names) > 1:
# If there are multiple sheets, ask the user to select one
-
+ fpath = df_file_path.as_posix()
dataframe_content.append(
textwrap.dedent(
f"""\
- sheet_names = table_utils.get_sheet_names("{df_file_path.as_posix()}")
- selected_sheet = st.selectbox("Select a sheet to display", options=sheet_names)
+ sheet_names = table_utils.get_sheet_names("{fpath}")
+ selected_sheet = st.selectbox("Select a sheet to display",
+ options=sheet_names,
+ )
"""
)
)
@@ -757,39 +883,53 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
r.DataFrameFormat.XLSX.value_with_dot,
]:
dataframe_content.append(
- f"""df = pd.{read_function.__name__}('{df_file_path.as_posix()}', sheet_name=selected_sheet)\n"""
+ f"df = pd.{read_function.__name__}('{df_file_path.as_posix()}',"
+ " sheet_name=selected_sheet)\n"
)
else:
dataframe_content.append(
- f"""df = pd.{read_function.__name__}('{df_file_path.as_posix()}')\n"""
+ f"df = pd.{read_function.__name__}('{df_file_path.as_posix()}')\n"
)
# ! Alternative to select box: iterate over sheets in DataFrame
# Displays a DataFrame using AgGrid with configurable options.
dataframe_content.append(
- """
-# Displays a DataFrame using AgGrid with configurable options.
-grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
-grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
-grid_options = grid_builder.build()
-
-AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
-
-# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
-st.download_button(
- label="Download dataframe as CSV",
- data=df_csv,
- file_name=f"dataframe_{df_index}.csv",
- mime='text/csv',
- key=f"download_button_{df_index}")
-df_index += 1"""
+ textwrap.dedent(
+ """
+ # Displays a DataFrame using AgGrid with configurable options.
+ grid_builder = GridOptionsBuilder.from_dataframe(df)
+ grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+ )
+ grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
+ grid_builder.configure_selection(selection_mode="multiple")
+ grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+ )
+ grid_options = grid_builder.build()
+
+ AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
+
+ # Button to download the df
+ df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
+ st.download_button(
+ label="Download dataframe as CSV",
+ data=df_csv,
+ file_name=f"dataframe_{df_index}.csv",
+ mime='text/csv',
+ key=f"download_button_{df_index}")
+ df_index += 1"""
+ )
)
except Exception as e:
self.report.logger.error(
- f"Error generating content for DataFrame: {dataframe.title}. Error: {str(e)}"
+ "Error generating content for DataFrame: %s. Error: %s",
+ dataframe.title,
+ e,
+ exc_info=True,
)
raise
@@ -802,7 +942,8 @@ def _generate_dataframe_content(self, dataframe) -> List[str]:
)
self.report.logger.info(
- f"Successfully generated content for DataFrame: '{dataframe.title}'"
+ "Successfully generated content for DataFrame: '%s'",
+ dataframe.title,
)
return dataframe_content
@@ -832,17 +973,23 @@ def _generate_markdown_content(self, markdown) -> List[str]:
# If the file path is a URL, generate code to fetch content via requests
if is_url(markdown.file_path):
markdown_content.append(
- f"""
-response = requests.get('{markdown.file_path}')
-response.raise_for_status()
-markdown_content = response.text\n"""
+ textwrap.dedent(
+ f"""
+ response = requests.get('{markdown.file_path}')
+ response.raise_for_status()
+ markdown_content = response.text
+ """
+ )
)
else: # If it's a local file
md_rel_path = get_relative_file_path(markdown.file_path)
markdown_content.append(
- f"""
-with open('{md_rel_path.as_posix()}', 'r') as markdown_file:
- markdown_content = markdown_file.read()\n"""
+ textwrap.dedent(
+ f"""
+ with open('{md_rel_path.as_posix()}', 'r') as markdown_file:
+ markdown_content = markdown_file.read()
+ """
+ )
)
# Code to display md content
markdown_content.append(
@@ -850,7 +997,10 @@ def _generate_markdown_content(self, markdown) -> List[str]:
)
except Exception as e:
self.report.logger.error(
- f"Error generating content for Markdown: {markdown.title}. Error: {str(e)}"
+ "Error generating content for Markdown: %s. Error: %s",
+ markdown.title,
+ e,
+ exc_info=True,
)
raise
@@ -863,7 +1013,8 @@ def _generate_markdown_content(self, markdown) -> List[str]:
)
self.report.logger.info(
- f"Successfully generated content for Markdown: '{markdown.title}'"
+ "Successfully generated content for Markdown: '%s'",
+ markdown.title,
)
return markdown_content
@@ -891,18 +1042,24 @@ def _generate_html_content(self, html) -> List[str]:
try:
if is_url(html.file_path):
# If it's a URL, fetch content dynamically
- html_content.append(
- f"""
-response = requests.get('{html.file_path}')
-response.raise_for_status()
-html_content = response.text\n"""
+ html_content.append(
+ textwrap.dedent(
+ f"""
+ response = requests.get('{html.file_path}')
+ response.raise_for_status()
+ html_content = response.text
+ """
+ )
)
else: # If it's a local file
- html_rel_path = get_relative_file_path(html.file_path)
+ html_rel_path = get_relative_file_path(html.file_path).as_posix()
html_content.append(
- f"""
-with open('{html_rel_path.as_posix()}', 'r', encoding='utf-8') as html_file:
- html_content = html_file.read()\n"""
+ textwrap.dedent(
+ f"""
+ with open('{html_rel_path}', 'r', encoding='utf-8') as f:
+ html_content = f.read()
+ """
+ )
)
# Display HTML content using Streamlit
@@ -912,7 +1069,10 @@ def _generate_html_content(self, html) -> List[str]:
except Exception as e:
self.report.logger.error(
- f"Error generating content for HTML: {html.title}. Error: {str(e)}"
+ "Error generating content for HTML: %s. Error: %s",
+ html.title,
+ e,
+ exc_info=True,
)
raise
@@ -923,14 +1083,15 @@ def _generate_html_content(self, html) -> List[str]:
)
self.report.logger.info(
- f"Successfully generated content for HTML: '{html.title}'"
+ "Successfully generated content for HTML: '%s'",
+ html.title,
)
return html_content
def _generate_apicall_content(self, apicall) -> List[str]:
"""
- Generate content for an API component. This method handles the API call and formats
- the response for display in the Streamlit app.
+ Generate content for an API component. This method handles the API call and
+ formats the response for display in the Streamlit app.
Parameters
----------
@@ -955,7 +1116,10 @@ def _generate_apicall_content(self, apicall) -> List[str]:
apicall_content.append(f"""st.write({apicall_response})\n""")
except Exception as e:
self.report.logger.error(
- f"Error generating content for APICall: {apicall.title}. Error: {str(e)}"
+ "Error generating content for APICall: %s. Error: %s",
+ apicall.title,
+ e,
+ exc_info=True,
)
raise
@@ -968,29 +1132,34 @@ def _generate_apicall_content(self, apicall) -> List[str]:
)
self.report.logger.info(
- f"Successfully generated content for APICall '{apicall.title}' using method '{apicall.method}'"
+ "Successfully generated content for APICall '%s' using method '%s'",
+ apicall.title,
+ apicall.method,
)
return apicall_content
def _generate_chatbot_content(self, chatbot) -> List[str]:
"""
- Generate content to render a ChatBot component, supporting standard and Ollama-style streaming APIs.
+ Generate content to render a ChatBot component, supporting standard and
+ Ollama-style streaming APIs.
- This method builds and returns a list of strings, which are later executed to create the chatbot
- interface in a Streamlit app. It includes user input handling, API interaction logic, response parsing,
+ This method builds and returns a list of strings, which are later executed to
+ create the chatbot interface in a Streamlit app. It includes user input
+ handling, API interaction logic, response parsing,
and conditional rendering of text, source links, and HTML subgraphs.
The function distinguishes between two chatbot modes:
- - **Ollama-style streaming API**: Identified by the presence of `chatbot.model`. Uses streaming
- JSON chunks from the server to simulate a real-time response.
- - **Standard API**: Assumes a simple POST request with a prompt and a full JSON response with text,
+ - **Ollama-style streaming API**: Identified by the presence of `chatbot.model`.
+ Uses streaming JSON chunks from the server to simulate a real-time response.
+ - **Standard API**: Assumes a simple POST request with a prompt and a full JSON
+ response with text,
and other fields like links, HTML graphs, etc.
Parameters
----------
chatbot : ChatBot
- The ChatBot component to generate content for, containing configuration such as title, model,
- API endpoint, headers, and caption.
+ The ChatBot component to generate content for, containing configuration such
+ as title, model, API endpoint, headers, and caption.
Returns
-------
@@ -1007,141 +1176,158 @@ def _generate_chatbot_content(self, chatbot) -> List[str]:
)
# --- Shared code blocks (as strings) ---
- init_messages_block = """
-# Init session state
-if 'messages' not in st.session_state:
- st.session_state['messages'] = []
- """
-
- render_messages_block = """
-# Display chat history
-for message in st.session_state['messages']:
- with st.chat_message(message['role']):
- content = message['content']
- if isinstance(content, dict):
- st.markdown(content.get('text', ''), unsafe_allow_html=True)
- if 'links' in content:
- st.markdown("**Sources:**")
- for link in content['links']:
- st.markdown(f"- [{link}]({link})")
- if 'subgraph_pyvis' in content:
- st.components.v1.html(content['subgraph_pyvis'], height=600)
- else:
- st.write(content)
- """
+ init_messages_block = textwrap.indent(
+ """
+ # Init session state
+ if 'messages' not in st.session_state:
+ st.session_state['messages'] = []
+ """,
+ " " * 4,
+ )
- handle_prompt_block = """
-# Capture and append new user prompt
-if prompt := st.chat_input("Enter your prompt here:"):
- st.session_state.messages.append({"role": "user", "content": prompt})
- with st.chat_message("user"):
- st.write(prompt)
- """
+ render_messages_block = textwrap.indent(
+ """
+ # Display chat history
+ for message in st.session_state['messages']:
+ with st.chat_message(message['role']):
+ content = message['content']
+ if isinstance(content, dict):
+ st.markdown(content.get('text', ''), unsafe_allow_html=True)
+ if 'links' in content:
+ st.markdown("**Sources:**")
+ for link in content['links']:
+ st.markdown(f"- [{link}]({link})")
+ if 'subgraph_pyvis' in content:
+ st.components.v1.html(content['subgraph_pyvis'], height=600)
+ else:
+ st.write(content)
+ """,
+ " " * 4,
+ )
+ handle_prompt_block = textwrap.indent(
+ """
+ # Capture and append new user prompt
+ if prompt := st.chat_input("Enter your prompt here:"):
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ with st.chat_message("user"):
+ st.write(prompt)
+ """,
+ " " * 4,
+ )
if chatbot.model:
# --- Ollama-style streaming chatbot ---
- chatbot_content.append(
+ # all other code blocks pasted in need to be at this indentation level
+ code_block = textwrap.dedent(
f"""
-{init_messages_block}
-
-# Function to send prompt to Ollama API
-def generate_query(messages):
- response = requests.post(
- "{chatbot.api_call.api_url}",
- json={{"model": "{chatbot.model}", "messages": messages, "stream": True}},
- )
- response.raise_for_status()
- return response
-
-# Parse streaming response from Ollama
-def parse_api_response(response):
- try:
- output = ""
- for line in response.iter_lines():
- body = json.loads(line)
- if "error" in body:
- raise Exception(f"API error: {{body['error']}}")
- if body.get("done", False):
- return {{"role": "assistant", "content": output}}
- output += body.get("message", {{}}).get("content", "")
- except Exception as e:
- return {{"role": "assistant", "content": f"Error while processing API response: {{str(e)}}"}}
-
-# Simulated typing effect for responses
-def response_generator(msg_content):
- for word in msg_content.split():
- yield word + " "
- time.sleep(0.1)
- yield "\\n"
-
-{render_messages_block}
-
-{handle_prompt_block}
-
- # Retrieve question and generate answer
- combined = "\\n".join(msg["content"] for msg in st.session_state.messages if msg["role"] == "user")
- messages = [{{"role": "user", "content": combined}}]
- with st.spinner('Generating answer...'):
- response = generate_query(messages)
- parsed_response = parse_api_response(response)
-
- # Add the assistant's response to the session state and display it
- st.session_state.messages.append(parsed_response)
- with st.chat_message("assistant"):
- st.write_stream(response_generator(parsed_response["content"]))
+ {init_messages_block}
+ # Function to send prompt to Ollama API
+ def generate_query(messages):
+ response = requests.post(
+ "{chatbot.api_call.api_url}",
+ json={{"model": "{chatbot.model}",
+ "messages": messages,
+ "stream": True}},
+ )
+ response.raise_for_status()
+ return response
+
+ # Parse streaming response from Ollama
+ def parse_api_response(response):
+ try:
+ output = ""
+ for line in response.iter_lines():
+ body = json.loads(line)
+ if "error" in body:
+ raise Exception(f"API error: {{body['error']}}")
+ if body.get("done", False):
+ return {{"role": "assistant", "content": output}}
+ output += body.get("message", {{}}).get("content", "")
+ except Exception as e:
+ return {{"role": "assistant", "content":
+ f"Error while processing API response: {{str(e)}}"}}
+
+ # Simulated typing effect for responses
+ def response_generator(msg_content):
+ for word in msg_content.split():
+ yield word + " "
+ time.sleep(0.1)
+ yield "\\n"
+ {render_messages_block}
+ {handle_prompt_block}
+ # Retrieve question and generate answer
+ combined = "\\n".join(msg["content"]
+ for msg in st.session_state.messages
+ if msg["role"] == "user")
+ messages = [{{"role": "user", "content": combined}}]
+ with st.spinner('Generating answer...'):
+ response = generate_query(messages)
+ parsed_response = parse_api_response(response)
+
+ # Add the assistant's response to the session state and display it
+ st.session_state.messages.append(parsed_response)
+ with st.chat_message("assistant"):
+ st.write_stream(response_generator(parsed_response["content"]))
"""
)
+ chatbot_content.append(code_block)
+
else:
# --- Standard (non-streaming) API chatbot ---
- chatbot_content.append(
+ code_block = textwrap.dedent(
f"""
-{init_messages_block}
-
-# Function to send prompt to standard API
-def generate_query(prompt):
- try:
- response = requests.post(
- "{chatbot.api_call.api_url}",
- json={{"prompt": prompt}},
- headers={chatbot.api_call.headers}
- )
- response.raise_for_status()
- return response.json()
- except requests.exceptions.RequestException as e:
- st.error(f"API request failed: {{str(e)}}")
- if hasattr(e, 'response') and e.response:
- try:
- error_details = e.response.json()
- st.error(f"Error details: {{error_details}}")
- except ValueError:
- st.error(f"Response text: {{e.response.text}}")
- return None
-
-{render_messages_block}
-
-{handle_prompt_block}
-
- with st.spinner('Generating answer...'):
- response = generate_query(prompt)
-
- if response:
- # Append and display assistant response
- st.session_state.messages.append({{
- "role": "assistant",
- "content": response
- }})
- with st.chat_message("assistant"):
- st.markdown(response.get('text', ''), unsafe_allow_html=True)
- if 'links' in response:
- st.markdown("**Sources:**")
- for link in response['links']:
- st.markdown(f"- [{{link}}]({{link}})")
- if 'subgraph_pyvis' in response:
- st.components.v1.html(response['subgraph_pyvis'], height=600)
- else:
- st.error("Failed to get response from API")
- """
+ {init_messages_block}
+
+ # Function to send prompt to standard API
+ def generate_query(prompt):
+ try:
+ response = requests.post(
+ "{chatbot.api_call.api_url}",
+ json={{"prompt": prompt}},
+ headers={chatbot.api_call.headers}
+ )
+ response.raise_for_status()
+ return response.json()
+ except requests.exceptions.RequestException as e:
+ st.error(f"API request failed: {{str(e)}}")
+ if hasattr(e, 'response') and e.response:
+ try:
+ error_details = e.response.json()
+ st.error(f"Error details: {{error_details}}")
+ except ValueError:
+ st.error(f"Response text: {{e.response.text}}")
+ return None
+
+ {render_messages_block}
+
+ {handle_prompt_block}
+
+ with st.spinner('Generating answer...'):
+ response = generate_query(prompt)
+
+ if response:
+ # Append and display assistant response
+ st.session_state.messages.append({{
+ "role": "assistant",
+ "content": response
+ }})
+ with st.chat_message("assistant"):
+ st.markdown(response.get('text', ''),
+ unsafe_allow_html=True)
+ if 'links' in response:
+ st.markdown("**Sources:**")
+ for link in response['links']:
+ st.markdown(f"- [{{link}}]({{link}})")
+ if 'subgraph_pyvis' in response:
+ st.components.v1.html(
+ response['subgraph_pyvis'],
+ height=600
+ )
+ else:
+ st.error("Failed to get response from API")
+ """
)
+ chatbot_content.append(code_block)
if chatbot.caption:
chatbot_content.append(
@@ -1159,7 +1345,8 @@ def _generate_component_imports(self, component: r.Component) -> List[str]:
Parameters
----------
component : r.Component
- The component for which to generate the required imports. The component can be of type:
+ The component for which to generate the required imports.
+ The component can be of type:
- PLOT
- DATAFRAME
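The chatbot hunks combine `textwrap.indent` (to pre-indent shared blocks) with a dedented outer template. A reduced sketch of the same indent-then-paste idea, with illustrative snippet content rather than the generated scripts themselves:

```python
import textwrap

# A reusable, flush-left snippet of generated Streamlit code.
render_links_block = textwrap.dedent(
    """\
    st.markdown("**Sources:**")
    for link in links:
        st.markdown(f"- [{link}]({link})")
    """
)

# indent() prefixes every line that contains text, so the snippet can be
# pasted one level deep inside an `if` body of the generated script.
script = 'if links:\n' + textwrap.indent(render_links_block, " " * 4)
print(script)
```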
diff --git a/src/vuegen/table_utils.py b/src/vuegen/table_utils.py
index b25f413..469934f 100644
--- a/src/vuegen/table_utils.py
+++ b/src/vuegen/table_utils.py
@@ -1,3 +1,5 @@
+"""Reading tabular data using pandas."""
+
import pandas as pd
from . import report as r
diff --git a/src/vuegen/utils/__init__.py b/src/vuegen/utils/__init__.py
index 12f5092..86c638b 100644
--- a/src/vuegen/utils/__init__.py
+++ b/src/vuegen/utils/__init__.py
@@ -1,3 +1,10 @@
+"""File system utilities, file conversion functions,
+graph related utilities, config file writing, and
+command line parser and logging messages (completion).
+
+streamlit report footer is also in this file.
+"""
+
from __future__ import annotations
import argparse
@@ -5,16 +12,11 @@
import logging
import os
import sys
+import textwrap
from datetime import datetime
-
-try:
- from enum import StrEnum
-except ImportError:
- from strenum import StrEnum
-
from io import StringIO
from pathlib import Path
-from typing import Iterable, Type
+from typing import Iterable, Optional, Type
from urllib.parse import urlparse
import networkx as nx
@@ -22,8 +24,15 @@
import yaml
from bs4 import BeautifulSoup
+try:
+ from enum import StrEnum
+except ImportError:
+ from strenum import StrEnum
+
+from vuegen.constants import GITHUB_ORG_URL, LOGO_URL, ORG, REPO_URL, TIMEOUT
+
-## CHECKS
+# CHECKS
def check_path(filepath: Path) -> bool:
"""
Checks if the given file or folder path exists.
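`StrEnum` only ships with Python 3.11+, which is why the import above is wrapped in a fallback to the `strenum` backport. A sketch of the pattern with an illustrative enum:

```python
try:
    from enum import StrEnum  # Python 3.11+
except ImportError:
    from strenum import StrEnum  # backport package for older interpreters


class ReportTypeSketch(StrEnum):
    # Illustrative members only; the real ReportType enum lives in the package.
    STREAMLIT = "streamlit"
    HTML = "html"
```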
@@ -70,14 +79,16 @@ def assert_enum_value(
"""
try:
return enum_class[value.upper()]
- except KeyError:
+ except KeyError as e:
expected_values = ", ".join([str(e.value) for e in enum_class])
logger.error(
- f"Invalid value for {enum_class.__name__}: '{value}'. Expected values are: {expected_values}"
+ f"Invalid value for {enum_class.__name__}: '{value}'."
+ f"Expected values are: {expected_values}"
)
raise ValueError(
- f"Invalid {enum_class.__name__}: {value}. Expected values are: {expected_values}"
- )
+ f"Invalid {enum_class.__name__}: {value}. "
+ f"Expected values are: {expected_values}"
+ ) from e
def is_url(filepath: Path) -> bool:
@@ -93,8 +104,8 @@ def is_url(filepath: Path) -> bool:
-------
bool
True if the input path is a valid URL, meaning it contains both a scheme
- (e.g., http, https, ftp) and a network location (e.g., example.com).
- Returns False if either the scheme or the network location is missing or invalid.
+ (e.g., http, https, ftp) and a network location (e.g., example.com). Returns
+ False if either the scheme or the network location is missing or invalid.
"""
# Parse the url and return validation
parsed_url = urlparse(str(filepath))
@@ -136,17 +147,19 @@ def is_pyvis_html(filepath: str) -> bool:
return pyvis_identifier_valid and body_structure_valid
-## FILE_SYSTEM
+# FILE_SYSTEM
def create_folder(directory_path: str, is_nested: bool = False) -> bool:
"""
- Create a folder. Optionally create nested directories if the specified path includes subdirectories.
+ Create a folder. Optionally create nested directories if the specified path includes
+ subdirectories.
Parameters
----------
directory_path : str
The path of the directory to create.
is_nested : bool
- A flag indicating whether to create nested directories (True uses os.makedirs, False uses os.mkdir).
+ A flag indicating whether to create nested directories.
+ True uses os.makedirs, False uses os.mkdir.
Returns
-------
@@ -171,7 +184,7 @@ def create_folder(directory_path: str, is_nested: bool = False) -> bool:
else:
return False
except OSError as e:
- raise OSError(f"Error creating directory '{directory_path}': {e}")
+ raise OSError(f"Error creating directory '{directory_path}'.") from e
def get_relative_file_path(
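Several `raise` statements in these hunks gain `from e`, which records the original exception as `__cause__` instead of discarding it. A minimal sketch of the chaining pattern with a hypothetical helper:

```python
import os


def make_dir(directory_path: str) -> None:
    # Re-raise a cleaner OSError while keeping the original exception
    # chained, so the full cause shows up in the traceback.
    try:
        os.mkdir(directory_path)
    except OSError as e:
        raise OSError(f"Error creating directory '{directory_path}'.") from e
```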
@@ -215,7 +228,7 @@ def get_relative_file_path(
return rel_path
-def get_parser(prog_name: str, others: dict = {}) -> argparse.Namespace:
+def get_parser(prog_name: str, others: Optional[dict] = None) -> argparse.Namespace:
"""
Initiates argparse.ArgumentParser() and adds common arguments.
@@ -237,6 +250,8 @@ def get_parser(prog_name: str, others: dict = {}) -> argparse.Namespace:
AssertionError
If prog_name is not a string or others is not a dictionary.
"""
+ if others is None:
+ others = {}
# Preconditions
assert isinstance(prog_name, str), f"prog_name should be a string: {prog_name}"
assert isinstance(others, dict), f"others must be a dict: {others}"
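The `others: dict = {}` default is replaced because default values are evaluated once at definition time, so a mutable default would be shared between calls. A sketch of the safer idiom, using a hypothetical helper name:

```python
from typing import Optional


def collect_args(prog_name: str, others: Optional[dict] = None) -> dict:
    # Create a fresh dict per call instead of sharing one mutable default.
    if others is None:
        others = {}
    others.setdefault("prog", prog_name)
    return others
```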
@@ -264,7 +279,10 @@ def get_parser(prog_name: str, others: dict = {}) -> argparse.Namespace:
"--report_type",
type=str,
default="streamlit",
- help="Type of the report to generate (streamlit, html, pdf, docx, odt, revealjs, pptx, or jupyter).",
+ help=(
+ "Type of the report to generate: streamlit, html, pdf, docx, odt, revealjs,"
+ " pptx, or jupyter."
+ ),
)
parser.add_argument(
"-output_dir",
@@ -301,7 +319,7 @@ def get_parser(prog_name: str, others: dict = {}) -> argparse.Namespace:
return parser
-def fetch_file_stream(file_path: str) -> StringIO:
+def fetch_file_stream(file_path: str, timeout: int = TIMEOUT) -> StringIO:
"""
Fetches a file-like stream from a given file path or URL.
@@ -330,13 +348,12 @@ def fetch_file_stream(file_path: str) -> StringIO:
if is_url(file_path):
# Handle URL input
try:
- response = requests.get(file_path)
+ response = requests.get(file_path, timeout=timeout)
response.raise_for_status() # Raise an exception for HTTP errors
return StringIO(response.text)
except requests.exceptions.RequestException as e:
- raise ValueError(
- f"Error fetching content from URL: {file_path}. Error: {str(e)}"
- )
+ raise ValueError(f"Error fetching content from URL: {file_path}.") from e
+
else:
# Handle local file input
if not os.path.exists(file_path):
@@ -347,12 +364,13 @@ def fetch_file_stream(file_path: str) -> StringIO:
return StringIO(file.read())
-## FILE_CONVERSION
+# FILE_CONVERSION
def cyjs_to_networkx(file_path: str, name: str = "name", ident: str = "id") -> nx.Graph:
"""
- Create a NetworkX graph from a `.cyjs` file in Cytoscape format, including all attributes present in the JSON data.
- This function is modified from the `cytoscape_graph` networkx function to handle the 'value' key explicitly and to include
- all additional attributes found in the JSON data for both nodes and edges.
+ Create a NetworkX graph from a `.cyjs` file in Cytoscape format, including all
+ attributes present in the JSON data. This function is modified from the
+ `cytoscape_graph` networkx function to handle the 'value' key explicitly and to
+ include all additional attributes found in the JSON data for both nodes and edges.
Parameters
----------
@@ -367,14 +385,16 @@ def cyjs_to_networkx(file_path: str, name: str = "name", ident: str = "id") -> n
Returns
-------
graph : networkx.Graph
- The graph created from the Cytoscape JSON data, including all node and edge attributes.
+ The graph created from the Cytoscape JSON data, including all node and edge
+ attributes.
Raises
------
NetworkXError
If the `name` and `ident` attributes are identical.
ValueError
- If the data format is invalid or missing required elements, such as 'id' or 'name' for nodes.
+ If the data format is invalid or missing required elements, such as 'id'
+ or 'name' for nodes.
"""
try:
# If file_path is a file-like object (e.g., StringIO), read from it
@@ -434,7 +454,7 @@ def cyjs_to_networkx(file_path: str, name: str = "name", ident: str = "id") -> n
return graph
except KeyError as e:
- raise ValueError(f"Missing required key in data: {e}")
+ raise ValueError("Missing required key in data.") from e
def pyvishtml_to_networkx(html_file: str) -> nx.Graph:
@@ -454,7 +474,8 @@ def pyvishtml_to_networkx(html_file: str) -> nx.Graph:
Raises
------
ValueError
- If the HTML file does not contain the expected network data, or if nodes lack 'id' attribute.
+ If the HTML file does not contain the expected network data,
+ or if nodes lack 'id' attribute.
"""
# Load the HTML file
if isinstance(html_file, StringIO):
@@ -515,7 +536,7 @@ def pyvishtml_to_networkx(html_file: str) -> nx.Graph:
return graph
-## CONFIG
+# CONFIG
def load_yaml_config(file_path: str) -> dict:
"""
Load a YAML configuration file and return its contents as a dictionary.
@@ -546,7 +567,7 @@ def load_yaml_config(file_path: str) -> dict:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
- raise ValueError(f"Error parsing YAML file: {exc}")
+ raise ValueError("Error parsing YAML file.") from exc
return config
@@ -586,7 +607,7 @@ def write_yaml_config(yaml_data: dict, directory_path: Path) -> Path:
return output_yaml
-## LOGGING
+# LOGGING
def get_basename(fname: None | str = None) -> str:
"""
- For a given filename, returns basename WITHOUT file extension
@@ -696,7 +717,7 @@ def generate_log_filename(folder: str = "logs", suffix: str = "") -> str:
# PRECONDITIONS
create_folder(folder) # ? Path(folder).mkdir(parents=True, exist_ok=True)
except OSError as e:
- raise OSError(f"Error creating directory '{folder}': {e}")
+ raise OSError(f"Error creating directory '{folder}'") from e
# MAIN FUNCTION
log_filename = get_time(incl_timezone=False) + "_" + suffix + ".log"
log_filepath = os.path.join(folder, log_filename)
@@ -790,7 +811,7 @@ def get_logger(
logger = init_log(log_file, display=display, logger_id=logger_id)
# Log the path to the log file
- logger.info(f"Path to log file: {log_file}")
+ logger.info("Path to log file: %s", log_file)
return logger, log_file
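The switch from an f-string to %-style arguments follows the standard library's lazy-formatting convention; a self-contained illustration, independent of VueGen's `get_logger`:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("vuegen")

# The message is only interpolated if the record is actually emitted, and tools
# that aggregate logs can group records by the constant template string.
logger.info("Path to log file: %s", "logs/example.log")
```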
@@ -814,41 +835,50 @@ def get_completion_message(report_type: str, config_path: str) -> str:
border = "─" * 65 # Creates a separator line
if report_type == "streamlit":
- message = f"""🚀 Streamlit Report Generated!
+ message = textwrap.dedent(
+ f"""
+ 🚀 Streamlit Report Generated!
-📂 All scripts to build the Streamlit app are available at:
- streamlit_report/sections
+ 📂 All scripts to build the Streamlit app are available at:
+ streamlit_report/sections
-▶️ To run the Streamlit app, use the following command:
- streamlit run streamlit_report/sections/report_manager.py
+ ▶️ To run the Streamlit app, use the following command:
+ streamlit run streamlit_report/sections/report_manager.py
-✨ You can extend the report by adding new files to the input directory or updating the config file.
+ ✨ You can extend the report by adding new files to the input directory or
+ updating the config file.
-🛠️ Advanced users can modify the Python scripts directly in:
- streamlit_report/sections
+ 🛠️ Advanced users can modify the Python scripts directly in:
+ streamlit_report/sections
-⚙️ Configuration file used:
- {config_path}
-"""
+ ⚙️ Configuration file used:
+ {config_path}
+ """
+ )
else:
- message = f"""🚀 {report_type.capitalize()} Report Generated!
+ message = textwrap.dedent(
+ f"""
+ 🚀 {report_type.capitalize()} Report Generated!
-📂 Your {report_type} report is available at:
- quarto_report
+ 📂 Your {report_type} report is available at:
+ quarto_report
-✨ You can extend the report by adding new files to the input directory or updating the config file.
+ ✨ You can extend the report by adding new files to the input directory or
+ updating the config file.
-🛠️ Advanced users can modify the report template directly in:
- quarto_report/quarto_report.qmd
+ 🛠️ Advanced users can modify the report template directly in:
+ quarto_report/quarto_report.qmd
-⚙️ Configuration file used:
- {config_path}
-"""
+ ⚙️ Configuration file used:
+ {config_path}
+ """
+ )
return f"{message}\n{border}"
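The refactor relies on `textwrap.dedent` stripping the common leading whitespace, so the triple-quoted block indented inside the function body renders flush-left; a small standalone demonstration of that behaviour:

```python
import textwrap

config_path = "docs/example_config_files/Chatbot_example_config.yaml"
message = textwrap.dedent(
    f"""
    🚀 Streamlit Report Generated!

    ⚙️ Configuration file used:
        {config_path}
    """
)
print(message)  # the common four-space indentation is removed from every line
```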
-## REPORT FORMATTING
+# REPORT FORMATTING
+# ? move, as it is only used in streamlit_report
def generate_footer() -> str:
"""
Generate an HTML footer for a report.
@@ -861,23 +891,27 @@ def generate_footer() -> str:
str
A formatted HTML string representing the footer.
"""
- footer = """
-"""
+ footer = textwrap.dedent(
+ f"""
+
+
+ """
+ )
return footer
diff --git a/src/vuegen/utils/variables.py b/src/vuegen/utils/variables.py
index 7a532b6..ab74520 100644
--- a/src/vuegen/utils/variables.py
+++ b/src/vuegen/utils/variables.py
@@ -1,3 +1,5 @@
+"""Create valid variable names for Python identifiers."""
+
import re
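The new module docstring and the existing `re` import suggest the helper turns arbitrary component titles into valid Python identifiers; the function itself is not part of this diff, so the following is only an illustrative sketch of that idea:

```python
import re


def make_valid_identifier(name: str) -> str:  # hypothetical helper, not the real one
    """Replace non-word characters and guard against a leading digit."""
    candidate = re.sub(r"\W+", "_", name).strip("_")
    if not candidate or candidate[0].isdigit():
        candidate = f"_{candidate}"
    return candidate


print(make_valid_identifier("Pie Plot Countries (Plotly)"))  # -> Pie_Plot_Countries_Plotly
```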
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/docx/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/docx/quarto_report/quarto_report.qmd
index 8136c42..2201667 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/docx/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/docx/quarto_report/quarto_report.qmd
@@ -33,6 +33,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -41,17 +42,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
```
@@ -59,6 +63,7 @@ fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
{fig-alt= width=90%}
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -69,8 +74,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Multiline_Plot_Altair.png")
```
@@ -78,6 +84,7 @@ fig_altair.save("static/Multiline_Plot_Altair.png")
{fig-alt= width=90%}
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -86,17 +93,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
```
@@ -104,6 +114,7 @@ fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
{fig-alt= width=90%}
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -112,17 +123,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
```
@@ -130,6 +144,7 @@ fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
{fig-alt= width=90%}
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -140,8 +155,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
```
@@ -149,6 +165,7 @@ fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
{fig-alt= width=90%}
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -157,17 +174,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Plotly_Plot_R.png")
```
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/html/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/html/quarto_report/quarto_report.qmd
index 8cb3b3b..0a3a5e3 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/html/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/html/quarto_report/quarto_report.qmd
@@ -26,11 +26,12 @@ include-in-header:
include-after-body:
text: |
---
@@ -58,6 +59,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -66,22 +68,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -92,13 +98,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -107,22 +115,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -131,22 +143,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -157,13 +173,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -172,17 +190,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
@@ -271,7 +292,9 @@ Optional description for subsection.
-
+
## Static Networks
@@ -283,7 +306,8 @@ Optional description for subsection.
### Plot
-
+
### Ckg Network
@@ -293,13 +317,16 @@ Optional description for subsection.
-
+
### Multiqc Report
-
+
# Markdown
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/jupyter/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/jupyter/quarto_report/quarto_report.qmd
index b51059f..c9e6638 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/jupyter/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/jupyter/quarto_report/quarto_report.qmd
@@ -21,16 +21,17 @@ include-in-header:
width: 100%;
text-align: center;
margin-top: 20px;
- }
+ }
include-after-body:
text: |
---
@@ -58,6 +59,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -66,22 +68,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -92,13 +98,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -107,22 +115,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -131,22 +143,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -157,13 +173,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -172,17 +190,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
@@ -271,7 +292,9 @@ Optional description for subsection.
-
+
## Static Networks
@@ -283,7 +306,8 @@ Optional description for subsection.
### Plot
-
+
### Ckg Network
@@ -293,13 +317,16 @@ Optional description for subsection.
-
+
### Multiqc Report
-
+
# Markdown
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/odt/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/odt/quarto_report/quarto_report.qmd
index 8bcea9d..bc4151c 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/odt/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/odt/quarto_report/quarto_report.qmd
@@ -33,6 +33,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -41,17 +42,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
```
@@ -59,6 +63,7 @@ fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
{fig-alt= width=90%}
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -69,8 +74,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Multiline_Plot_Altair.png")
```
@@ -78,6 +84,7 @@ fig_altair.save("static/Multiline_Plot_Altair.png")
{fig-alt= width=90%}
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -86,17 +93,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
```
@@ -104,6 +114,7 @@ fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
{fig-alt= width=90%}
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -112,17 +123,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
```
@@ -130,6 +144,7 @@ fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
{fig-alt= width=90%}
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -140,8 +155,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
```
@@ -149,6 +165,7 @@ fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
{fig-alt= width=90%}
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -157,17 +174,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Plotly_Plot_R.png")
```
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/pdf/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/pdf/quarto_report/quarto_report.qmd
index 57b02d3..edf136b 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/pdf/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/pdf/quarto_report/quarto_report.qmd
@@ -11,12 +11,14 @@ format:
fig-align: center
margin:
- bottom=40mm
- include-in-header:
+ include-in-header:
text: |
\usepackage{scrlayer-scrpage}
\usepackage{hyperref}
\clearpairofpagestyles
- \lofoot{This report was generated with \href{https://github.com/Multiomics-Analytics-Group/vuegen}{VueGen} | \copyright{} 2025 \href{https://github.com/Multiomics-Analytics-Group}{Multiomics Network Analytics Group}}
+ \lofoot{This report was generated with
+ \href{https://github.com/Multiomics-Analytics-Group/vuegen}{VueGen} | \copyright{} 2025
+ \href{https://github.com/Multiomics-Analytics-Group}{Multiomics Network Analytics Group (MoNA)}}
\rofoot{\pagemark}
---
@@ -43,6 +45,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -51,17 +54,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
```
@@ -69,6 +75,7 @@ fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
{fig-alt= width=90%}
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -79,8 +86,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Multiline_Plot_Altair.png")
```
@@ -88,6 +96,7 @@ fig_altair.save("static/Multiline_Plot_Altair.png")
{fig-alt= width=90%}
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -96,17 +105,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
```
@@ -114,6 +126,7 @@ fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
{fig-alt= width=90%}
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -122,17 +135,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
```
@@ -140,6 +156,7 @@ fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
{fig-alt= width=90%}
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -150,8 +167,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
```
@@ -159,6 +177,7 @@ fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
{fig-alt= width=90%}
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -167,17 +186,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Plotly_Plot_R.png")
```
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/pptx/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/pptx/quarto_report/quarto_report.qmd
index a7db1d1..29d2c4f 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/pptx/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/pptx/quarto_report/quarto_report.qmd
@@ -34,6 +34,7 @@ Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -42,17 +43,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
```
@@ -60,6 +64,7 @@ fig_plotly.write_image("static/Top_Species_Plot_By_Biome_Plotly.png")
{fig-alt= width=90%}
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -70,8 +75,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Multiline_Plot_Altair.png")
```
@@ -79,6 +85,7 @@ fig_altair.save("static/Multiline_Plot_Altair.png")
{fig-alt= width=90%}
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -87,17 +94,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
```
@@ -105,6 +115,7 @@ fig_plotly.write_image("static/Pie_Plot_Countries_Plotly.png")
{fig-alt= width=90%}
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -113,17 +124,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
```
@@ -131,6 +145,7 @@ fig_plotly.write_image("static/Pie_Plots_Biomes_Plotly.png")
{fig-alt= width=90%}
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -141,8 +156,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
```
@@ -150,6 +166,7 @@ fig_altair.save("static/Saline_Metagenomics_Samples_Map_Altair.png")
{fig-alt= width=90%}
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -158,17 +175,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.write_image("static/Plotly_Plot_R.png")
```
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/revealjs/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook/revealjs/quarto_report/quarto_report.qmd
index 16b1c30..23328cc 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/revealjs/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/revealjs/quarto_report/quarto_report.qmd
@@ -27,11 +27,12 @@ include-in-header:
include-after-body:
text: |
---
@@ -61,6 +62,7 @@ Optional description for section.
::: {.panel-tabset}
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -69,22 +71,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -95,13 +101,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -110,22 +118,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -134,22 +146,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -160,13 +176,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Plotly Plot R
+
```{python}
#| label: 'Plotly Plot R 6'
#| fig-cap: ""
@@ -175,17 +193,20 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
@@ -286,7 +307,9 @@ Optional description for subsection.
-
+
:::
@@ -306,7 +329,8 @@ Optional description for subsection.
### Plot
-
+
### Ckg Network
@@ -316,13 +340,16 @@ Optional description for subsection.
-
+
### Multiqc Report
-
+
:::
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Dataframes/All_Formats.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Dataframes/All_Formats.py
index 5d60849..c79b815 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Dataframes/All_Formats.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Dataframes/All_Formats.py
@@ -4,23 +4,45 @@
import streamlit as st
df_index = 1
-st.markdown('''All Formats
''', unsafe_allow_html=True)
-st.markdown('''Phyla Correlation Network Csv
''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+ "All Formats
"
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+ "Phyla Correlation Network Csv
"
+ ),
+ unsafe_allow_html=True)
+
df = pd.read_csv('docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/1_phyla_correlation_network_csv.csv')
# Displays a DataFrame using AgGrid with configurable options.
grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
+grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+)
+grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
+grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+)
grid_options = grid_builder.build()
AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
+df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
st.download_button(
label="Download dataframe as CSV",
data=df_csv,
@@ -28,26 +50,43 @@
mime='text/csv',
key=f"download_button_{df_index}")
df_index += 1
-st.markdown('''Abundance Table Example Xls
''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+ "Abundance Table Example Xls
"
+ ),
+ unsafe_allow_html=True)
+
selected_sheet = 0
sheet_names = table_utils.get_sheet_names("docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/2_abundance_table_example_xls.xls")
-selected_sheet = st.selectbox("Select a sheet to display", options=sheet_names)
+selected_sheet = st.selectbox("Select a sheet to display",
+ options=sheet_names,
+ )
df = pd.read_excel('docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/2_abundance_table_example_xls.xls', sheet_name=selected_sheet)
# Displays a DataFrame using AgGrid with configurable options.
grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
+grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+)
+grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
+grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+)
grid_options = grid_builder.build()
AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
+df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
st.download_button(
label="Download dataframe as CSV",
data=df_csv,
@@ -55,22 +94,37 @@
mime='text/csv',
key=f"download_button_{df_index}")
df_index += 1
-st.markdown('''Sample Info Example Txt
''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+ "Sample Info Example Txt
"
+ ),
+ unsafe_allow_html=True)
+
df = pd.read_table('docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/3_sample_info_example_txt.txt')
# Displays a DataFrame using AgGrid with configurable options.
grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
+grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+)
+grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
+grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+)
grid_options = grid_builder.build()
AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
+df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
st.download_button(
label="Download dataframe as CSV",
data=df_csv,
@@ -78,22 +132,37 @@
mime='text/csv',
key=f"download_button_{df_index}")
df_index += 1
-st.markdown('''Sample Info Example Parquet
''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+ "Sample Info Example Parquet
"
+ ),
+ unsafe_allow_html=True)
+
df = pd.read_parquet('docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/4_sample_info_example_parquet.parquet')
# Displays a DataFrame using AgGrid with configurable options.
grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
+grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+)
+grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
+grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+)
grid_options = grid_builder.build()
AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
+df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
st.download_button(
label="Download dataframe as CSV",
data=df_csv,
@@ -101,23 +170,38 @@
mime='text/csv',
key=f"download_button_{df_index}")
df_index += 1
-st.markdown('''Example Xlsx
''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+ "Example Xlsx
"
+ ),
+ unsafe_allow_html=True)
+
selected_sheet = 0
df = pd.read_excel('docs/example_data/Basic_example_vuegen_demo_notebook/2_Dataframes/1_All_formats/5_example_xlsx.xlsx', sheet_name=selected_sheet)
# Displays a DataFrame using AgGrid with configurable options.
grid_builder = GridOptionsBuilder.from_dataframe(df)
-grid_builder.configure_default_column(editable=True, groupable=True, filter=True)
-grid_builder.configure_side_bar(filters_panel=True, columns_panel=True)
+grid_builder.configure_default_column(editable=True,
+ groupable=True,
+ filter=True,
+)
+grid_builder.configure_side_bar(filters_panel=True,
+ columns_panel=True)
grid_builder.configure_selection(selection_mode="multiple")
-grid_builder.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=20)
+grid_builder.configure_pagination(enabled=True,
+ paginationAutoPageSize=False,
+ paginationPageSize=20,
+)
grid_options = grid_builder.build()
AgGrid(df, gridOptions=grid_options, enable_enterprise_modules=True)
# Button to download the df
-df_csv = df.to_csv(sep=',', header=True, index=False).encode('utf-8')
+df_csv = df.to_csv(sep=',', header=True, index=False
+ ).encode('utf-8')
st.download_button(
label="Download dataframe as CSV",
data=df_csv,
@@ -125,7 +209,8 @@
mime='text/csv',
key=f"download_button_{df_index}")
df_index += 1
-footer = '''
'''
+
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Home/Homepage.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Home/Homepage.py
index e35e50f..de210e3 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Home/Homepage.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Home/Homepage.py
@@ -1,7 +1,14 @@
import streamlit as st
-st.markdown('''A general description of the report.''', unsafe_allow_html=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Html/All_Html.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Html/All_Html.py
index 18c444a..9af761a 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Html/All_Html.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Html/All_Html.py
@@ -1,22 +1,48 @@
import requests
import streamlit as st
-st.markdown('''All Html''', unsafe_allow_html=True)
-st.markdown('''Plot''', unsafe_allow_html=True)
-with open('docs/example_data/Basic_example_vuegen_demo_notebook/4_Html/1_All_html/1_plot.html', 'r', encoding='utf-8') as html_file:
- html_content = html_file.read()
+st.markdown(
+ (
+        "All Html"
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Plot"
+ ),
+ unsafe_allow_html=True)
+
+
+with open('docs/example_data/Basic_example_vuegen_demo_notebook/4_Html/1_All_html/1_plot.html', 'r', encoding='utf-8') as f:
+ html_content = f.read()
st.components.v1.html(html_content, height=600, scrolling=True)
-st.markdown('''Ckg Network''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Ckg Network"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/4_Html/1_All_html/2_ckg_network.html', 'r') as html_file:
html_content = html_file.read()
-st.markdown(f" Number of nodes: 33", unsafe_allow_html=True)
-st.markdown(f" Number of relationships: 35", unsafe_allow_html=True)
+st.markdown((" "
+             "Number of nodes: 33"),
+            unsafe_allow_html=True)
+st.markdown((""
+             " Number of relationships: 35"
+             ""),
+            unsafe_allow_html=True)
# Streamlit checkbox for controlling the layout
control_layout = st.checkbox('Add panel to control layout', value=True)
@@ -24,14 +50,22 @@
# Load HTML into HTML component for display on Streamlit
st.components.v1.html(html_content, height=net_html_height)
-st.markdown('''Multiqc Report''', unsafe_allow_html=True)
-with open('docs/example_data/Basic_example_vuegen_demo_notebook/4_Html/1_All_html/3_multiqc_report.html', 'r', encoding='utf-8') as html_file:
- html_content = html_file.read()
+st.markdown(
+ (
+        "Multiqc Report"
+ ),
+ unsafe_allow_html=True)
+
+
+with open('docs/example_data/Basic_example_vuegen_demo_notebook/4_Html/1_All_html/3_multiqc_report.html', 'r', encoding='utf-8') as f:
+ html_content = f.read()
st.components.v1.html(html_content, height=600, scrolling=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Markdown/All_Markdown.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Markdown/All_Markdown.py
index b866236..e63022d 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Markdown/All_Markdown.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Markdown/All_Markdown.py
@@ -1,15 +1,30 @@
import requests
import streamlit as st
-st.markdown('''All Markdown''', unsafe_allow_html=True)
-st.markdown('''Readme''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "All Markdown"
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Readme"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/5_Markdown/1_All_markdown/README.md', 'r') as markdown_file:
markdown_content = markdown_file.read()
st.markdown(markdown_content, unsafe_allow_html=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Interactive_Networks.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Interactive_Networks.py
index 02f146e..47fd48f 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Interactive_Networks.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Interactive_Networks.py
@@ -1,17 +1,42 @@
import requests
import streamlit as st
-st.markdown('''Interactive Networks''', unsafe_allow_html=True)
-st.markdown('''Optional description for subsection.''', unsafe_allow_html=True)
-st.markdown('''Man Example''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Interactive Networks"
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Optional description for subsection."
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Man Example"
+ ),
+ unsafe_allow_html=True)
+
with open('tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/static/Man_Example.html', 'r') as html_file:
html_content = html_file.read()
-st.markdown(f" Number of nodes: 9", unsafe_allow_html=True)
-st.markdown(f" Number of relationships: 14", unsafe_allow_html=True)
+st.markdown((" "
+             "Number of nodes: 9"),
+            unsafe_allow_html=True)
+st.markdown((""
+             " Number of relationships: 14"
+             ""),
+            unsafe_allow_html=True)
# Streamlit checkbox for controlling the layout
control_layout = st.checkbox('Add panel to control layout', value=True)
@@ -19,14 +44,22 @@
# Load HTML into HTML component for display on Streamlit
st.components.v1.html(html_content, height=net_html_height)
-st.markdown('''Description''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Description"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/3_Networks/1_Interactive_networks/description.md', 'r') as markdown_file:
markdown_content = markdown_file.read()
st.markdown(markdown_content, unsafe_allow_html=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Static_Networks.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Static_Networks.py
index 59a2b7b..4053106 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Static_Networks.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Networks/Static_Networks.py
@@ -1,11 +1,26 @@
import streamlit as st
-st.markdown('''Static Networks''', unsafe_allow_html=True)
-st.markdown('''Phyla Correlation Network''', unsafe_allow_html=True)
-st.image('docs/example_data/Basic_example_vuegen_demo_notebook/3_Networks/2_Static_networks/1_phyla_correlation_network.png', caption='', use_column_width=True)
+st.markdown(
+ (
+        "Static Networks"
+ ),
+ unsafe_allow_html=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Interactive_Plots.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Interactive_Plots.py
index 8084d36..2a5afdf 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Interactive_Plots.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Interactive_Plots.py
@@ -3,81 +3,154 @@
import requests
import streamlit as st
-st.markdown('''Interactive Plots''', unsafe_allow_html=True)
-st.markdown('''Optional description for section.''', unsafe_allow_html=True)
-st.markdown('''Top Species Plot By Biome Plotly''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Interactive Plots"
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Optional description for section."
+ ),
+ unsafe_allow_html=True)
+
+
+st.markdown(
+ (
+        "Top Species Plot By Biome Plotly"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/1_top_species_plot_by_biome_plotly.json', 'r') as plot_file:
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']}
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])]
st.plotly_chart(plot_json, use_container_width=True)
-st.markdown('''Multiline Plot Altair''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Multiline Plot Altair"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/2_multiline_plot_altair.json', 'r') as plot_file:
plot_json = json.load(plot_file)
altair_plot = alt.Chart.from_dict(plot_json)
-st.vega_lite_chart(json.loads(altair_plot.to_json()), use_container_width=True)
+st.vega_lite_chart(json.loads(altair_plot.to_json()),
+ use_container_width=True)
+
+
+st.markdown(
+ (
+        "Pie Plot Countries Plotly"
+ ),
+ unsafe_allow_html=True)
-st.markdown('''Pie Plot Countries Plotly''', unsafe_allow_html=True)
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/3_pie_plot_countries_plotly.json', 'r') as plot_file:
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']}
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])]
st.plotly_chart(plot_json, use_container_width=True)
-st.markdown('''Pie Plots Biomes Plotly''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Pie Plots Biomes Plotly"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/4_pie_plots_biomes_plotly.json', 'r') as plot_file:
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']}
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])]
st.plotly_chart(plot_json, use_container_width=True)
-st.markdown('''Saline Metagenomics Samples Map Altair''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Saline Metagenomics Samples Map Altair"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/5_saline_metagenomics_samples_map_altair.json', 'r') as plot_file:
plot_json = json.load(plot_file)
altair_plot = alt.Chart.from_dict(plot_json)
-st.vega_lite_chart(json.loads(altair_plot.to_json()), use_container_width=True)
+st.vega_lite_chart(json.loads(altair_plot.to_json()),
+ use_container_width=True)
+
+
+st.markdown(
+ (
+        "Plotly Plot R"
+ ),
+ unsafe_allow_html=True)
-st.markdown('''Plotly Plot R''', unsafe_allow_html=True)
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/6_plotly_plot_R.json', 'r') as plot_file:
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']}
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])]
st.plotly_chart(plot_json, use_container_width=True)
-st.markdown('''Description''', unsafe_allow_html=True)
+
+st.markdown(
+ (
+        "Description"
+ ),
+ unsafe_allow_html=True)
+
with open('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/1_Interactive_plots/description.md', 'r') as markdown_file:
markdown_content = markdown_file.read()
st.markdown(markdown_content, unsafe_allow_html=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Static_Plots.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Static_Plots.py
index c88666c..1690420 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Static_Plots.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/Plots/Static_Plots.py
@@ -1,19 +1,48 @@
import streamlit as st
-st.markdown('''Static Plots''', unsafe_allow_html=True)
-st.markdown('''Number Samples Per Study''', unsafe_allow_html=True)
-st.image('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/2_Static_plots/1_number_samples_per_study.png', caption='', use_column_width=True)
+st.markdown(
+ (
+        "Static Plots"
+ ),
+ unsafe_allow_html=True)
-st.markdown('''Animal Metagenomics Samples Map''', unsafe_allow_html=True)
-st.image('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/2_Static_plots/2_animal_metagenomics_samples_map.png', caption='', use_column_width=True)
+st.markdown(
+ (
+        "Number Samples Per Study"
+ ),
+ unsafe_allow_html=True)
-st.markdown('''Alpha Diversity Host Associated Samples''', unsafe_allow_html=True)
-st.image('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/2_Static_plots/3_alpha_diversity_host_associated_samples.png', caption='', use_column_width=True)
+st.image('docs/example_data/Basic_example_vuegen_demo_notebook/1_Plots/2_Static_plots/1_number_samples_per_study.png', caption='', use_column_width=True)
-footer = '''
-'''
+
+footer = '''
+'''
st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/report_manager.py b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/report_manager.py
index 78edc2d..5ea3ad3 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/report_manager.py
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook/streamlit_report/sections/report_manager.py
@@ -1,12 +1,20 @@
import os
import time
-import psutil
+import psutil
import streamlit as st
-st.set_page_config(layout="wide", page_title="Basic Example Vuegen Demo Notebook")
+st.set_page_config(layout="wide",
+ page_title="Basic Example Vuegen Demo Notebook")
+
+
+st.markdown(
+ (
+        "Basic Example Vuegen Demo Notebook"
+ ),
+ unsafe_allow_html=True)
-st.markdown('''Basic Example Vuegen Demo Notebook''', unsafe_allow_html=True)
sections_pages = {}
homepage = st.Page('Home/Homepage.py', title='Homepage')
@@ -32,7 +40,9 @@
report_nav = st.navigation(sections_pages)
# Following https://discuss.streamlit.io/t/close-streamlit-app-with-button-click/35132/5
-exit_app = st.sidebar.button("Shut Down App", icon=":material/power_off:", use_container_width=True)
+exit_app = st.sidebar.button("Shut Down App",
+ icon=":material/power_off:",
+ use_container_width=True)
if exit_app:
st.toast("Shutting down the app...")
time.sleep(1)
diff --git a/tests/report_examples/Basic_example_vuegen_demo_notebook_cfg/html/quarto_report/quarto_report.qmd b/tests/report_examples/Basic_example_vuegen_demo_notebook_cfg/html/quarto_report/quarto_report.qmd
index 58b241e..121e2d2 100644
--- a/tests/report_examples/Basic_example_vuegen_demo_notebook_cfg/html/quarto_report/quarto_report.qmd
+++ b/tests/report_examples/Basic_example_vuegen_demo_notebook_cfg/html/quarto_report/quarto_report.qmd
@@ -26,11 +26,12 @@ include-in-header:
include-after-body:
text: |
---
@@ -60,6 +61,7 @@ This section contains example plots.
Optional description for section.
### Top Species Plot By Biome Plotly
+
```{python}
#| label: 'Top Species Plot By Biome Plotly 1'
#| fig-cap: ""
@@ -68,22 +70,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Multiline Plot Altair
+
```{python}
#| label: 'Multiline Plot Altair 2'
#| fig-cap: ""
@@ -94,13 +100,15 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
### Pie Plot Countries Plotly
+
```{python}
#| label: 'Pie Plot Countries Plotly 3'
#| fig-cap: ""
@@ -109,22 +117,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Pie Plots Biomes Plotly
+
```{python}
#| label: 'Pie Plots Biomes Plotly 4'
#| fig-cap: ""
@@ -133,22 +145,26 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
plot_json = json.load(plot_file)
# Keep only 'data' and 'layout' sections
-plot_json = {key: plot_json[key] for key in plot_json if key in ['data', 'layout']}
-
+plot_json = {key: plot_json[key] for key in plot_json
+ if key in ['data', 'layout']
+ }
# Remove 'frame' section in 'data'
-plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'} for entry in plot_json.get('data', [])]
-
+plot_json['data'] = [{k: v for k, v in entry.items() if k != 'frame'}
+ for entry in plot_json.get('data', [])
+ ]
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-
# Create the plotly plot
fig_plotly = pio.from_json(plot_json_str)
-fig_plotly.update_layout(autosize=False, width=950, height=400, margin=dict(b=50, t=50, l=50, r=50))
+fig_plotly.update_layout(autosize=False, width=950, height=400,
+ margin=dict(b=50, t=50, l=50, r=50)
+ )
fig_plotly.show()
```
### Saline Metagenomics Samples Map Altair
+
```{python}
#| label: 'Saline Metagenomics Samples Map Altair 5'
#| fig-cap: ""
@@ -159,8 +175,9 @@ with open(report_dir /'../../../../../docs/example_data/Basic_example_vuegen_dem
# Convert JSON to string
plot_json_str = json.dumps(plot_json)
-# Create the plotly plot
-fig_altair = alt.Chart.from_json(plot_json_str).properties(width=900, height=370)
+# Create the altair plot
+fig_altair = alt.Chart.from_json(plot_json_str
+ ).properties(width=900, height=370)
fig_altair
```
@@ -245,7 +262,9 @@ Optional description for subsection
-
+
## Static Networks
@@ -257,7 +276,8 @@ Optional description for subsection
### Plot
-
+
### Ckg Network
@@ -267,13 +287,16 @@ Optional description for subsection
-
+
### Multiqc Report
-
+
# Markdown
diff --git a/tests/report_examples/chat_bot/streamlit_report/sections/Chatbot_Example/Ollama_style_streaming_chatbot.py b/tests/report_examples/chat_bot/streamlit_report/sections/Chatbot_Example/Ollama_style_streaming_chatbot.py
new file mode 100644
index 0000000..35f4008
--- /dev/null
+++ b/tests/report_examples/chat_bot/streamlit_report/sections/Chatbot_Example/Ollama_style_streaming_chatbot.py
@@ -0,0 +1,97 @@
+import json
+import time
+
+import requests
+import streamlit as st
+
+st.markdown(
+ (
+        "Ollama-style streaming chatbot"
+ ),
+ unsafe_allow_html=True,
+)
+
+
+st.markdown(
+    ("ChatBot Component"),
+ unsafe_allow_html=True,
+)
+
+
+# Init session state
+if "messages" not in st.session_state:
+ st.session_state["messages"] = []
+
+
+# Function to send prompt to Ollama API
+def generate_query(messages):
+ response = requests.post(
+ "http://localhost:11434/api/chat",
+ json={"model": "llama3.2", "messages": messages, "stream": True},
+ )
+ response.raise_for_status()
+ return response
+
+
+# Parse streaming response from Ollama
+def parse_api_response(response):
+ try:
+ output = ""
+ for line in response.iter_lines():
+ body = json.loads(line)
+ if "error" in body:
+ raise Exception(f"API error: {body['error']}")
+ if body.get("done", False):
+ return {"role": "assistant", "content": output}
+ output += body.get("message", {}).get("content", "")
+ except Exception as e:
+ return {
+ "role": "assistant",
+ "content": f"Error while processing API response: {str(e)}",
+ }
+
+
+# Simulated typing effect for responses
+def response_generator(msg_content):
+ for word in msg_content.split():
+ yield word + " "
+ time.sleep(0.1)
+ yield "\n"
+
+
+# Display chat history
+for message in st.session_state["messages"]:
+ with st.chat_message(message["role"]):
+ content = message["content"]
+ if isinstance(content, dict):
+ st.markdown(content.get("text", ""), unsafe_allow_html=True)
+ if "links" in content:
+ st.markdown("**Sources:**")
+ for link in content["links"]:
+ st.markdown(f"- [{link}]({link})")
+ if "subgraph_pyvis" in content:
+ st.components.v1.html(content["subgraph_pyvis"], height=600)
+ else:
+ st.write(content)
+
+
+# Capture and append new user prompt
+if prompt := st.chat_input("Enter your prompt here:"):
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ with st.chat_message("user"):
+ st.write(prompt)
+
+ # Retrieve question and generate answer
+ combined = "\n".join(
+ msg["content"] for msg in st.session_state.messages if msg["role"] == "user"
+ )
+ messages = [{"role": "user", "content": combined}]
+ with st.spinner("Generating answer..."):
+ response = generate_query(messages)
+ parsed_response = parse_api_response(response)
+
+ # Add the assistant's response to the session state and display it
+ st.session_state.messages.append(parsed_response)
+ with st.chat_message("assistant"):
+ st.write_stream(response_generator(parsed_response["content"]))
diff --git a/tests/report_examples/chat_bot/streamlit_report/sections/Home/Homepage.py b/tests/report_examples/chat_bot/streamlit_report/sections/Home/Homepage.py
new file mode 100644
index 0000000..0b0555f
--- /dev/null
+++ b/tests/report_examples/chat_bot/streamlit_report/sections/Home/Homepage.py
@@ -0,0 +1,30 @@
+import streamlit as st
+
+st.markdown(
+ (
+        "A chatbot example."
+ ),
+ unsafe_allow_html=True)
+
+footer = '''
+
+
+'''
+
+st.markdown(footer, unsafe_allow_html=True)
diff --git a/tests/report_examples/chat_bot/streamlit_report/sections/report_manager.py b/tests/report_examples/chat_bot/streamlit_report/sections/report_manager.py
new file mode 100644
index 0000000..af3eca1
--- /dev/null
+++ b/tests/report_examples/chat_bot/streamlit_report/sections/report_manager.py
@@ -0,0 +1,41 @@
+import os
+import time
+
+import psutil
+import streamlit as st
+
+st.set_page_config(layout="wide",
+ page_title="Chatbot example")
+
+
+st.markdown(
+ (
+        "Chatbot example"
+ ),
+ unsafe_allow_html=True)
+
+
+sections_pages = {}
+homepage = st.Page('Home/Homepage.py', title='Homepage')
+sections_pages['Home'] = [homepage]
+
+Simple_test = st.Page('ChatBot_test/Simple_test.py', title='Simple test')
+sections_pages['ChatBot test'] = [Simple_test]
+
+report_nav = st.navigation(sections_pages)
+
+# Following https://discuss.streamlit.io/t/close-streamlit-app-with-button-click/35132/5
+exit_app = st.sidebar.button("Shut Down App",
+ icon=":material/power_off:",
+ use_container_width=True)
+if exit_app:
+ st.toast("Shutting down the app...")
+ time.sleep(1)
+ # Terminate streamlit python process
+ pid = os.getpid()
+ p = psutil.Process(pid)
+ p.terminate()
+
+
+report_nav.run()