diff --git a/.github/workflows/_flagship-apps.yml b/.github/workflows/_flagship-apps.yml
index 5e7df1659e9de..0e076a9d75868 100644
--- a/.github/workflows/_flagship-apps.yml
+++ b/.github/workflows/_flagship-apps.yml
@@ -43,9 +43,10 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - {app: "flashy", repo: "Lightning-Universe/Flashy-app"}
-          - {app: "muse", repo: "Lightning-Universe/stable-diffusion-deploy"}
-          - {app: "jupyter", repo: "Lightning-Universe/Jupyter-component"}
+          - {app: "flashy", repo: "Lightning-Universe/Flashy-app", requirements: ''}
+          - {app: "muse", repo: "Lightning-Universe/stable-diffusion-deploy", requirements: ''}
+          - {app: "jupyter", repo: "Lightning-Universe/Jupyter-component", requirements: ''}
+          - {app: "tldr", repo: "Lightning-Universe/TLDR-component", requirements: 'tests/requirements.txt'}

 # TODO:
 # - Training Studio
@@ -89,6 +90,14 @@ jobs:

     - name: Install Lightning package
       run: pip install -e .[cloud,test] -f $TORCH_URL
+
+    - name: Install Repo
+      run: pip install -e ./tests/_flagship-app
+
+    - name: Install Flagship Test Requirements
+      run: pip install -r ./tests/_flagship-app/${{ matrix.requirements }}
+      if: ${{ matrix.requirements }}
+
     - name: List pip dependency
       run: pip --version && pip list

diff --git a/tests/integrations_app/flagship/test_tldr.py b/tests/integrations_app/flagship/test_tldr.py
new file mode 100644
index 0000000000000..ed3ea76f2b509
--- /dev/null
+++ b/tests/integrations_app/flagship/test_tldr.py
@@ -0,0 +1,37 @@
+from time import sleep
+
+from integrations_app.flagship import _PATH_INTEGRATIONS_DIR
+
+from lightning.app.testing.testing import run_app_in_cloud
+
+
+def test_app_in_cloud():
+
+    with run_app_in_cloud(_PATH_INTEGRATIONS_DIR, "dummy_test_app.py") as (
+        _,
+        view_page,
+        fetch_logs,
+        name,
+    ):
+
+        # Validate the logs.
+        has_logs = False
+        while not has_logs:
+            logs = list(fetch_logs("multinode.ws.0"))
+            for log in logs:
+                if "`Trainer.fit` stopped: `max_epochs=2` reached." in log:
+                    has_logs = True
+            sleep(1)
+
+        expected_strings = [
+            # don't include values for actual hardware availability as this may depend on environment.
+            "GPU available: ",
+            "All distributed processes registered.",
+            "674 K Trainable params\n0 Non - trainable params\n674 K Total params\n2.699 Total estimated model params size(MB)",
+            "Epoch 0:",
+            "`Trainer.fit` stopped: `max_epochs=2` reached.",
+            "Input text:Input text:\n summarize: ML Ops platforms come in many flavors from platforms that train models",
+        ]
+
+        for curr_str in expected_strings:
+            assert any([curr_str in line for line in logs])