diff --git a/.github/actions/deploy-vercel/action.yml b/.github/actions/deploy-vercel/action.yml index 9f2d084a0d2c..c33cc346b7e5 100644 --- a/.github/actions/deploy-vercel/action.yml +++ b/.github/actions/deploy-vercel/action.yml @@ -1,6 +1,10 @@ -# This action handles updating the target commit env variable (`RELEASE_COMMIT`) -# which is used as the pointer for `rerun.io/docs` and `rerun.io/examples` -# and triggering a redeploy of `rerun.io`. +# If `target` is set to `production`, this action handles updating the +# target commit env variable (`RELEASE_COMMIT`) which is used as the +# pointer for `rerun.io/docs` and `rerun.io/examples` and triggering +# a redeploy of `rerun.io`. + +# If `target` is set to `preview`, then this instead deploys a fresh preview +# with an override for `release_commit`, and sets the `vercel_preview_url` output. name: "Deploy rerun.io" @@ -25,6 +29,10 @@ inputs: release_version: description: "Which release version to update the deployment to" type: string + required: false + target: + description: "Which Vercel environment to deploy to" + type: string required: true runs: diff --git a/.github/actions/deploy-vercel/index.mjs b/.github/actions/deploy-vercel/index.mjs index 43b8bbb0fe85..b306ef8c2fb3 100644 --- a/.github/actions/deploy-vercel/index.mjs +++ b/.github/actions/deploy-vercel/index.mjs @@ -1,63 +1,32 @@ // @ts-check import { Client } from "./vercel.mjs"; -import { assert, getRequiredInput, info } from "./util.mjs"; +import { assert, getInput, getRequiredInput } from "./util.mjs"; +import { deployToProduction } from "./production.mjs"; +import { deployToPreview } from "./preview.mjs"; // These inputs are defined in `action.yml`, and should be kept in sync const token = getRequiredInput("vercel_token"); -const teamName = getRequiredInput("vercel_team_name"); -const projectName = getRequiredInput("vercel_project_name"); -const releaseCommit = getRequiredInput("release_commit"); -const releaseVersion = getRequiredInput("release_version"); +const team = getRequiredInput("vercel_team_name"); +const project = getRequiredInput("vercel_project_name"); +const commit = getRequiredInput("release_commit"); +const target = getRequiredInput("target"); const client = new Client(token); -info`Fetching team "${teamName}"`; -const availableTeams = await client.teams(); -assert(availableTeams, `failed to get team "${teamName}"`); -const team = availableTeams.find((team) => team.name === teamName); -assert(team, `failed to get team "${teamName}"`); - -info`Fetching project "${projectName}"`; -const projectsInTeam = await client.projects(team.id); -const project = projectsInTeam.find((project) => project.name === projectName); -assert(project, `failed to get project "${projectName}"`); - -info`Fetching latest production deployment`; -const productionDeployments = await client.deployments(team.id, project.id); -const latestProductionDeployment = productionDeployments[0]; -assert( - latestProductionDeployment, - `failed to get latest production deployment`, -); - -const environment = await client.envs(team.id, project.id); -const RELEASE_COMMIT_KEY = "RELEASE_COMMIT"; -const RELEASE_VERSION_KEY = "RELEASE_VERSION"; - -info`Fetching "${RELEASE_COMMIT_KEY}" env var`; -const releaseCommitEnv = environment.find( - (env) => env.key === RELEASE_COMMIT_KEY, -); -assert(releaseCommitEnv, `failed to get "${RELEASE_COMMIT_KEY}" env var`); - -info`Fetching "${RELEASE_VERSION_KEY}" env var`; -const releaseVersionEnv = environment.find( - (env) => env.key === RELEASE_VERSION_KEY, -); 
-assert(releaseVersionEnv, `failed to get "${RELEASE_VERSION_KEY}" env var`); - -info`Setting "${RELEASE_COMMIT_KEY}" env to "${releaseCommit}"`; -await client.setEnv(team.id, project.id, releaseCommitEnv.id, { - key: RELEASE_COMMIT_KEY, - value: releaseCommit, -}); - -info`Setting "${RELEASE_VERSION_KEY}" env to "${releaseVersion}"`; -await client.setEnv(team.id, project.id, releaseVersionEnv.id, { - key: RELEASE_VERSION_KEY, - value: releaseVersion, -}); - -info`Triggering redeploy`; -await client.redeploy(team.id, latestProductionDeployment.uid, "landing"); +switch (target) { + case "production": { + const version = getRequiredInput("release_version"); + await deployToProduction(client, { team, project, commit, version }); + break; + } + + case "preview": { + await deployToPreview(client, { team, project, commit }); + break; + } + + default: { + throw new Error(`"target" must be one of: production, preview`); + } +} diff --git a/.github/actions/deploy-vercel/preview.mjs b/.github/actions/deploy-vercel/preview.mjs new file mode 100644 index 000000000000..1ff791694d98 --- /dev/null +++ b/.github/actions/deploy-vercel/preview.mjs @@ -0,0 +1,49 @@ +// @ts-check + +import { assert, info, setOutput } from "./util.mjs"; +import { Client } from "./vercel.mjs"; + +/** + * + * @param {Client} client + * @param {{ + * team: string; + * project: string; + * commit: string; + * }} options + */ +export async function deployToPreview(client, options) { + info`Fetching team "${options.team}"`; + const availableTeams = await client.teams(); + assert(availableTeams, `failed to get team "${options.team}"`); + const team = availableTeams.find((team) => team.name === options.team); + assert(team, `failed to get team "${options.team}"`); + + info`Fetching project "${options.project}"`; + const projectsInTeam = await client.projects(team.id); + const project = projectsInTeam.find( + (project) => project.name === options.project, + ); + assert(project, `failed to get project "${options.project}"`); + + info`Fetching latest production deployment`; + const productionDeployments = await client.deployments(team.id, project.id); + const latestProductionDeployment = productionDeployments[0]; + assert( + latestProductionDeployment, + `failed to get latest production deployment`, + ); + + info`Deploying preview with RELEASE_COMMIT=${options.commit}`; + const { url } = await client.deployPreviewFrom( + team.id, + latestProductionDeployment.uid, + "landing-preview", + { + RELEASE_COMMIT: options.commit, + IS_PR_PREVIEW: "true", + }, + ); + + setOutput("vercel_preview_url", url); +} diff --git a/.github/actions/deploy-vercel/production.mjs b/.github/actions/deploy-vercel/production.mjs new file mode 100644 index 000000000000..f73b462850a4 --- /dev/null +++ b/.github/actions/deploy-vercel/production.mjs @@ -0,0 +1,68 @@ +// @ts-check + +import { assert, info } from "./util.mjs"; +import { Client } from "./vercel.mjs"; + +/** + * + * @param {Client} client + * @param {{ + * team: string; + * project: string; + * commit: string; + * version: string; + * }} options + */ +export async function deployToProduction(client, options) { + info`Fetching team "${options.team}"`; + const availableTeams = await client.teams(); + assert(availableTeams, `failed to get team "${options.team}"`); + const team = availableTeams.find((team) => team.name === options.team); + assert(team, `failed to get team "${options.team}"`); + + info`Fetching project "${options.project}"`; + const projectsInTeam = await client.projects(team.id); + const 
project = projectsInTeam.find( + (project) => project.name === options.project, + ); + assert(project, `failed to get project "${options.project}"`); + + info`Fetching latest production deployment`; + const productionDeployments = await client.deployments(team.id, project.id); + const latestProductionDeployment = productionDeployments[0]; + assert( + latestProductionDeployment, + `failed to get latest production deployment`, + ); + + const environment = await client.envs(team.id, project.id); + const RELEASE_COMMIT_KEY = "RELEASE_COMMIT"; + const RELEASE_VERSION_KEY = "RELEASE_VERSION"; + + info`Fetching "${RELEASE_COMMIT_KEY}" env var`; + const releaseCommitEnv = environment.find( + (env) => env.key === RELEASE_COMMIT_KEY, + ); + assert(releaseCommitEnv, `failed to get "${RELEASE_COMMIT_KEY}" env var`); + + info`Fetching "${RELEASE_VERSION_KEY}" env var`; + const releaseVersionEnv = environment.find( + (env) => env.key === RELEASE_VERSION_KEY, + ); + assert(releaseVersionEnv, `failed to get "${RELEASE_VERSION_KEY}" env var`); + + info`Setting "${RELEASE_COMMIT_KEY}" env to "${options.commit}"`; + await client.setEnv(team.id, project.id, releaseCommitEnv.id, { + key: RELEASE_COMMIT_KEY, + value: options.commit, + }); + + info`Setting "${RELEASE_VERSION_KEY}" env to "${options.version}"`; + await client.setEnv(team.id, project.id, releaseVersionEnv.id, { + key: RELEASE_VERSION_KEY, + value: options.version, + }); + + info`Triggering redeploy`; + await client.redeploy(team.id, latestProductionDeployment.uid, "landing"); +} diff --git a/.github/actions/deploy-vercel/test.mjs b/.github/actions/deploy-vercel/test.mjs new file mode 100644 index 000000000000..a4d63265d3be --- /dev/null +++ b/.github/actions/deploy-vercel/test.mjs @@ -0,0 +1,38 @@ +// @ts-check + +import { Client } from "./vercel.mjs"; +import { assert, info } from "./util.mjs"; + +const client = new Client("NzuZ9WBTnfUGiwHrhd7mit2E"); + +const teamName = "rerun"; +const projectName = "landing"; + +info`Fetching team "${teamName}"`; +const availableTeams = await client.teams(); +assert(availableTeams, `failed to get team "${teamName}"`); +const team = availableTeams.find((team) => team.name === teamName); +assert(team, `failed to get team "${teamName}"`); + +info`Fetching project "${projectName}"`; +const projectsInTeam = await client.projects(team.id); +const project = projectsInTeam.find((project) => project.name === projectName); +assert(project, `failed to get project "${projectName}"`); + +info`Fetching latest production deployment`; +const productionDeployments = await client.deployments(team.id, project.id); +const latestProductionDeployment = productionDeployments[0]; +assert( + latestProductionDeployment, + `failed to get latest production deployment`, +); + +const response = await client.deployPreviewFrom( + team.id, + latestProductionDeployment.uid, + "rerun-custom-preview-test", + { + RELEASE_COMMIT: "main", + }, +); +console.log(response); diff --git a/.github/actions/deploy-vercel/util.mjs b/.github/actions/deploy-vercel/util.mjs index c838307ffce5..24bb3d76db98 100644 --- a/.github/actions/deploy-vercel/util.mjs +++ b/.github/actions/deploy-vercel/util.mjs @@ -1,5 +1,8 @@ // @ts-check +import { appendFileSync } from "fs"; +import os from "os"; + /** * Log a message with level `INFO` * @@ -41,6 +44,16 @@ export function getRequiredInput(name) { return input; } +/** + * Set a GitHub Actions output for other workflows steps to read. 
+ * @param {string} key + * @param {string} value + */ +export function setOutput(key, value) { + const outputFile = /** @type {string} */ (process.env["GITHUB_OUTPUT"]); + appendFileSync(outputFile, `${key}=${value}${os.EOL}`); +} + /** * Assert that `value` is truthy, throwing an error if it is not. * @@ -61,4 +74,3 @@ export function assert(value, message) { throw new Error(error); } } - diff --git a/.github/actions/deploy-vercel/vercel.mjs b/.github/actions/deploy-vercel/vercel.mjs index 454711012343..e69aab2a5fcb 100644 --- a/.github/actions/deploy-vercel/vercel.mjs +++ b/.github/actions/deploy-vercel/vercel.mjs @@ -106,7 +106,10 @@ export class Client { */ async teams() { const response = await this.get("v2/teams"); - assert("teams" in response, () => `failed to get teams: ${JSON.stringify(response)}`); + assert( + "teams" in response, + () => `failed to get teams: ${JSON.stringify(response)}`, + ); return response.teams; } @@ -119,7 +122,10 @@ export class Client { */ async projects(teamId) { const response = await this.get("v9/projects", { teamId }); - assert("projects" in response, () => `failed to get projects: ${JSON.stringify(response)}`); + assert( + "projects" in response, + () => `failed to get projects: ${JSON.stringify(response)}`, + ); return response.projects; } @@ -146,7 +152,7 @@ export class Client { }); assert( "deployments" in response, - () => `failed to get deployments: ${JSON.stringify(response)}` + () => `failed to get deployments: ${JSON.stringify(response)}`, ); return response.deployments; } @@ -162,7 +168,7 @@ export class Client { const response = await this.get(`v9/projects/${projectId}/env`, { teamId }); assert( "envs" in response, - () => `failed to get environment variables: ${JSON.stringify(response)}` + () => `failed to get environment variables: ${JSON.stringify(response)}`, ); return response.envs; } @@ -177,7 +183,10 @@ export class Client { * @returns {Promise} */ async getEnvDecrypted(teamId, projectId, envId) { - return await this.get(`v9/projects/${projectId}/env/${envId}`, { teamId, decrypt: "true" }); + return await this.get(`v9/projects/${projectId}/env/${envId}`, { + teamId, + decrypt: "true", + }); } /** @@ -194,12 +203,17 @@ export class Client { teamId, projectId, envId, - { key, target = ["production", "preview", "development"], type = "encrypted", value } + { + key, + target = ["production", "preview", "development"], + type = "encrypted", + value, + }, ) { return await this.patch( `v9/projects/${projectId}/env/${envId}`, { gitBranch: null, key, target, type, value }, - { teamId } + { teamId }, ); } @@ -217,9 +231,36 @@ export class Client { async redeploy(teamId, deploymentId, name) { return await this.post( `v13/deployments`, - { deploymentId, meta: { action: "redeploy" }, name, target: "production" }, - { teamId, forceNew: "1" } + { + deploymentId, + meta: { action: "redeploy" }, + name, + target: "production", + }, + { teamId, forceNew: "1" }, ); } -} + /** + * Trigger a preview deploy using the files of an existing deployment (`deploymentId`). 
+ * + * @param {string} teamId + * @param {string} deploymentId + * @param {string} name + * @param {Record} [env] + * @returns {Promise} + */ + async deployPreviewFrom(teamId, deploymentId, name, env) { + // `target` not being set means "preview" + const body = { + deploymentId, + meta: { action: "redeploy" }, + name, + }; + if (env) { + body.env = env; + body.build = { env }; + } + return await this.post(`v13/deployments`, body, { teamId, forceNew: "1" }); + } +} diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 5a7ff934a584..0324b1da38a0 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -23,7 +23,5 @@ To get an auto-generated PR description you can put "copilot:summary" or "copilo * [ ] If applicable, add a new check to the [release checklist](https://github.com/rerun-io/rerun/blob/main/tests/python/release_checklist)! - [PR Build Summary](https://build.rerun.io/pr/{{ pr.number }}) -- [Docs preview](https://rerun.io/preview/{{ pr.commit }}/docs) -- [Examples preview](https://rerun.io/preview/{{ pr.commit }}/examples) - [Recent benchmark results](https://build.rerun.io/graphs/crates.html) - [Wasm size tracking](https://build.rerun.io/graphs/sizes.html) diff --git a/.github/workflows/on_pull_request.yml b/.github/workflows/on_pull_request.yml index 451fd6a47f43..feb35d6b7c95 100644 --- a/.github/workflows/on_pull_request.yml +++ b/.github/workflows/on_pull_request.yml @@ -55,6 +55,23 @@ jobs: - '**/CMakeLists.txt' - '**/*cmake' + docs-paths-filter: + runs-on: ubuntu-latest + outputs: + docs_changes: ${{ steps.filter.outputs.docs_changes }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || '' }} + - uses: dorny/paths-filter@v2 + id: filter + with: + filters: | + docs_changes: + - 'docs/content/**/*.md' + - 'examples/**/*.md' + - 'examples/manifest.toml' + rust-checks: name: "Rust Checks" if: github.event.pull_request.head.repo.owner.login == 'rerun-io' && needs.rust-paths-filter.outputs.rust_changes == 'true' @@ -186,3 +203,13 @@ jobs: CONCURRENCY: pr-${{ github.event.pull_request.number }} PR_NUMBER: ${{ github.event.pull_request.number }} secrets: inherit + + deploy-landing-preview: + name: "Deploy Landing Preview" + if: needs.docs-paths-filter.outputs.docs_changes == 'true' + needs: docs-paths-filter + uses: ./.github/workflows/reusable_deploy_landing_preview.yml + with: + CONCURRENCY: pr-${{ github.event.pull_request.number }} + PR_NUMBER: ${{ github.event.pull_request.number }} + secrets: inherit diff --git a/.github/workflows/reusable_deploy_docs.yml b/.github/workflows/reusable_deploy_docs.yml index 1f251647c174..736c9f089a78 100644 --- a/.github/workflows/reusable_deploy_docs.yml +++ b/.github/workflows/reusable_deploy_docs.yml @@ -262,3 +262,4 @@ jobs: vercel_project_name: ${{ vars.VERCEL_PROJECT_NAME }} release_commit: ${{ inputs.RELEASE_COMMIT }} release_version: ${{ inputs.RELEASE_VERSION }} + target: "production" diff --git a/.github/workflows/reusable_deploy_landing_preview.yml b/.github/workflows/reusable_deploy_landing_preview.yml new file mode 100644 index 000000000000..e1198e3124f5 --- /dev/null +++ b/.github/workflows/reusable_deploy_landing_preview.yml @@ -0,0 +1,60 @@ +name: Reusable Deploy Landing Preview + +on: + workflow_call: + inputs: + CONCURRENCY: + required: true + type: string + PR_NUMBER: + required: true + type: string + +concurrency: + group: ${{ inputs.CONCURRENCY }}-deploy-landing-preview + 
cancel-in-progress: true + +permissions: + contents: "write" + id-token: "write" + pull-requests: "write" + +jobs: + deploy: + name: Deploy + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.ref) || '' }} + + - name: Get sha + id: get-sha + shell: bash + run: | + full_commit="${{ (github.event_name == 'pull_request' && github.event.pull_request.head.sha) || github.sha }}" + echo "sha=$full_commit" >> "$GITHUB_OUTPUT" + + - name: Re-deploy rerun.io + id: deploy-vercel + uses: ./.github/actions/deploy-vercel + with: + vercel_token: ${{ secrets.VERCEL_TOKEN }} + vercel_team_name: ${{ vars.VERCEL_TEAM_NAME }} + vercel_project_name: ${{ vars.VERCEL_PROJECT_NAME }} + release_commit: ${{ steps.get-sha.outputs.sha }} + target: "preview" + + - name: Create PR comment + # https://github.com/mshick/add-pr-comment + uses: mshick/add-pr-comment@v2.8.1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + message: | + ## Deployed docs + + | Commit | Link | + | ------- | ----- | + | ${{ steps.get-sha.outputs.sha }} | https://${{ steps.deploy-vercel.outputs.vercel_preview_url }}/docs | diff --git a/.vscode/launch.json b/.vscode/launch.json index 11ee57e12d59..aa252bc28c7d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -41,7 +41,10 @@ "kind": "lib" } }, - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } }, { "name": "Debug 'rerun' (no args)", @@ -60,7 +63,10 @@ } }, "args": [], - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } }, { "name": "Debug 'rerun ../data.rrd'", @@ -81,7 +87,10 @@ "args": [ "../data.rrd" ], - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } }, { "name": "Debug 'rerun' colmap.rrd from url", @@ -102,7 +111,10 @@ "args": [ "https://demo.rerun.io/commit/0f89b62/examples/colmap/data.rrd" ], - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } }, { "name": "Debug 'minimal' example", @@ -121,7 +133,7 @@ "args": [], "cwd": "${workspaceFolder}", "env": { - "RUST_LOG": "trace" + "RUST_LOG": "debug" } }, { @@ -139,7 +151,10 @@ } }, "args": [], - "cwd": "${workspaceFolder}" + "cwd": "${workspaceFolder}", + "env": { + "RUST_LOG": "debug" + } }, { "name": "Debug 'codegen'", diff --git a/CHANGELOG.md b/CHANGELOG.md index 8068250e913c..77365b9c195e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,214 @@ # Rerun changelog ## [Unreleased](https://github.com/rerun-io/rerun/compare/latest...HEAD) +The biggest news is the ability to create a _blueprint_ via the Python logging API: + +```py +import rerun.blueprint as rrb + +blueprint = rrb.Blueprint( + rrb.Vertical( + rrb.Spatial3DView(name="3D", origin="/"), + rrb.Horizontal( + rrb.TextDocumentView(name="README", origin="/description"), + rrb.Spatial2DView(name="Camera", origin="/camera/image"), + rrb.TimeSeriesView(origin="/plot"), + ), + row_shares=[3, 2], + ) + rrb.BlueprintPanel(expanded=True), + rrb.SelectionPanel(expanded=False), + rrb.TimePanel(expanded=False), +) +``` + +The blueprint can then be sent to the viewer with +```py +rr.send_blueprint(blueprint) +``` + +Or stored to a file, and then later opened in the viewer: +```py +blueprint.save("my_nice_dashboard.rbl") +``` + +In this case, the results looks something like this: -## [0.14.1](https://github.com/rerun-io/rerun/compare/0.14.0...0.14.1) - C++ build artifact fix + + + + + + + 
+ +Blueprints are currently only supported in the Python API, with C++ and Rust support coming later. + + +### ✨ Overview & highlights +- 🟦 Configure the layout and content of space views from Python [(docs)](https://www.rerun.io/docs/howto/configure-viewer-through-code?speculative-link) +- πŸ–§ More powerful and flexible data loaders [(docs)](https://www.rerun.io/docs/reference/dataloaders?speculative-link) +- πŸ–΅ Improved UI for managing recordings and applications +- πŸ’Ύ Save and load blueprint files in the viewer +- 🎨 Configurable background color for 3D Space Views [#5443](https://github.com/rerun-io/rerun/pull/5443) +- πŸ’ͺ Linux ARM64 support [#5489](https://github.com/rerun-io/rerun/pull/5489) [#5503](https://github.com/rerun-io/rerun/pull/5503) [#5511](https://github.com/rerun-io/rerun/pull/5511) +- πŸ–ΌοΈ Show examples in the welcome page +- πŸ–±οΈ Improve context-menu when right-clicking items in the blueprint panel and streams tree +- ❌ Remove `InstanceKey` from our logging APIs [#5395](https://github.com/rerun-io/rerun/pull/5395) ([migration guide](https://www.rerun.io/docs/reference/migration/migration-0-15?speculative-link)) +- ❌ Remove groups from blueprints panel [#5326](https://github.com/rerun-io/rerun/pull/5326) + +### πŸ”Ž Details + +#### πŸͺ΅ Log API +- Replace `MarkerShape` with code-generated `enum` type [#5336](https://github.com/rerun-io/rerun/pull/5336) +- Key-less data model 1: scrap `InstanceKey` from public logging APIs [#5395](https://github.com/rerun-io/rerun/pull/5395) +- Remove the check for `WrongNumberOfInstances` [#5399](https://github.com/rerun-io/rerun/pull/5399) +- Control panel expanded state via blueprint APIs [#5484](https://github.com/rerun-io/rerun/pull/5484) +- Remove deprecated `TimeSeriesScalar` [#5604](https://github.com/rerun-io/rerun/pull/5604) +- Customizable data loaders [#5327](https://github.com/rerun-io/rerun/pull/5327) [#5328](https://github.com/rerun-io/rerun/pull/5328) [#5330](https://github.com/rerun-io/rerun/pull/5330) [#5337](https://github.com/rerun-io/rerun/pull/5337) [#5351](https://github.com/rerun-io/rerun/pull/5351) [#5355](https://github.com/rerun-io/rerun/pull/5355) [#5379](https://github.com/rerun-io/rerun/pull/5379) [#5361](https://github.com/rerun-io/rerun/pull/5361) [#5388](https://github.com/rerun-io/rerun/pull/5388) + +#### 🌊 C++ API +- Fix arrow libraries from download & build not being found in some cases [#5366](https://github.com/rerun-io/rerun/pull/5366) +- CMake: Add `RERUN_INSTALL_RERUN_C` option to disable installation of `rerun_c` library [#5374](https://github.com/rerun-io/rerun/pull/5374) (thanks [@traversaro](https://github.com/traversaro)!) +- CMake: Fix `install` not finding external `arrow` for dynamic linking [#5375](https://github.com/rerun-io/rerun/pull/5375) (thanks [@traversaro](https://github.com/traversaro)!) +- Make `pinhole.hpp` robust against `min/max` preprocessor macros (typically from `windows.h`) [#5432](https://github.com/rerun-io/rerun/pull/5432) +- Build C++ SDK for Linux ARM64 [#5489](https://github.com/rerun-io/rerun/pull/5489) +- Generate fewer `.cpp` files: Inline forward serialization of transparent components to their respective datatypes [#5544](https://github.com/rerun-io/rerun/pull/5544) +- Fix `RERUN_C_BUILD_ARTIFACT` path value if `CARGO_BUILD_TARGET` env variable is set [#5547](https://github.com/rerun-io/rerun/pull/5547) (thanks [@traversaro](https://github.com/traversaro)!) 
+ +#### 🐍 Python API +- All python components that wrap a `bool` now implement `__bool__` [#5400](https://github.com/rerun-io/rerun/pull/5400) +- Add the remaining space views and name them consistently [#5498](https://github.com/rerun-io/rerun/pull/5498) +- Add option to include blueprint in an `.rrd` when calling `.save(…)` [#5572](https://github.com/rerun-io/rerun/pull/5572) +- Allow naming space view containers [#5626](https://github.com/rerun-io/rerun/pull/5626) + +#### πŸ¦€ Rust API + +#### πŸͺ³ Bug Fixes +- Sort text log space view on currently selected timeline [#5348](https://github.com/rerun-io/rerun/pull/5348) +- Fix parents of queried paths getting visualized, fix 2D objects not showing at all in 3D if their camera parent is not included [#5424](https://github.com/rerun-io/rerun/pull/5424) +- Fix: allow creating 3D space views for pinhole-only 3D scenes [#5563](https://github.com/rerun-io/rerun/pull/5563) +- Fix depth cloud bounding boxes for depth cloud visualizations with transforms [#5578](https://github.com/rerun-io/rerun/pull/5578) +- Fix image view not handling images with extra leading dimensions of size `1` [#5579](https://github.com/rerun-io/rerun/pull/5579) +- Fix web viewer crash on invalid url parameter [#5631](https://github.com/rerun-io/rerun/pull/5631) +- Be consistent in how items are removed from selection [#5643](https://github.com/rerun-io/rerun/pull/5643) +- Fix layout issue on welcome screen for narrow window, triggering debug assertion [#5650](https://github.com/rerun-io/rerun/pull/5650) +- Fix broken 2D space view heuristics in Python Notebooks [#5674](https://github.com/rerun-io/rerun/pull/5674) +- Avoid a hang on linux by always create the renderer, even when we have no store_view [#5724](https://github.com/rerun-io/rerun/pull/5724) +- Fix crash/freeze when zooming out too far in a plot [#5737](https://github.com/rerun-io/rerun/pull/5737) +- Fix `draw_order` not working [#5794](https://github.com/rerun-io/rerun/pull/5794) + +#### 🌁 Viewer Improvements +- Remove groups from blueprints panel [#5326](https://github.com/rerun-io/rerun/pull/5326) +- Improved tracking of which space views were generated by a heuristic [#5419](https://github.com/rerun-io/rerun/pull/5419) +- Configurable background color for 3D Space Views [#5443](https://github.com/rerun-io/rerun/pull/5443) +- Save recordings from web viewer [#5488](https://github.com/rerun-io/rerun/pull/5488) +- Support loading `.rbl` blueprint files [#5513](https://github.com/rerun-io/rerun/pull/5513) +- Tensor space view can now show images [#5567](https://github.com/rerun-io/rerun/pull/5567) +- Entity path query now shows simple statistics and warns if nothing is displayed [#5693](https://github.com/rerun-io/rerun/pull/5693) +- Go back to example page with browser Back-button [#5750](https://github.com/rerun-io/rerun/pull/5750) +- On Web, implement navigating back/forward with mouse buttons [#5792](https://github.com/rerun-io/rerun/pull/5792) +- Support displaying 1D tensors [#5837](https://github.com/rerun-io/rerun/pull/5837) + +#### πŸ§‘β€πŸ« Examples +- New `incremental_logging` example [#5462](https://github.com/rerun-io/rerun/pull/5462) +- New standalone example showing blueprint configuration of some stock [#5603](https://github.com/rerun-io/rerun/pull/5603) +- New example visualizing KISS-ICP [#5546](https://github.com/rerun-io/rerun/pull/5546) (thanks [@02alexander](https://github.com/02alexander)!) 
+- Remove car example [#5576](https://github.com/rerun-io/rerun/pull/5576) +- Add blueprint to `arkit_scenes` example, leveraging the viewer's ability to re-project 3D->2D [#5510](https://github.com/rerun-io/rerun/pull/5510) +- Add blueprint to `nuscenes` example [#5556](https://github.com/rerun-io/rerun/pull/5556) +- Add blueprint to Face Tracking example [#5616](https://github.com/rerun-io/rerun/pull/5616) +- Add blueprint to Gesture Detection example [#5619](https://github.com/rerun-io/rerun/pull/5619) +- Add blueprint to Human Pose Tracking example [#5612](https://github.com/rerun-io/rerun/pull/5612) +- Add blueprint to Live Camera Edge Detection example [#5613](https://github.com/rerun-io/rerun/pull/5613) +- Add blueprint to LLM Embedding Ner example [#5614](https://github.com/rerun-io/rerun/pull/5614) +- Add blueprint to Objectron example [#5617](https://github.com/rerun-io/rerun/pull/5617) +- Add blueprint to Signed Distance Fields example [#5635](https://github.com/rerun-io/rerun/pull/5635) +- Add blueprint to the RGBD example [#5623](https://github.com/rerun-io/rerun/pull/5623) +- ARFlow Example Page [#5320](https://github.com/rerun-io/rerun/pull/5320) (thanks [@YiqinZhao](https://github.com/YiqinZhao)!) +- Fix controlnet example for current `controlnet` package version and add blueprint [#5634](https://github.com/rerun-io/rerun/pull/5634) +- Fix RRT-Star example not showing up on website or rerun.io/viewer [#5628](https://github.com/rerun-io/rerun/pull/5628) +- Fix not logging 3D gesture z component correctly in Gesture Detection example [#5630](https://github.com/rerun-io/rerun/pull/5630) (thanks [@andreasnaoum](https://github.com/andreasnaoum)!) +- Updated READMEs for examples: LLM Embedding-Based Named Entity Recognition, nuScenes, Objectron, Open Photogrammetry Format, Raw Mesh [#5653](https://github.com/rerun-io/rerun/pull/5653) (thanks [@andreasnaoum](https://github.com/andreasnaoum)!) +- Updated READMEs for the examples - Batch 1 [#5620](https://github.com/rerun-io/rerun/pull/5620) (thanks [@andreasnaoum](https://github.com/andreasnaoum)!) + +#### πŸ“š Docs +- Docs: improve discoverability of image compression [#5675](https://github.com/rerun-io/rerun/pull/5675) +- Improve getting started doc section [#5689](https://github.com/rerun-io/rerun/pull/5689) +- Update web viewer links [#5738](https://github.com/rerun-io/rerun/pull/5738) +- Update docs with guides and tutorials for blueprint [#5641](https://github.com/rerun-io/rerun/pull/5641) +- Update README and description of `arkit_scenes` example [#5711](https://github.com/rerun-io/rerun/pull/5711) (thanks [@BirgerMoell](https://github.com/BirgerMoell)!) +- Improve readme of `depth_guided_stable_diffusion` example [#5593](https://github.com/rerun-io/rerun/pull/5593) (thanks [@BirgerMoell](https://github.com/BirgerMoell)!) + +#### πŸ–Ό UI Improvements +- New timezone option: seconds since unix epoch [#5450](https://github.com/rerun-io/rerun/pull/5450) (thanks [@murgeljm](https://github.com/murgeljm)!) 
+- Always enable entity path filter editor [#5331](https://github.com/rerun-io/rerun/pull/5331) +- Add icons for entities and components, and use them everywhere [#5318](https://github.com/rerun-io/rerun/pull/5318) +- Add support for context menu for viewport tab title and selected container's children list [#5321](https://github.com/rerun-io/rerun/pull/5321) +- Fix `ListItem` indentation so icons are properly aligned [#5340](https://github.com/rerun-io/rerun/pull/5340) +- Blueprint tree always starts at the origin now, "projected" paths are called out explicitly [#5342](https://github.com/rerun-io/rerun/pull/5342) +- Merge example page into welcome screen [#5329](https://github.com/rerun-io/rerun/pull/5329) +- `ListItem`'s collapsing triangle is now styled consistently with the rest of the item [#5354](https://github.com/rerun-io/rerun/pull/5354) +- Add helpers to enable stable and controllable collapsed state in hierarchical lists [#5362](https://github.com/rerun-io/rerun/pull/5362) +- Different icon for empty entity paths [#5338](https://github.com/rerun-io/rerun/pull/5338) +- Merge quick start guides [#5378](https://github.com/rerun-io/rerun/pull/5378) +- Update welcome screen panel illustrations [#5394](https://github.com/rerun-io/rerun/pull/5394) +- More context menu in blueprint and streams tree: + - Refactor [#5392](https://github.com/rerun-io/rerun/pull/5392) + - Add support to show/hide `DataResult`s [#5397](https://github.com/rerun-io/rerun/pull/5397) + - Add support for removing `DataResult` from a space view [#5407](https://github.com/rerun-io/rerun/pull/5407) + - Create a new space view with selected entities [#5411](https://github.com/rerun-io/rerun/pull/5411) + - Add context menu to streams tree [#5422](https://github.com/rerun-io/rerun/pull/5422) + - Add "Expand/Collapse all" actions [#5433](https://github.com/rerun-io/rerun/pull/5433) + - Cleanup [#5456](https://github.com/rerun-io/rerun/pull/5456) +- Automatically expand and scroll the blueprint tree when focusing on an item [#5482](https://github.com/rerun-io/rerun/pull/5482) +- Save blueprint to file [#5491](https://github.com/rerun-io/rerun/pull/5491) +- Add new design guidelines for title casing etc [#5501](https://github.com/rerun-io/rerun/pull/5501) +- Automatically expand and scroll the streams tree when focusing on an item [#5494](https://github.com/rerun-io/rerun/pull/5494) +- Reduce the height of the tab bars and side panel titles [#5609](https://github.com/rerun-io/rerun/pull/5609) +- Support toggling item visibility on touch screens [#5624](https://github.com/rerun-io/rerun/pull/5624) +- Select active recording if nothing else is selected [#5627](https://github.com/rerun-io/rerun/pull/5627) +- Enable selecting data sources and blueprints and recordings in them [#5646](https://github.com/rerun-io/rerun/pull/5646) +- Warn user when a software rasterizer is used [#5655](https://github.com/rerun-io/rerun/pull/5655) +- Improve spacing and alignment of menus [#5680](https://github.com/rerun-io/rerun/pull/5680) +- Simplify Welcome Screen and use card-based layout for examples [#5699](https://github.com/rerun-io/rerun/pull/5699) +- Make selection history global instead of per recordings [#5739](https://github.com/rerun-io/rerun/pull/5739) +- Improve formatting of numbers on plot Y axis [#5753](https://github.com/rerun-io/rerun/pull/5753) +- Show all loaded applications in recordings panel [#5766](https://github.com/rerun-io/rerun/pull/5766) +- Wider selection panel by default 
[#5777](https://github.com/rerun-io/rerun/pull/5777) +- Tighter UI for tensor, annotation-context, view coordinates, recording [#5782](https://github.com/rerun-io/rerun/pull/5782) +- Always show welcome screen, but sometimes fade it in [#5787](https://github.com/rerun-io/rerun/pull/5787) + +#### πŸ•ΈοΈ Web +- Support loading multiple recordings and/or blueprints in web-viewer [#5548](https://github.com/rerun-io/rerun/pull/5548) +- Build release `.wasm` with debug symbols [#5708](https://github.com/rerun-io/rerun/pull/5708) + +#### πŸ§‘β€πŸ’» Dev-experience +- Build wheels for Linux ARM64 [#5511](https://github.com/rerun-io/rerun/pull/5511) + + +#### πŸ“¦ Dependencies +- Update wgpu to 0.19.3 [#5409](https://github.com/rerun-io/rerun/pull/5409) +- Update h2 to 0.3.26 to address RUSTSEC-2024-0332 [#5775](https://github.com/rerun-io/rerun/pull/5775) + +#### πŸ€·β€ Other +- Build CLI for Linux ARM64 [#5503](https://github.com/rerun-io/rerun/pull/5503) +- Allow hiding/showing entity subtrees under shown/hidden parent tree [#5508](https://github.com/rerun-io/rerun/pull/5508) +- Introduce basic support for `$origin` substitution in `EntityPathFilter` [#5517](https://github.com/rerun-io/rerun/pull/5517) +- Introduce `rr.notebook_show()` to simplify notebook experience [#5715](https://github.com/rerun-io/rerun/pull/5715) +- Also remove nested inclusions when removing a subtree [#5720](https://github.com/rerun-io/rerun/pull/5720) +- Prevent gratuitous blueprint saves by not garbage collecting when the blueprint hasn't changed [#5793](https://github.com/rerun-io/rerun/pull/5793) +- Refactor `Selection` using `IndexMap` and make it more encapsulated [#5569](https://github.com/rerun-io/rerun/pull/5569) + + +## [0.14.1](https://github.com/rerun-io/rerun/compare/0.14.0...0.14.1) - C++ build artifact fix - 2024-02-29 This release is identical to 0.14.0 and merely fixes an issue in the build artifacts for C++: 0.14.0 only contained binaries for Linux x64, this release has the full set for Linux x64, Windows x64, Mac x64 & Mac Arm64. -## [0.14.0](https://github.com/rerun-io/rerun/compare/0.13.0...0.14.0) - "Unlimited" point clouds & lines, quality of life improvements, bugfixes +## [0.14.0](https://github.com/rerun-io/rerun/compare/0.13.0...0.14.0) - "Unlimited" point clouds & lines, quality of life improvements, bugfixes - 2024-02-28 -### Overview & highlights +### ✨ Overview & highlights Originally, we planned to do only a bugfix release, but we got an unexpected amount of goodies amassed already. We're still ramping up for programmable blueprints (soon!), but meanwhile enjoy these improvements in 0.14! @@ -22,7 +221,7 @@ We're still ramping up for programmable blueprints (soon!), but meanwhile enjoy - πŸ› Fixes regressions in Space View spawn heuristics from 0.13, and many more bugfixes. 
- πŸ§‘β€πŸ« Two new examples: [Gesture Recognition](https://github.com/rerun-io/rerun/tree/release-0.14.0/examples/python/gesture_detection) & [RRT* Pathfinding](https://github.com/rerun-io/rerun/tree/release-0.14.0/examples/python/rrt-star) -### Details +### πŸ”Ž Details #### πŸͺ΅ Log API - Add helpers for perspective cameras [#5238](https://github.com/rerun-io/rerun/pull/5238) @@ -89,7 +288,7 @@ We're still ramping up for programmable blueprints (soon!), but meanwhile enjoy ## [0.13.0](https://github.com/rerun-io/rerun/compare/0.12.1...0.13.0) - Fast time series, improved layout editing & UI overrides - 2024-02-12 -### Overview & highlights +### ✨ Overview & highlights This release focuses on scalar time series -- both from a performance and UI perspectives. Check out our [associated blog post](https://www.rerun.io/blog/fast-plots) for more information. @@ -126,7 +325,7 @@ As well as a lot of miscellaneous bug fixes and usability improvements: see deta Check out our [migration guide](https://www.rerun.io/docs/reference/migration/migration-0-13). -### Details +### πŸ”Ž Details #### πŸͺ΅ Log API - Mark TimeSeriesScalar as deprecated in all SDKs and documentation [#5102](https://github.com/rerun-io/rerun/pull/5102) @@ -260,12 +459,12 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi - Relax pyarrow dependency to `>=14.0.2` [#5054](https://github.com/rerun-io/rerun/pull/5054) - Update egui_tiles to 0.7.2 [#5107](https://github.com/rerun-io/rerun/pull/5107) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Add `rerun --serve` and improve `--help` [#4834](https://github.com/rerun-io/rerun/pull/4834) - `rerun print`: print just summary, unless given `--verbose` [#5079](https://github.com/rerun-io/rerun/pull/5079) -## [0.12.1](https://github.com/rerun-io/rerun/compare/0.12.0...0.12.1) - 2024-01-17 - Data loader bug fixes +## [0.12.1](https://github.com/rerun-io/rerun/compare/0.12.0...0.12.1) - Data loader bug fixes - 2024-01-17 #### 🌊 C++ API - Fix CMake trying to pick up test folders outside of the Rerun project/zip [#4770](https://github.com/rerun-io/rerun/pull/4770) (thanks [@KevinGliewe](https://github.com/KevinGliewe)!) @@ -282,14 +481,14 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi #### πŸ§‘β€πŸ’» Dev-experience - External loaders: remove warnings on duplicated binary on `$PATH` [#4833](https://github.com/rerun-io/rerun/pull/4833) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Include `Cargo.lock` in `rerun-cli` crate [#4750](https://github.com/rerun-io/rerun/pull/4750) - Replace `atty` dependency with `std::io::IsTerminal` [#4790](https://github.com/rerun-io/rerun/pull/4790) (thanks [@kpreid](https://github.com/kpreid)!) ## [0.12.0](https://github.com/rerun-io/rerun/compare/0.11.0...0.12.0) - Data Loaders, Container-editing, Python-3.12 - 2024-01-09 -### Overview & highlights +### ✨ Overview & highlights - 🌁 The Rerun Viewer now supports a plugin system for creating [arbitrary external data loaders](https://www.rerun.io/docs/howto/open-any-file). - πŸ•ΈοΈ More built-in examples are now available in the viewer. - 🐍 The Python SDK now works with Python-3.12. @@ -300,7 +499,7 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi - There is no need for " quotes around path parts, instead we now use \ to escape special characters. - You need to escape any character that isn't alphabetical, numeric, ., -, or _. 
-### Details +### πŸ”Ž Details #### 🌊 C++ API - Exposing `recording_id` in C and C++ SDKs [#4384](https://github.com/rerun-io/rerun/pull/4384) @@ -422,7 +621,7 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi - Update egui and wgpu [#4111](https://github.com/rerun-io/rerun/pull/4111) - Update Rust to 1.74.0 [#4390](https://github.com/rerun-io/rerun/pull/4390) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Use `:` instead of `.` as the entity:component separator in paths [#4471](https://github.com/rerun-io/rerun/pull/4471) - File-like entity paths [#4476](https://github.com/rerun-io/rerun/pull/4476) - Make the new container blueprints the default behavior [#4642](https://github.com/rerun-io/rerun/pull/4642) @@ -430,7 +629,7 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi ## [0.11.0](https://github.com/rerun-io/rerun/compare/0.10.1...0.11.0) - C++ improvements & better Visible History - 2023-11-28 -### Overview & highlights +### ✨ Overview & highlights - 🌊 C++ SDK improvements - [Reference docs are live!](https://ref.rerun.io/docs/cpp/) @@ -451,7 +650,7 @@ Check out our [migration guide](https://www.rerun.io/docs/reference/migration/mi Special thanks to @dvad & @dangush for contributing! -### Details +### πŸ”Ž Details #### 🌊 C++ SDK - Support std::chrono types for `set_time` on `rerun::RecordingStream` [#4134](https://github.com/rerun-io/rerun/pull/4134) @@ -515,7 +714,7 @@ Special thanks to @dvad & @dangush for contributing! ## [0.10.1](https://github.com/rerun-io/rerun/compare/0.10.0...0.10.1) - 2023-11-02 -### Overview & highlights +### ✨ Overview & highlights This is a small release primarily to tie up some loose ends for our C++ SDK. #### 🌊 C++ SDK @@ -539,7 +738,7 @@ This is a small release primarily to tie up some loose ends for our C++ SDK. Release blog post: -### Overview & highlights +### ✨ Overview & highlights * The C++ SDK is finally here! ```cpp #include @@ -557,7 +756,7 @@ Release blog post: * Add support for NV12-encoded images [#3541](https://github.com/rerun-io/rerun/pull/3541) (thanks [@zrezke](https://github.com/zrezke)!) * We now publish pre-built binaries for each release at -### Details +### πŸ”Ž Details #### 🌊 C++ SDK - Has all the features of the Python and C++ SDK:s @@ -625,7 +824,7 @@ Release blog post: * Rust: `cargo add rerun` and `cargo install rerun-cli` * Online demo: -### Overview & highlights +### ✨ Overview & highlights - A bunch of bug fixes - Fix big performance regression when hovering images - The Rerun Viewer should now be visible to the system accessibility system @@ -687,7 +886,7 @@ Release blog post: * Online demo: -### Overview & highlights +### ✨ Overview & highlights Rerun 0.9.0 is a big release, that introduces a brand new logging API. This API is code-generated from a common definition, meaning the Python and Rust SDKs are very similar now. This will let us more easily extend and improve the API going forward. @@ -860,7 +1059,7 @@ Other highlights: - Update to egui 0.23 [#3523](https://github.com/rerun-io/rerun/pull/3523) - Update to wgpu 0.17 [#2980](https://github.com/rerun-io/rerun/pull/2980) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Always protect at least one value on the timeline when running GC [#3357](https://github.com/rerun-io/rerun/pull/3357) @@ -916,7 +1115,7 @@ Other highlights: * Online demo: -### Overview & highlights +### ✨ Overview & highlights - `log_pinhole` is now easier to use in simple cases and supports non-RDF camera coordinates. 
[#2614](https://github.com/rerun-io/rerun/pull/2614) - You only need to set focal length and optional principal point instead of setting the full 3x3 matrix. - There is also a new argument: `camera_xyz` for setting the coordinate system. The default is RDF (the old @@ -1149,7 +1348,7 @@ for use-cases like real-time video feeds. [#2220](https://github.com/rerun-io/re - C++ codegen of structs and unions [#2707](https://github.com/rerun-io/rerun/pull/2707) - Fix cpp formatter differences [#2773](https://github.com/rerun-io/rerun/pull/2773) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - test_api: set different app_id based on what test is run [#2599](https://github.com/rerun-io/rerun/pull/2599) - Introduce `rerun compare` to check whether 2 rrd files are functionally equivalent [#2597](https://github.com/rerun-io/rerun/pull/2597) - Remove `files.exclude` in vscode settings [#2621](https://github.com/rerun-io/rerun/pull/2621) @@ -1159,7 +1358,7 @@ for use-cases like real-time video feeds. [#2220](https://github.com/rerun-io/re ## [0.7.0](https://github.com/rerun-io/rerun/compare/0.6.0...0.7.0) - improved transforms, better color mapping, bug & doc fixes - 2023-06-16 -### Overview & highlights +### ✨ Overview & highlights While we're working on significant updates around interfaces and customizability, here's a smaller release packed with useful improvements πŸŽ‰ @@ -1251,7 +1450,7 @@ here's a smaller release packed with useful improvements πŸŽ‰ ## [0.6.0](https://github.com/rerun-io/rerun/compare/v0.5.1...0.6.0) - 3D in 2D and SDK batching - 2023-05-26 -### Overview & highlights +### ✨ Overview & highlights - You can now show 3D objects in 2D views connected by Pinhole transforms [#2008](https://github.com/rerun-io/rerun/pull/2008) - You can quickly view images and meshes with `rerun mesh.obj image.png` [#2060](https://github.com/rerun-io/rerun/pull/2060) @@ -1413,7 +1612,7 @@ here's a smaller release packed with useful improvements πŸŽ‰ - Fix `cargo test` [#2199](https://github.com/rerun-io/rerun/pull/2199) - Fix run all for new rust-cli target & add rerun-web alias for quick running of the web player [#2203](https://github.com/rerun-io/rerun/pull/2203) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Fix secret in dispatch_lint.yml [4848f98f2605a3caf9b7695273e0871efa2d44c8](https://github.com/rerun-io/rerun/commit/4848f98f2605a3caf9b7695273e0871efa2d44c8) - Only maintain a single manual-dispatch job for testing workflows [98f7de3b52b0fea6abe364f9d0ce0bd4c459caf1](https://github.com/rerun-io/rerun/commit/98f7de3b52b0fea6abe364f9d0ce0bd4c459caf1) - Add other build parametrizations to manual_dispatch.yml [dbdf275eaf17220d14811dc34b69b6a76e948e73](https://github.com/rerun-io/rerun/commit/dbdf275eaf17220d14811dc34b69b6a76e948e73) @@ -1448,7 +1647,7 @@ here's a smaller release packed with useful improvements πŸŽ‰ ## [0.5.1](https://github.com/rerun-io/rerun/compare/v0.5.1...v0.5.0) - Patch Release - 2023-05-01 -### Overview & highlights +### ✨ Overview & highlights This Release fixes a few small bugs on top of the v0.5.0 release. ### In Detail @@ -1461,7 +1660,7 @@ This Release fixes a few small bugs on top of the v0.5.0 release. ## [0.5.0](https://github.com/rerun-io/rerun/compare/v0.4.0...v0.5.0) - Jupyter MVP, GPU-based picking & colormapping, new datastore! - 2023-04-20 -### Overview & highlights +### ✨ Overview & highlights This new release adds MVP support for embedding Rerun in Jupyter notebooks, and brings significant performance improvements across all layers of the stack. 
@@ -1557,7 +1756,7 @@ This new release adds MVP support for embedding Rerun in Jupyter notebooks, and - Show previews of colormaps when selecting them [#1846](https://github.com/rerun-io/rerun/pull/1846) - Smooth out scroll wheel input for camera zooming [#1920](https://github.com/rerun-io/rerun/pull/1920) -#### πŸ€·β€β™‚οΈ Other Viewer improvements +#### 🀷 Other Viewer improvements - Change `EntityPathHash` to be 64 bit [#1723](https://github.com/rerun-io/rerun/pull/1723) - Central `GpuReadback` handling for re_viewer, experimental space view screenshots [#1717](https://github.com/rerun-io/rerun/pull/1717) - Readback depth from GPU picking [#1752](https://github.com/rerun-io/rerun/pull/1752) @@ -1618,7 +1817,7 @@ This new release adds MVP support for embedding Rerun in Jupyter notebooks, and - Improve PR template with better comment, and no copilot by default [#1901](https://github.com/rerun-io/rerun/pull/1901) - Optimize `generate_changelog.py` [#1912](https://github.com/rerun-io/rerun/pull/1912) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Fix videos for GitHub in `CHANGELOG.md` [af7d3b192157f942e35f64d3561a9a8dbcc18bfa](https://github.com/rerun-io/rerun/commit/af7d3b192157f942e35f64d3561a9a8dbcc18bfa) - Don't run 3rd party bench suites on CI [#1787](https://github.com/rerun-io/rerun/pull/1787) - Remove `TensorTrait` [#1819](https://github.com/rerun-io/rerun/pull/1819) @@ -1633,7 +1832,7 @@ This new release adds MVP support for embedding Rerun in Jupyter notebooks, and https://user-images.githubusercontent.com/1220815/228241887-03b311e2-80e9-4541-9281-6d334a15ab04.mp4 -### Overview & highlights +### ✨ Overview & highlights * Add support for mesh vertex colors [#1671](https://github.com/rerun-io/rerun/pull/1671) * Lower memory use [#1535](https://github.com/rerun-io/rerun/pull/1535) * Improve garbage collection [#1560](https://github.com/rerun-io/rerun/pull/1560) @@ -1731,7 +1930,7 @@ We now host an experimental and unpolished web-viewer at - Turn on depth cloud backprojection by default [#1710](https://github.com/rerun-io/rerun/pull/1710) - Add radius boost for depth clouds on outline [#1713](https://github.com/rerun-io/rerun/pull/1713) -#### πŸ€·β€β™‚οΈ Other Viewer improvements +#### 🀷 Other Viewer improvements - Fix web feature name in error messages [#1521](https://github.com/rerun-io/rerun/pull/1521) - Use outlines for mesh selections instead of highlight colors [#1540](https://github.com/rerun-io/rerun/pull/1540) - Implement outlines for line renderer & use them for select & hover of "line-like" primitives in Viewer [#1553](https://github.com/rerun-io/rerun/pull/1553) @@ -1798,7 +1997,7 @@ We now host an experimental and unpolished web-viewer at - Fix `lint.py` [#1719](https://github.com/rerun-io/rerun/pull/1719) - Add a script that generates a changelog from recent PRs and their labels [#1718](https://github.com/rerun-io/rerun/pull/1718) -#### πŸ€·β€β™‚οΈ Other +#### 🀷 Other - Clean up opencv_canny example slightly [b487e550dcb87225858dc6f76b791a25e938e75e](https://github.com/rerun-io/rerun/commit/b487e550dcb87225858dc6f76b791a25e938e75e) - Lint fixes [9901e7c6735356b1970ddabc926bc5378d82e057](https://github.com/rerun-io/rerun/commit/9901e7c6735356b1970ddabc926bc5378d82e057) @@ -1809,7 +2008,7 @@ Remove potentially sensitive analytics, including path to rerun source code on p ## [0.3.0](https://github.com/rerun-io/rerun/compare/v0.2.0...v0.3.0) - 2023-03-07 -### Overview & highlights +### ✨ Overview & highlights After a successful launch a couple of weeks ago, we're back with 
our second release! With a few exceptions this release focuses on internal refactors & improving our processes. diff --git a/Cargo.lock b/Cargo.lock index 5c82c2ac9133..1798fa62c876 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1057,7 +1057,7 @@ dependencies = [ [[package]] name = "clock" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -1394,7 +1394,7 @@ checksum = "96a6ac251f4a2aca6b3f91340350eab87ae57c3f127ffeb585e92bd336717991" [[package]] name = "custom_data_loader" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "re_build_tools", "rerun", @@ -1402,7 +1402,7 @@ dependencies = [ [[package]] name = "custom_space_view" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "mimalloc", "re_crash_handler", @@ -1413,7 +1413,7 @@ dependencies = [ [[package]] name = "custom_store_subscriber" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "re_build_tools", "rerun", @@ -1524,7 +1524,7 @@ dependencies = [ [[package]] name = "dna" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "itertools 0.12.0", "rand", @@ -1955,7 +1955,7 @@ dependencies = [ [[package]] name = "extend_viewer_ui" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "mimalloc", "re_crash_handler", @@ -2618,7 +2618,7 @@ dependencies = [ [[package]] name = "incremental_logging" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -2969,7 +2969,7 @@ dependencies = [ [[package]] name = "log_benchmark" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -2980,7 +2980,7 @@ dependencies = [ [[package]] name = "log_file" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -3121,14 +3121,14 @@ dependencies = [ [[package]] name = "minimal" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", ] [[package]] name = "minimal_options" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -3138,7 +3138,7 @@ dependencies = [ [[package]] name = "minimal_serve" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", "tokio", @@ -3506,7 +3506,7 @@ dependencies = [ [[package]] name = "objectron" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "plot_dashboard_stress" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -4117,7 +4117,7 @@ checksum = "42a9830a0e1b9fb145ebb365b8bc4ccd75f290f98c0247deafbbe2c75cefb544" [[package]] name = "raw_mesh" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "bytes", @@ -4156,7 +4156,7 @@ dependencies = [ [[package]] name = "re_analytics" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "crossbeam", "directories-next", @@ -4198,7 +4198,7 @@ dependencies = [ [[package]] name = "re_build_examples" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "argh", @@ -4213,11 +4213,11 @@ dependencies = [ [[package]] name = "re_build_info" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" [[package]] name = "re_build_search_index" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "argh", @@ -4242,7 +4242,7 @@ dependencies = [ [[package]] name = "re_build_tools" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", 
"cargo_metadata 0.18.1", @@ -4255,7 +4255,7 @@ dependencies = [ [[package]] name = "re_build_web_viewer" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "cargo_metadata 0.18.1", @@ -4265,7 +4265,7 @@ dependencies = [ [[package]] name = "re_crash_handler" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "backtrace", "itertools 0.12.0", @@ -4277,7 +4277,7 @@ dependencies = [ [[package]] name = "re_data_source" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4300,7 +4300,7 @@ dependencies = [ [[package]] name = "re_data_store" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4330,7 +4330,7 @@ dependencies = [ [[package]] name = "re_data_ui" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4359,7 +4359,7 @@ dependencies = [ [[package]] name = "re_entity_db" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4393,14 +4393,14 @@ dependencies = [ [[package]] name = "re_error" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", ] [[package]] name = "re_format" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "comfy-table", "num-traits", @@ -4411,7 +4411,7 @@ dependencies = [ [[package]] name = "re_int_histogram" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "criterion", "insta", @@ -4422,7 +4422,7 @@ dependencies = [ [[package]] name = "re_log" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "env_logger", "js-sys", @@ -4435,7 +4435,7 @@ dependencies = [ [[package]] name = "re_log_encoding" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "criterion", "ehttp", @@ -4460,7 +4460,7 @@ dependencies = [ [[package]] name = "re_log_types" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4498,7 +4498,7 @@ dependencies = [ [[package]] name = "re_memory" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "backtrace", @@ -4519,7 +4519,7 @@ dependencies = [ [[package]] name = "re_query" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "backtrace", "criterion", @@ -4543,7 +4543,7 @@ dependencies = [ [[package]] name = "re_query_cache" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "criterion", @@ -4568,7 +4568,7 @@ dependencies = [ [[package]] name = "re_renderer" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4615,7 +4615,7 @@ dependencies = [ [[package]] name = "re_renderer_examples" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4639,7 +4639,7 @@ dependencies = [ [[package]] name = "re_sdk" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4671,7 +4671,7 @@ dependencies = [ [[package]] name = "re_sdk_comms" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "crossbeam", @@ -4687,7 +4687,7 @@ dependencies = [ [[package]] name = "re_smart_channel" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "crossbeam", "parking_lot", @@ -4698,7 +4698,7 @@ dependencies = [ [[package]] name = "re_space_view" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "egui", @@ -4719,7 +4719,7 @@ dependencies = [ [[package]] name = "re_space_view_bar_chart" 
-version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "egui_plot", @@ -4737,7 +4737,7 @@ dependencies = [ [[package]] name = "re_space_view_dataframe" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "egui_extras", @@ -4755,7 +4755,7 @@ dependencies = [ [[package]] name = "re_space_view_spatial" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4793,7 +4793,7 @@ dependencies = [ [[package]] name = "re_space_view_tensor" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -4819,7 +4819,7 @@ dependencies = [ [[package]] name = "re_space_view_text_document" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "egui_commonmark", @@ -4837,7 +4837,7 @@ dependencies = [ [[package]] name = "re_space_view_text_log" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "egui_extras", @@ -4857,7 +4857,7 @@ dependencies = [ [[package]] name = "re_space_view_time_series" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "egui_plot", @@ -4880,7 +4880,7 @@ dependencies = [ [[package]] name = "re_string_interner" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "nohash-hasher", @@ -4892,7 +4892,7 @@ dependencies = [ [[package]] name = "re_time_panel" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "egui", "itertools 0.12.0", @@ -4911,7 +4911,7 @@ dependencies = [ [[package]] name = "re_tracing" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "puffin", "puffin_http", @@ -4921,7 +4921,7 @@ dependencies = [ [[package]] name = "re_tuid" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "criterion", "document-features", @@ -4933,7 +4933,7 @@ dependencies = [ [[package]] name = "re_types" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "array-init", @@ -4972,7 +4972,7 @@ dependencies = [ [[package]] name = "re_types_builder" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "camino", @@ -4998,7 +4998,7 @@ dependencies = [ [[package]] name = "re_types_core" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "backtrace", @@ -5018,7 +5018,7 @@ dependencies = [ [[package]] name = "re_ui" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "eframe", "egui", @@ -5028,6 +5028,7 @@ dependencies = [ "parking_lot", "rand", "re_entity_db", + "re_format", "re_log", "re_log_types", "serde", @@ -5039,7 +5040,7 @@ dependencies = [ [[package]] name = "re_viewer" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -5104,7 +5105,7 @@ dependencies = [ [[package]] name = "re_viewer_context" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "anyhow", @@ -5146,7 +5147,7 @@ dependencies = [ [[package]] name = "re_viewport" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "array-init", @@ -5181,7 +5182,7 @@ dependencies = [ [[package]] name = "re_web_viewer_server" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "clap", "document-features", @@ -5196,7 +5197,7 @@ dependencies = [ [[package]] name = "re_ws_comms" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "bincode", @@ -5286,7 +5287,7 @@ checksum = "216080ab382b992234dda86873c18d4c48358f5cfcb70fd693d7f6f2131b628b" 
[[package]] name = "rerun" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5320,7 +5321,7 @@ dependencies = [ [[package]] name = "rerun-cli" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "document-features", "mimalloc", @@ -5336,7 +5337,7 @@ dependencies = [ [[package]] name = "rerun-loader-rust-file" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "argh", @@ -5345,7 +5346,7 @@ dependencies = [ [[package]] name = "rerun_c" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "ahash", "once_cell", @@ -5357,7 +5358,7 @@ dependencies = [ [[package]] name = "rerun_py" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "crossbeam", "document-features", @@ -5456,7 +5457,7 @@ dependencies = [ [[package]] name = "roundtrip_annotation_context" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5466,7 +5467,7 @@ dependencies = [ [[package]] name = "roundtrip_arrows2d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5476,7 +5477,7 @@ dependencies = [ [[package]] name = "roundtrip_arrows3d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5486,7 +5487,7 @@ dependencies = [ [[package]] name = "roundtrip_boxes2d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5496,7 +5497,7 @@ dependencies = [ [[package]] name = "roundtrip_boxes3d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5506,7 +5507,7 @@ dependencies = [ [[package]] name = "roundtrip_depth_image" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5517,7 +5518,7 @@ dependencies = [ [[package]] name = "roundtrip_disconnected_space" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5527,7 +5528,7 @@ dependencies = [ [[package]] name = "roundtrip_image" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5540,7 +5541,7 @@ dependencies = [ [[package]] name = "roundtrip_line_strips2d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5550,7 +5551,7 @@ dependencies = [ [[package]] name = "roundtrip_line_strips3d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5560,7 +5561,7 @@ dependencies = [ [[package]] name = "roundtrip_pinhole" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5570,7 +5571,7 @@ dependencies = [ [[package]] name = "roundtrip_points2d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5580,7 +5581,7 @@ dependencies = [ [[package]] name = "roundtrip_points3d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5590,7 +5591,7 @@ dependencies = [ [[package]] name = "roundtrip_segmentation_image" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5601,7 +5602,7 @@ dependencies = [ [[package]] name = "roundtrip_tensor" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5612,7 +5613,7 @@ dependencies = [ [[package]] name = "roundtrip_text_document" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5622,7 +5623,7 @@ dependencies = [ [[package]] 
name = "roundtrip_text_log" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5632,7 +5633,7 @@ dependencies = [ [[package]] name = "roundtrip_transform3d" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5642,7 +5643,7 @@ dependencies = [ [[package]] name = "roundtrip_view_coordinates" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -5658,7 +5659,7 @@ checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f" [[package]] name = "run_wasm" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "cargo-run-wasm", "pico-args", @@ -5933,7 +5934,7 @@ dependencies = [ [[package]] name = "shared_recording" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", ] @@ -6069,7 +6070,7 @@ dependencies = [ [[package]] name = "snippets" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "itertools 0.12.0", "ndarray", @@ -6091,7 +6092,7 @@ dependencies = [ [[package]] name = "spawn_viewer" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", ] @@ -6125,7 +6126,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stdio" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", ] @@ -6221,7 +6222,7 @@ dependencies = [ [[package]] name = "template" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "rerun", ] @@ -6237,7 +6238,7 @@ dependencies = [ [[package]] name = "test_api" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "anyhow", "clap", @@ -6252,7 +6253,7 @@ dependencies = [ [[package]] name = "test_image_memory" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" dependencies = [ "mimalloc", "re_format", diff --git a/Cargo.toml b/Cargo.toml index e32cc1205c89..31f58a329c51 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,59 +20,59 @@ include = ["../../LICENSE-APACHE", "../../LICENSE-MIT", "**/*.rs", "Cargo.toml"] license = "MIT OR Apache-2.0" repository = "https://github.com/rerun-io/rerun" rust-version = "1.74" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" [workspace.dependencies] # When using alpha-release, always use exact version, e.g. `version = "=0.x.y-alpha.z" # This is because we treat alpha-releases as incompatible, but semver doesn't. # In particular: if we compile rerun 0.3.0-alpha.0 we only want it to use # re_log_types 0.3.0-alpha.0, NOT 0.3.0-alpha.4 even though it is newer and semver-compatible. 
-re_analytics = { path = "crates/re_analytics", version = "=0.15.0-alpha.5", default-features = false } -re_build_search_index = { path = "crates/re_build_search_index", version = "=0.15.0-alpha.5", default-features = false } -re_build_examples = { path = "crates/re_build_examples", version = "=0.15.0-alpha.5", default-features = false } -re_build_info = { path = "crates/re_build_info", version = "=0.15.0-alpha.5", default-features = false } -re_build_tools = { path = "crates/re_build_tools", version = "=0.15.0-alpha.5", default-features = false } -re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "=0.15.0-alpha.5", default-features = false } -re_crash_handler = { path = "crates/re_crash_handler", version = "=0.15.0-alpha.5", default-features = false } -re_data_source = { path = "crates/re_data_source", version = "=0.15.0-alpha.5", default-features = false } -re_data_store = { path = "crates/re_data_store", version = "=0.15.0-alpha.5", default-features = false } -re_data_ui = { path = "crates/re_data_ui", version = "=0.15.0-alpha.5", default-features = false } -re_entity_db = { path = "crates/re_entity_db", version = "=0.15.0-alpha.5", default-features = false } -re_error = { path = "crates/re_error", version = "=0.15.0-alpha.5", default-features = false } -re_format = { path = "crates/re_format", version = "=0.15.0-alpha.5", default-features = false } -re_int_histogram = { path = "crates/re_int_histogram", version = "=0.15.0-alpha.5", default-features = false } -re_log = { path = "crates/re_log", version = "=0.15.0-alpha.5", default-features = false } -re_log_encoding = { path = "crates/re_log_encoding", version = "=0.15.0-alpha.5", default-features = false } -re_log_types = { path = "crates/re_log_types", version = "=0.15.0-alpha.5", default-features = false } -re_memory = { path = "crates/re_memory", version = "=0.15.0-alpha.5", default-features = false } -re_query = { path = "crates/re_query", version = "=0.15.0-alpha.5", default-features = false } -re_query_cache = { path = "crates/re_query_cache", version = "=0.15.0-alpha.5", default-features = false } -re_renderer = { path = "crates/re_renderer", version = "=0.15.0-alpha.5", default-features = false } -re_sdk = { path = "crates/re_sdk", version = "=0.15.0-alpha.5", default-features = false } -re_sdk_comms = { path = "crates/re_sdk_comms", version = "=0.15.0-alpha.5", default-features = false } -re_smart_channel = { path = "crates/re_smart_channel", version = "=0.15.0-alpha.5", default-features = false } -re_space_view = { path = "crates/re_space_view", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_bar_chart = { path = "crates/re_space_view_bar_chart", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_dataframe = { path = "crates/re_space_view_dataframe", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_spatial = { path = "crates/re_space_view_spatial", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_tensor = { path = "crates/re_space_view_tensor", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_text_document = { path = "crates/re_space_view_text_document", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_text_log = { path = "crates/re_space_view_text_log", version = "=0.15.0-alpha.5", default-features = false } -re_space_view_time_series = { path = "crates/re_space_view_time_series", version = "=0.15.0-alpha.5", default-features = false } -re_string_interner = { path = 
"crates/re_string_interner", version = "=0.15.0-alpha.5", default-features = false } -re_time_panel = { path = "crates/re_time_panel", version = "=0.15.0-alpha.5", default-features = false } -re_tracing = { path = "crates/re_tracing", version = "=0.15.0-alpha.5", default-features = false } -re_tuid = { path = "crates/re_tuid", version = "=0.15.0-alpha.5", default-features = false } -re_types = { path = "crates/re_types", version = "=0.15.0-alpha.5", default-features = false } -re_types_builder = { path = "crates/re_types_builder", version = "=0.15.0-alpha.5", default-features = false } -re_types_core = { path = "crates/re_types_core", version = "=0.15.0-alpha.5", default-features = false } -re_ui = { path = "crates/re_ui", version = "=0.15.0-alpha.5", default-features = false } -re_viewer = { path = "crates/re_viewer", version = "=0.15.0-alpha.5", default-features = false } -re_viewer_context = { path = "crates/re_viewer_context", version = "=0.15.0-alpha.5", default-features = false } -re_viewport = { path = "crates/re_viewport", version = "=0.15.0-alpha.5", default-features = false } -re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "=0.15.0-alpha.5", default-features = false } -re_ws_comms = { path = "crates/re_ws_comms", version = "=0.15.0-alpha.5", default-features = false } -rerun = { path = "crates/rerun", version = "=0.15.0-alpha.5", default-features = false } +re_analytics = { path = "crates/re_analytics", version = "=0.15.0-rc.3", default-features = false } +re_build_search_index = { path = "crates/re_build_search_index", version = "=0.15.0-rc.3", default-features = false } +re_build_examples = { path = "crates/re_build_examples", version = "=0.15.0-rc.3", default-features = false } +re_build_info = { path = "crates/re_build_info", version = "=0.15.0-rc.3", default-features = false } +re_build_tools = { path = "crates/re_build_tools", version = "=0.15.0-rc.3", default-features = false } +re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "=0.15.0-rc.3", default-features = false } +re_crash_handler = { path = "crates/re_crash_handler", version = "=0.15.0-rc.3", default-features = false } +re_data_source = { path = "crates/re_data_source", version = "=0.15.0-rc.3", default-features = false } +re_data_store = { path = "crates/re_data_store", version = "=0.15.0-rc.3", default-features = false } +re_data_ui = { path = "crates/re_data_ui", version = "=0.15.0-rc.3", default-features = false } +re_entity_db = { path = "crates/re_entity_db", version = "=0.15.0-rc.3", default-features = false } +re_error = { path = "crates/re_error", version = "=0.15.0-rc.3", default-features = false } +re_format = { path = "crates/re_format", version = "=0.15.0-rc.3", default-features = false } +re_int_histogram = { path = "crates/re_int_histogram", version = "=0.15.0-rc.3", default-features = false } +re_log = { path = "crates/re_log", version = "=0.15.0-rc.3", default-features = false } +re_log_encoding = { path = "crates/re_log_encoding", version = "=0.15.0-rc.3", default-features = false } +re_log_types = { path = "crates/re_log_types", version = "=0.15.0-rc.3", default-features = false } +re_memory = { path = "crates/re_memory", version = "=0.15.0-rc.3", default-features = false } +re_query = { path = "crates/re_query", version = "=0.15.0-rc.3", default-features = false } +re_query_cache = { path = "crates/re_query_cache", version = "=0.15.0-rc.3", default-features = false } +re_renderer = { path = "crates/re_renderer", version = "=0.15.0-rc.3", 
default-features = false } +re_sdk = { path = "crates/re_sdk", version = "=0.15.0-rc.3", default-features = false } +re_sdk_comms = { path = "crates/re_sdk_comms", version = "=0.15.0-rc.3", default-features = false } +re_smart_channel = { path = "crates/re_smart_channel", version = "=0.15.0-rc.3", default-features = false } +re_space_view = { path = "crates/re_space_view", version = "=0.15.0-rc.3", default-features = false } +re_space_view_bar_chart = { path = "crates/re_space_view_bar_chart", version = "=0.15.0-rc.3", default-features = false } +re_space_view_dataframe = { path = "crates/re_space_view_dataframe", version = "=0.15.0-rc.3", default-features = false } +re_space_view_spatial = { path = "crates/re_space_view_spatial", version = "=0.15.0-rc.3", default-features = false } +re_space_view_tensor = { path = "crates/re_space_view_tensor", version = "=0.15.0-rc.3", default-features = false } +re_space_view_text_document = { path = "crates/re_space_view_text_document", version = "=0.15.0-rc.3", default-features = false } +re_space_view_text_log = { path = "crates/re_space_view_text_log", version = "=0.15.0-rc.3", default-features = false } +re_space_view_time_series = { path = "crates/re_space_view_time_series", version = "=0.15.0-rc.3", default-features = false } +re_string_interner = { path = "crates/re_string_interner", version = "=0.15.0-rc.3", default-features = false } +re_time_panel = { path = "crates/re_time_panel", version = "=0.15.0-rc.3", default-features = false } +re_tracing = { path = "crates/re_tracing", version = "=0.15.0-rc.3", default-features = false } +re_tuid = { path = "crates/re_tuid", version = "=0.15.0-rc.3", default-features = false } +re_types = { path = "crates/re_types", version = "=0.15.0-rc.3", default-features = false } +re_types_builder = { path = "crates/re_types_builder", version = "=0.15.0-rc.3", default-features = false } +re_types_core = { path = "crates/re_types_core", version = "=0.15.0-rc.3", default-features = false } +re_ui = { path = "crates/re_ui", version = "=0.15.0-rc.3", default-features = false } +re_viewer = { path = "crates/re_viewer", version = "=0.15.0-rc.3", default-features = false } +re_viewer_context = { path = "crates/re_viewer_context", version = "=0.15.0-rc.3", default-features = false } +re_viewport = { path = "crates/re_viewport", version = "=0.15.0-rc.3", default-features = false } +re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "=0.15.0-rc.3", default-features = false } +re_ws_comms = { path = "crates/re_ws_comms", version = "=0.15.0-rc.3", default-features = false } +rerun = { path = "crates/rerun", version = "=0.15.0-rc.3", default-features = false } # egui-crates: ecolor = "0.27.2" diff --git a/crates/re_build_examples/src/rrd.rs b/crates/re_build_examples/src/rrd.rs index fba6886ec4d4..efddf252d2d2 100644 --- a/crates/re_build_examples/src/rrd.rs +++ b/crates/re_build_examples/src/rrd.rs @@ -27,7 +27,7 @@ impl Rrd { create_dir_all(&self.output_dir)?; let workspace_root = re_build_tools::cargo_metadata()?.workspace_root; - let examples = if self.examples.is_empty() { + let mut examples = if self.examples.is_empty() { self.channel.examples(workspace_root)? 
} else { Channel::Nightly @@ -36,6 +36,8 @@ impl Rrd { .filter(|example| self.examples.contains(&example.name)) .collect() }; + examples.sort_by(|a, b| a.name.cmp(&b.name)); + let progress = MultiProgress::new(); let results: Vec> = examples .into_par_iter() diff --git a/crates/re_data_ui/src/component.rs b/crates/re_data_ui/src/component.rs index f572dcaee7a3..95976dbf4043 100644 --- a/crates/re_data_ui/src/component.rs +++ b/crates/re_data_ui/src/component.rs @@ -1,7 +1,9 @@ use egui::NumExt; + use re_entity_db::{EntityPath, InstancePath}; use re_query::ComponentWithInstances; use re_types::ComponentName; +use re_ui::SyntaxHighlighting as _; use re_viewer_context::{UiVerbosity, ViewerContext}; use super::{table_for_verbosity, DataUi}; @@ -99,11 +101,12 @@ impl DataUi for EntityComponentWithInstances { table_for_verbosity(verbosity, ui) .resizable(false) .cell_layout(egui::Layout::left_to_right(egui::Align::Center)) - .columns(egui_extras::Column::auto(), 2) + .column(egui_extras::Column::auto()) + .column(egui_extras::Column::remainder()) .header(re_ui::ReUi::table_header_height(), |mut header| { re_ui::ReUi::setup_table_header(&mut header); header.col(|ui| { - ui.label("Instance key"); + ui.label("Index"); }); header.col(|ui| { ui.label(self.component_name().short_name()); @@ -124,7 +127,7 @@ impl DataUi for EntityComponentWithInstances { ui, None, &instance_path, - instance_key.to_string(), + instance_key.syntax_highlighted(ui.style()), ); }); row.col(|ui| { diff --git a/crates/re_data_ui/src/item_ui.rs b/crates/re_data_ui/src/item_ui.rs index db25d48b83fb..54fd5482749f 100644 --- a/crates/re_data_ui/src/item_ui.rs +++ b/crates/re_data_ui/src/item_ui.rs @@ -283,7 +283,7 @@ pub fn instance_path_parts_buttons( } if !instance_path.instance_key.is_splat() { - ui.strong("/"); + ui.weak("["); instance_path_button_to_ex( ctx, query, @@ -294,6 +294,7 @@ pub fn instance_path_parts_buttons( instance_path.instance_key.syntax_highlighted(ui.style()), with_icon, ); + ui.weak("]"); } }) .response diff --git a/crates/re_entity_db/src/store_bundle.rs b/crates/re_entity_db/src/store_bundle.rs index e1583074f486..200d01d6f29d 100644 --- a/crates/re_entity_db/src/store_bundle.rs +++ b/crates/re_entity_db/src/store_bundle.rs @@ -40,6 +40,10 @@ impl StoreBundle { Ok(slf) } + pub fn is_empty(&self) -> bool { + self.entity_dbs.is_empty() + } + /// All loaded [`EntityDb`], both recordings and blueprints, in arbitrary order. pub fn entity_dbs(&self) -> impl Iterator { self.entity_dbs.values() @@ -78,7 +82,7 @@ impl StoreBundle { /// One is created if it doesn't already exist. pub fn entry(&mut self, id: &StoreId) -> &mut EntityDb { self.entity_dbs.entry(id.clone()).or_insert_with(|| { - re_log::debug!("Creating new store: {id}"); + re_log::debug!("Creating new store: {id:?}"); EntityDb::new(id.clone()) }) } diff --git a/crates/re_format/Cargo.toml b/crates/re_format/Cargo.toml index ba81b0d51558..32a9b4aafef5 100644 --- a/crates/re_format/Cargo.toml +++ b/crates/re_format/Cargo.toml @@ -26,6 +26,7 @@ default = [] [dependencies] num-traits.workspace = true +# TODO(emilk): remove these high-level dependencies, or split this crate into two. 
arrow2 = { workspace = true, optional = true } comfy-table = { workspace = true, optional = true } re_tuid = { workspace = true, optional = true } diff --git a/crates/re_renderer/src/allocator/gpu_readback_belt.rs b/crates/re_renderer/src/allocator/gpu_readback_belt.rs index ae6d0215c509..3c1c69f4a24b 100644 --- a/crates/re_renderer/src/allocator/gpu_readback_belt.rs +++ b/crates/re_renderer/src/allocator/gpu_readback_belt.rs @@ -267,8 +267,7 @@ impl GpuReadbackBelt { } else { // Allocation might be bigger than a chunk! let buffer_size = self.chunk_size.max(size_in_bytes); - // Happens relatively rarely, this is a noteworthy event! - re_log::debug!( + re_log::trace!( "Allocating new GpuReadbackBelt chunk of size {:.1} MiB", buffer_size as f32 / (1024.0 * 1024.0) ); diff --git a/crates/re_space_view_bar_chart/src/space_view_class.rs b/crates/re_space_view_bar_chart/src/space_view_class.rs index 2ee86651d9e0..fe408e003fae 100644 --- a/crates/re_space_view_bar_chart/src/space_view_class.rs +++ b/crates/re_space_view_bar_chart/src/space_view_class.rs @@ -4,8 +4,9 @@ use re_log_types::EntityPath; use re_space_view::{controls, suggest_space_view_for_each_entity}; use re_types::datatypes::TensorBuffer; use re_viewer_context::{ - auto_color, SpaceViewClass, SpaceViewClassIdentifier, SpaceViewClassRegistryError, SpaceViewId, - SpaceViewState, SpaceViewSystemExecutionError, ViewQuery, ViewerContext, + auto_color, IdentifiedViewSystem as _, IndicatedEntities, PerVisualizer, SpaceViewClass, + SpaceViewClassIdentifier, SpaceViewClassRegistryError, SpaceViewId, SpaceViewState, + SpaceViewSystemExecutionError, ViewQuery, ViewerContext, VisualizableEntities, }; use super::visualizer_system::BarChartVisualizerSystem; @@ -66,6 +67,27 @@ impl SpaceViewClass for BarChartSpaceView { None } + fn choose_default_visualizers( + &self, + entity_path: &EntityPath, + visualizable_entities_per_visualizer: &PerVisualizer, + _indicated_entities_per_visualizer: &PerVisualizer, + ) -> re_viewer_context::SmallVisualizerSet { + // Default implementation would not suggest the BarChart visualizer for tensors and 1D images, + // since they're not indicated with a BarChart indicator. + // (and as of writing, something needs to be both visualizable and indicated to be shown in a visualizer) + + // Keeping this implementation simple: We know there's only a single visualizer here. 
+ if visualizable_entities_per_visualizer + .get(&BarChartVisualizerSystem::identifier()) + .map_or(false, |entities| entities.contains(entity_path)) + { + std::iter::once(BarChartVisualizerSystem::identifier()).collect() + } else { + Default::default() + } + } + fn spawn_heuristics( &self, ctx: &ViewerContext<'_>, diff --git a/crates/re_space_view_tensor/src/dimension_mapping.rs b/crates/re_space_view_tensor/src/dimension_mapping.rs index 2f3d89427db7..208cecee2bb6 100644 --- a/crates/re_space_view_tensor/src/dimension_mapping.rs +++ b/crates/re_space_view_tensor/src/dimension_mapping.rs @@ -45,8 +45,8 @@ impl DimensionMapping { }, 1 => DimensionMapping { - selectors: vec![DimensionSelector::new(0)], - width: None, + selectors: Default::default(), + width: Some(0), height: None, invert_width: false, invert_height: false, diff --git a/crates/re_space_view_tensor/src/space_view_class.rs b/crates/re_space_view_tensor/src/space_view_class.rs index eb6eec99447c..022840280a8b 100644 --- a/crates/re_space_view_tensor/src/space_view_class.rs +++ b/crates/re_space_view_tensor/src/space_view_class.rs @@ -565,18 +565,37 @@ pub fn selected_tensor_slice<'a, T: Copy>( assert!(dimension_mapping.is_valid(tensor.ndim())); - // TODO(andreas) - shouldn't just give up here - if dimension_mapping.width.is_none() || dimension_mapping.height.is_none() { - return tensor.view(); - } + let (width, height) = + if let (Some(width), Some(height)) = (dimension_mapping.width, dimension_mapping.height) { + (width, height) + } else if let Some(width) = dimension_mapping.width { + // If height is missing, create a 1D row. + (width, 1) + } else if let Some(height) = dimension_mapping.height { + // If width is missing, create a 1D column. + (1, height) + } else { + // If both are missing, give up. + return tensor.view(); + }; + + let view = if tensor.shape().len() == 1 { + // We want 2D slices, so for "pure" 1D tensors add a dimension. + // This is important for above width/height conversion to work since this assumes at least 2 dimensions. + tensor + .view() + .into_shape(ndarray::IxDyn(&[tensor.len(), 1])) + .unwrap() + } else { + tensor.view() + }; - let axis = dimension_mapping - .height + #[allow(clippy::tuple_array_conversions)] + let axis = [height, width] .into_iter() - .chain(dimension_mapping.width) .chain(dimension_mapping.selectors.iter().map(|s| s.dim_idx)) .collect::>(); - let mut slice = tensor.view().permuted_axes(axis); + let mut slice = view.permuted_axes(axis); for DimensionSelector { dim_idx, .. 
} in &dimension_mapping.selectors { let selector_value = selector_values.get(dim_idx).copied().unwrap_or_default() as usize; diff --git a/crates/re_space_view_tensor/src/visualizer_system.rs b/crates/re_space_view_tensor/src/visualizer_system.rs index 11807f00afd2..bfad099e7614 100644 --- a/crates/re_space_view_tensor/src/visualizer_system.rs +++ b/crates/re_space_view_tensor/src/visualizer_system.rs @@ -1,12 +1,10 @@ use re_data_store::{LatestAtQuery, VersionedComponent}; use re_entity_db::EntityPath; use re_log_types::RowId; -use re_space_view::diff_component_filter; use re_types::{archetypes::Tensor, components::TensorData, tensor_data::DecodedTensor}; use re_viewer_context::{ IdentifiedViewSystem, SpaceViewSystemExecutionError, TensorDecodeCache, ViewContextCollection, - ViewQuery, ViewerContext, VisualizerAdditionalApplicabilityFilter, VisualizerQueryInfo, - VisualizerSystem, + ViewQuery, ViewerContext, VisualizerQueryInfo, VisualizerSystem, }; #[derive(Default)] @@ -20,25 +18,11 @@ impl IdentifiedViewSystem for TensorSystem { } } -struct TensorVisualizerEntityFilter; - -impl VisualizerAdditionalApplicabilityFilter for TensorVisualizerEntityFilter { - fn update_applicability(&mut self, event: &re_data_store::StoreEvent) -> bool { - diff_component_filter(event, |tensor: &re_types::components::TensorData| { - !tensor.is_vector() - }) - } -} - impl VisualizerSystem for TensorSystem { fn visualizer_query_info(&self) -> VisualizerQueryInfo { VisualizerQueryInfo::from_archetype::() } - fn applicability_filter(&self) -> Option> { - Some(Box::new(TensorVisualizerEntityFilter)) - } - fn execute( &mut self, ctx: &ViewerContext<'_>, diff --git a/crates/re_types/definitions/rerun/components/instance_key.fbs b/crates/re_types/definitions/rerun/components/instance_key.fbs index a608c0d45b40..d3b639c2e056 100644 --- a/crates/re_types/definitions/rerun/components/instance_key.fbs +++ b/crates/re_types/definitions/rerun/components/instance_key.fbs @@ -10,6 +10,13 @@ namespace rerun.components; // --- /// A unique numeric identifier for each individual instance within a batch. +/// +/// Instance keys are automatically assigned by the `rerun` library and should not be set manually. +/// +/// The instance key is just the index of the instance within the batch, +/// i.e. the first point in a point cloud has `InstanceKey = 0`, the second `InstanceKey = 1`, and so on. +/// +/// We plan to remove the `InstanceKey` component in the near future. struct InstanceKey ( "attr.python.aliases": "int", "attr.python.array_aliases": "int, npt.NDArray[np.uint64]", diff --git a/crates/re_types_core/src/components/instance_key.rs b/crates/re_types_core/src/components/instance_key.rs index 7cb60278851b..f9ce52051081 100644 --- a/crates/re_types_core/src/components/instance_key.rs +++ b/crates/re_types_core/src/components/instance_key.rs @@ -22,6 +22,13 @@ use crate::{ComponentBatch, MaybeOwnedComponentBatch}; use crate::{DeserializationError, DeserializationResult}; /// **Component**: A unique numeric identifier for each individual instance within a batch. +/// +/// Instance keys are automatically assigned by the `rerun` library and should not be set manually. +/// +/// The instance key is just the index of the instance within the batch, +/// i.e. the first point in a point cloud has `InstanceKey = 0`, the second `InstanceKey = 1`, and so on. +/// +/// We plan to remove the `InstanceKey` component in the near future. 
#[derive( Clone, Debug, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, ::bytemuck::Pod, ::bytemuck::Zeroable, )] diff --git a/crates/re_types_core/src/components/instance_key_ext.rs b/crates/re_types_core/src/components/instance_key_ext.rs index 7ff7d5085e69..1907dcbf0e66 100644 --- a/crates/re_types_core/src/components/instance_key_ext.rs +++ b/crates/re_types_core/src/components/instance_key_ext.rs @@ -53,6 +53,7 @@ impl std::fmt::Display for InstanceKey { if self.is_splat() { "splat".fmt(f) } else { + // TODO(emilk): re_format::uint(self.0).fmt(f) (fix cyclic dependency!) self.0.fmt(f) } } diff --git a/crates/re_ui/Cargo.toml b/crates/re_ui/Cargo.toml index 2ac73a1ca399..c893eaae5ebc 100644 --- a/crates/re_ui/Cargo.toml +++ b/crates/re_ui/Cargo.toml @@ -27,6 +27,7 @@ default = [] [dependencies] re_entity_db.workspace = true # syntax-highlighting for InstancePath. TODO(emilk): move InstancePath +re_format.workspace = true re_log_types.workspace = true # syntax-highlighting for EntityPath egui_commonmark = { workspace = true, features = ["pulldown_cmark"] } diff --git a/crates/re_ui/src/syntax_highlighting.rs b/crates/re_ui/src/syntax_highlighting.rs index 63af7fba5f7b..3e399234002c 100644 --- a/crates/re_ui/src/syntax_highlighting.rs +++ b/crates/re_ui/src/syntax_highlighting.rs @@ -41,9 +41,11 @@ impl SyntaxHighlighting for EntityPathPart { impl SyntaxHighlighting for InstanceKey { fn syntax_highlight_into(&self, style: &Style, job: &mut LayoutJob) { - job.append("[", 0.0, faint_text_format(style)); - job.append(&self.to_string(), 0.0, text_format(style)); - job.append("]", 0.0, faint_text_format(style)); + if self.is_splat() { + job.append("splat", 0.0, text_format(style)); + } else { + job.append(&re_format::format_uint(self.0), 0.0, text_format(style)); + } } } @@ -64,7 +66,9 @@ impl SyntaxHighlighting for InstancePath { fn syntax_highlight_into(&self, style: &Style, job: &mut LayoutJob) { self.entity_path.syntax_highlight_into(style, job); if !self.instance_key.is_splat() { + job.append("[", 0.0, faint_text_format(style)); self.instance_key.syntax_highlight_into(style, job); + job.append("]", 0.0, faint_text_format(style)); } } } diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs index 0de606aebbde..48f7f2b51cf4 100644 --- a/crates/re_viewer/src/app.rs +++ b/crates/re_viewer/src/app.rs @@ -1,6 +1,6 @@ use re_data_source::{DataSource, FileContents}; use re_entity_db::entity_db::EntityDb; -use re_log_types::{ApplicationId, FileSource, LogMsg, StoreKind}; +use re_log_types::{FileSource, LogMsg, StoreKind}; use re_renderer::WgpuResourcePoolStatistics; use re_smart_channel::{ReceiveSet, SmartChannelSource}; use re_ui::{toasts, UICommand, UICommandSender}; @@ -241,7 +241,7 @@ impl App { state, background_tasks: Default::default(), store_hub: Some(StoreHub::new( - blueprint_loader(), + blueprint_persistence(), &crate::app_blueprint::setup_welcome_screen_blueprint, )), toasts: toasts::Toasts::new(), @@ -362,7 +362,7 @@ impl App { } SystemCommand::CloseAllRecordings => { - store_hub.clear_recordings(); + store_hub.clear_all_recordings(); // Stop receiving into the old recordings. 
// This is most important when going back to the example screen by using the "Back" @@ -382,7 +382,7 @@ impl App { SystemCommand::ClearSourceAndItsStores(source) => { self.rx.retain(|r| r.source() != &source); - store_hub.retain(|db| db.data_source.as_ref() != Some(&source)); + store_hub.retain_recordings(|db| db.data_source.as_ref() != Some(&source)); } SystemCommand::AddReceiver(rx) => { @@ -970,7 +970,7 @@ impl App { let store_id = msg.store_id(); - if store_hub.is_active_blueprint(store_id) { + if store_hub.is_active_blueprint_for_any_app(store_id) { // TODO(#5514): handle loading of active blueprints. re_log::warn_once!("Loading a blueprint {store_id} that is active. See https://github.com/rerun-io/rerun/issues/5514 for details."); } @@ -999,6 +999,13 @@ impl App { self.command_sender.send_system(SystemCommand::SetSelection( re_viewer_context::Item::StoreId(store_id.clone()), )); + + // If the viewer is in the background, tell the user that it has received something new. + egui_ctx.send_viewport_cmd( + egui::ViewportCommand::RequestUserAttention( + egui::UserAttentionType::Informational, + ), + ); } StoreKind::Blueprint => { // We wait with activating blueprints until they are fully loaded, @@ -1022,20 +1029,30 @@ impl App { } StoreKind::Blueprint => { if let Some(info) = entity_db.store_info() { - re_log::debug!( - "Activating blueprint that was loaded from {channel_source}" - ); let app_id = info.application_id.clone(); if cmd.make_default { + re_log::debug!( + "Making blueprint that was loaded from {channel_source} the default for app {app_id}" + ); store_hub.set_default_blueprint_for_app(&app_id, store_id); } if cmd.make_active { + re_log::debug!( + "Activating blueprint that was loaded from {channel_source}" + ); store_hub .set_cloned_blueprint_active_for_app(&app_id, store_id) .unwrap_or_else(|err| { re_log::warn!("Failed to make blueprint active: {err}"); }); store_hub.set_active_app(app_id); // Switch to this app, e.g. on drag-and-drop of a blueprint file + + // If the viewer is in the background, tell the user that it has received something new. 
+ egui_ctx.send_viewport_cmd( + egui::ViewportCommand::RequestUserAttention( + egui::UserAttentionType::Informational, + ), + ); } } else { re_log::warn!( @@ -1234,7 +1251,7 @@ impl App { } #[cfg(target_arch = "wasm32")] -fn blueprint_loader() -> BlueprintPersistence { +fn blueprint_persistence() -> BlueprintPersistence { // TODO(#2579): implement persistence for web BlueprintPersistence { loader: None, @@ -1243,10 +1260,14 @@ fn blueprint_loader() -> BlueprintPersistence { } #[cfg(not(target_arch = "wasm32"))] -fn blueprint_loader() -> BlueprintPersistence { +fn blueprint_persistence() -> BlueprintPersistence { use re_entity_db::StoreBundle; - fn load_blueprint_from_disk(app_id: &ApplicationId) -> anyhow::Result> { + fn load_blueprint_from_disk( + app_id: &re_log_types::ApplicationId, + ) -> anyhow::Result> { + re_tracing::profile_function!(); + let blueprint_path = crate::saving::default_blueprint_path(app_id)?; if !blueprint_path.exists() { return Ok(None); @@ -1259,6 +1280,7 @@ fn blueprint_loader() -> BlueprintPersistence { if let Some(bundle) = crate::loading::load_blueprint_file(&blueprint_path, with_notifications) { + // Validate: for store in bundle.entity_dbs() { if store.store_kind() == StoreKind::Blueprint && !crate::blueprint::is_valid_blueprint(store) @@ -1267,23 +1289,30 @@ fn blueprint_loader() -> BlueprintPersistence { return Ok(None); } } + Ok(Some(bundle)) } else { + re_log::debug!("No blueprint found at {blueprint_path:?}"); Ok(None) } } #[cfg(not(target_arch = "wasm32"))] - fn save_blueprint_to_disk(app_id: &ApplicationId, blueprint: &EntityDb) -> anyhow::Result<()> { + fn save_blueprint_to_disk( + app_id: &re_log_types::ApplicationId, + messages: &[LogMsg], + ) -> anyhow::Result<()> { + re_tracing::profile_function!(); let blueprint_path = crate::saving::default_blueprint_path(app_id)?; - let messages = blueprint.to_messages(None)?; - // TODO(jleibs): Should we push this into a background thread? Blueprints should generally // be small & fast to save, but maybe not once we start adding big pieces of user data? crate::saving::encode_to_file(&blueprint_path, messages.iter())?; - re_log::debug!("Saved blueprint for {app_id} to {blueprint_path:?}"); + re_log::debug!( + "Saved blueprint for {app_id} to {blueprint_path:?} ({} log messages)", + messages.len() + ); Ok(()) } @@ -1299,6 +1328,7 @@ impl eframe::App for App { [0.0; 4] // transparent so we can get rounded corners when doing [`re_ui::CUSTOM_WINDOW_DECORATIONS`] } + /// Will be called periodically (auto-save), and on shutdown. 
fn save(&mut self, storage: &mut dyn eframe::Storage) { if !self.startup_options.persist_state { return; @@ -1310,13 +1340,15 @@ impl eframe::App for App { eframe::set_value(storage, eframe::APP_KEY, &self.state); // Save the blueprints - // TODO(#2579): implement web-storage for blueprints as well if let Some(hub) = &mut self.store_hub { - match hub.gc_and_persist_app_blueprints(&self.state.app_options) { - Ok(f) => f, - Err(err) => { - re_log::error!("Saving blueprints failed: {err}"); - } + if self.state.app_options.blueprint_gc { + // First make the blueprints smaller: + hub.gc_blueprints(); + } + + // Then save them: + if let Err(err) = hub.save_app_blueprints() { + re_log::error!("Saving blueprints failed: {err}"); }; } else { re_log::error!("Could not save blueprints: the store hub is not available"); @@ -1417,29 +1449,15 @@ impl eframe::App for App { self.show_text_logs_as_notifications(); self.receive_messages(&mut store_hub, egui_ctx); - store_hub.gc_blueprints(self.app_options()); + if self.app_options().blueprint_gc { + store_hub.gc_blueprints(); + } store_hub.purge_empty(); self.state.cleanup(&store_hub); file_saver_progress_ui(egui_ctx, &mut self.background_tasks); // toasts for background file saver - // Make sure some app is active - // Must be called before `read_context` below. - if store_hub.active_app().is_none() { - let apps: std::collections::BTreeSet<&ApplicationId> = store_hub - .store_bundle() - .entity_dbs() - .filter_map(|db| db.app_id()) - .filter(|&app_id| app_id != &StoreHub::welcome_screen_app_id()) - .collect(); - if let Some(app_id) = apps.first().cloned() { - store_hub.set_active_app(app_id.clone()); - } else { - store_hub.set_active_app(StoreHub::welcome_screen_app_id()); - } - } - let store_context = store_hub.read_context(); let app_blueprint = AppBlueprint::new( diff --git a/crates/re_viewer/src/ui/welcome_screen/welcome_section.rs b/crates/re_viewer/src/ui/welcome_screen/welcome_section.rs index 1724ddf51afd..76bb02cc41cc 100644 --- a/crates/re_viewer/src/ui/welcome_screen/welcome_section.rs +++ b/crates/re_viewer/src/ui/welcome_screen/welcome_section.rs @@ -40,9 +40,9 @@ pub(super) fn welcome_section_ui(ui: &mut egui::Ui) { ui.add_space(4.0); }; - bullet_text(ui, "Log with the Rerun SDK in C++, Python, or Rust"); + bullet_text(ui, "Log data with the Rerun SDK in C++, Python, or Rust"); bullet_text(ui, "Visualize and explore live or recorded data"); - bullet_text(ui, "Customize using the UI or through code"); + bullet_text(ui, "Configure the viewer interactively or through code"); ui.add_space(9.0); if ui diff --git a/crates/re_viewer_context/src/command_sender.rs b/crates/re_viewer_context/src/command_sender.rs index 4f18cc74e7f8..b0ace1da85e3 100644 --- a/crates/re_viewer_context/src/command_sender.rs +++ b/crates/re_viewer_context/src/command_sender.rs @@ -34,6 +34,8 @@ pub enum SystemCommand { ActivateRecording(StoreId), /// Close a recording or blueprint (free its memory). + /// + /// If this is a blueprint, it may be saved to disk first. CloseStore(StoreId), /// Close all stores and show the welcome screen again. 
diff --git a/crates/re_viewer_context/src/store_hub.rs b/crates/re_viewer_context/src/store_hub.rs index 496e0bb07bb4..af3fd471f21c 100644 --- a/crates/re_viewer_context/src/store_hub.rs +++ b/crates/re_viewer_context/src/store_hub.rs @@ -9,7 +9,7 @@ use re_entity_db::{EntityDb, StoreBundle}; use re_log_types::{ApplicationId, StoreId, StoreKind}; use re_query_cache::CachesStats; -use crate::{AppOptions, StoreContext}; +use crate::StoreContext; /// Interface for accessing all blueprints and recordings /// @@ -32,14 +32,24 @@ use crate::{AppOptions, StoreContext}; /// /// The default blueprint is usually the blueprint set by the SDK. /// This lets users reset the active blueprint to the one sent by the SDK. +/// +/// We almost never free blueprints - we keep them in RAM even if we close an app, +/// in case the user re-opens the same app. +/// Blueprints are generally small, so this is fine. pub struct StoreHub { /// How we load and save blueprints. persistence: BlueprintPersistence, active_rec_id: Option, - active_application_id: Option, - default_blueprint_by_app_id: HashMap, - active_blueprint_by_app_id: HashMap, + + /// The default and fallback value for this is the welcome screen. + active_app_id: ApplicationId, + + /// Once added, we never remove a key from this map. + /// Instead we clear the contents of [`AppBlueprints`]. + app_blueprints: HashMap, + + /// The contents of all the stores. store_bundle: StoreBundle, /// The [`StoreGeneration`] from when the [`EntityDb`] was last saved @@ -49,13 +59,49 @@ pub struct StoreHub { blueprint_last_gc: HashMap, } +#[derive(Clone, Debug, Default, PartialEq, Eq)] +struct AppBlueprints { + /// The active blueprint is what the user sees and edits (if this is the active app). + /// + /// If there is no active blueprint, the default will be cloned and made active, + /// using a new [`StoreId`]. + pub active: Option, + + /// The default blueprint is usually the blueprint set by the SDK. + /// + /// Storing this lets users reset the active blueprint to the one sent by the SDK. + pub default: Option, +} + +impl AppBlueprints { + fn iter(&self) -> impl Iterator { + let Self { active, default } = self; + active.iter().chain(default.iter()) + } + + fn retain(&mut self, mut keep: impl FnMut(&StoreId) -> bool) { + let Self { active, default } = self; + if let Some(active) = active { + if !keep(active) { + self.active = None; + } + } + if let Some(default) = default { + if !keep(default) { + self.default = None; + } + } + } +} + /// Load a blueprint from persisted storage, e.g. disk. /// Returns `Ok(None)` if no blueprint is found. pub type BlueprintLoader = dyn Fn(&ApplicationId) -> anyhow::Result> + Send + Sync; /// Save a blueprint to persisted storage, e.g. disk. 
-pub type BlueprintSaver = dyn Fn(&ApplicationId, &EntityDb) -> anyhow::Result<()> + Send + Sync; +pub type BlueprintSaver = + dyn Fn(&ApplicationId, &[re_log_types::LogMsg]) -> anyhow::Result<()> + Send + Sync; /// How to save and load blueprints pub struct BlueprintPersistence { @@ -109,12 +155,15 @@ impl StoreHub { setup_welcome_screen_blueprint: &dyn Fn(&mut EntityDb), ) -> Self { re_tracing::profile_function!(); - let mut default_blueprint_by_app_id = HashMap::new(); + let mut blueprints_by_app_id = HashMap::new(); let mut store_bundle = StoreBundle::default(); - default_blueprint_by_app_id.insert( + blueprints_by_app_id.insert( Self::welcome_screen_app_id(), - Self::welcome_screen_blueprint_id(), + AppBlueprints { + active: None, + default: Some(Self::welcome_screen_blueprint_id()), + }, ); let welcome_screen_blueprint = @@ -125,9 +174,8 @@ impl StoreHub { persistence, active_rec_id: None, - active_application_id: None, - default_blueprint_by_app_id, - active_blueprint_by_app_id: Default::default(), + active_app_id: Self::welcome_screen_app_id(), + app_blueprints: blueprints_by_app_id, store_bundle, blueprint_last_save: Default::default(), @@ -135,6 +183,49 @@ impl StoreHub { } } + // --------------------- + + /// Ensure there is an active blueprint for the active application. + /// + /// Either load it, or clone the default blueprint. + fn ensure_active_blueprint(&mut self) { + // First some defensive coding: Check that default and active blueprints exists, + // in case some of our book-keeping is broken. + for app_blueprints in self.app_blueprints.values_mut() { + if let Some(blueprint_id) = &app_blueprints.active { + if !self.store_bundle.contains(blueprint_id) { + app_blueprints.active = None; + } + } + if let Some(blueprint_id) = &app_blueprints.default { + if !self.store_bundle.contains(blueprint_id) { + app_blueprints.default = None; + } + } + } + + let app_id = self.active_app_id.clone(); + + if app_id != Self::welcome_screen_app_id() { + // Try restoring from disk/persistence: + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); + if app_blueprints.active.is_none() { + self.load_persisted_blueprint(&app_id); + } + } + + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); + if app_blueprints.active.is_none() { + // If there's no active blueprint for this app, try to make the current default one active. + if let Some(blueprint_id) = app_blueprints.default.clone() { + self.set_cloned_blueprint_active_for_app(&app_id, &blueprint_id) + .unwrap_or_else(|err| { + re_log::warn!("Failed to make blueprint active: {err}"); + }); + } + } + } + // --------------------- // Accessors @@ -152,37 +243,16 @@ impl StoreHub { static EMPTY_ENTITY_DB: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| EntityDb::new(re_log_types::StoreId::empty_recording())); - // If we have an app-id, then use it to look up the blueprint. - let app_id = self.active_application_id.clone()?; + let app_id = self.active_app_id.clone(); - // Defensive coding: Check that default and active blueprints exists, - // in case some of our book-keeping is broken. 
- if let Some(blueprint_id) = self.default_blueprint_by_app_id.get(&app_id) { - if !self.store_bundle.contains(blueprint_id) { - self.default_blueprint_by_app_id.remove(&app_id); - } - } - if let Some(blueprint_id) = self.active_blueprint_by_app_id.get(&app_id) { - if !self.store_bundle.contains(blueprint_id) { - self.active_blueprint_by_app_id.remove(&app_id); - } - } + self.ensure_active_blueprint(); - // If there's no active blueprint for this app, try to make the current default one active. - if !self.active_blueprint_by_app_id.contains_key(&app_id) { - if let Some(blueprint_id) = self.default_blueprint_by_app_id.get(&app_id).cloned() { - self.set_cloned_blueprint_active_for_app(&app_id, &blueprint_id) - .unwrap_or_else(|err| { - re_log::warn!("Failed to make blueprint active: {err}"); - }); - } - } + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); - // Get the id is of whatever blueprint is now active, falling back on the "app blueprint" if needed. - let active_blueprint_id = self - .active_blueprint_by_app_id - .entry(app_id.clone()) - .or_insert_with(|| StoreId::from_string(StoreKind::Blueprint, app_id.clone().0)); + // Get the id of whatever blueprint is now active, falling back on the "app blueprint" if needed. + let active_blueprint_id = app_blueprints + .active + .get_or_insert_with(|| StoreId::from_string(StoreKind::Blueprint, app_id.clone().0)); // Get or create the blueprint: self.store_bundle.blueprint_entry(active_blueprint_id); @@ -223,6 +293,11 @@ impl StoreHub { } pub fn remove(&mut self, store_id: &StoreId) { + // Save blueprints before maybe removing them: + if let Err(err) = self.save_app_blueprints() { + re_log::error!("Saving blueprints failed: {err}"); + }; + let removed_store = self.store_bundle.remove(store_id); let Some(removed_store) = removed_store else { @@ -244,10 +319,9 @@ impl StoreHub { } } StoreKind::Blueprint => { - self.active_blueprint_by_app_id - .retain(|_, id| id != store_id); - self.default_blueprint_by_app_id - .retain(|_, id| id != store_id); + for app_blueprints in self.app_blueprints.values_mut() { + app_blueprints.retain(|id| id != store_id); + } } } @@ -255,13 +329,26 @@ impl StoreHub { if let Some(new_selection) = self.store_bundle.find_closest_recording(store_id) { self.set_active_recording_id(new_selection.clone()); } else { - self.active_application_id = None; + self.active_app_id = Self::welcome_screen_app_id(); self.active_rec_id = None; } } + + self.ensure_active_blueprint(); } - pub fn retain(&mut self, mut should_retain: impl FnMut(&EntityDb) -> bool) { + /// Only retain recordings that matches the predicate. + pub fn retain_recordings(&mut self, mut should_retain: impl FnMut(&EntityDb) -> bool) { + self.retain_stores(|db| match db.store_kind() { + StoreKind::Recording => should_retain(db), + StoreKind::Blueprint => true, + }); + } + + /// Only retain recordings and blueprints that matches the predicate. + /// + /// Consider if you don't want to use [`Self::retain_recordings`] instead! + fn retain_stores(&mut self, mut should_retain: impl FnMut(&EntityDb) -> bool) { let stores_to_remove: Vec = self .store_bundle .entity_dbs() @@ -279,12 +366,11 @@ impl StoreHub { } /// Remove all open recordings and applications, and go to the welcome page. 
- pub fn clear_recordings(&mut self) { - // Keep only the welcome screen: - self.store_bundle - .retain(|db| db.app_id() == Some(&Self::welcome_screen_app_id())); + pub fn clear_all_recordings(&mut self) { + // Note: we keep the blueprints around in case the user re-opens the same app. + self.retain_recordings(|_| false); self.active_rec_id = None; - self.active_application_id = Some(Self::welcome_screen_app_id()); + self.active_app_id = Self::welcome_screen_app_id(); } // --------------------- @@ -293,19 +379,16 @@ impl StoreHub { /// Change the active [`ApplicationId`] #[allow(clippy::needless_pass_by_value)] pub fn set_active_app(&mut self, app_id: ApplicationId) { - // If we don't know of a blueprint for this `ApplicationId` yet, - // try to load one from the persisted store - if !self.active_blueprint_by_app_id.contains_key(&app_id) { - if let Err(err) = self.try_to_load_persisted_blueprint(&app_id) { - re_log::warn!("Failed to load persisted blueprint: {err}"); - } + // If we don't know of this app id yet, try to load its persisted blueprints. + if !self.app_blueprints.contains_key(&app_id) { + self.load_persisted_blueprint(&app_id); } - if self.active_application_id.as_ref() == Some(&app_id) { + if self.active_app_id == app_id { return; } - self.active_application_id = Some(app_id.clone()); + self.active_app_id = app_id.clone(); self.active_rec_id = None; // Find any matching recording and activate it @@ -319,24 +402,21 @@ impl StoreHub { return; } } + + self.ensure_active_blueprint(); } /// Close this application and all its recordings. pub fn close_app(&mut self, app_id: &ApplicationId) { - self.store_bundle.retain(|db| db.app_id() != Some(app_id)); + // Note: we keep the blueprints around in case the user re-opens the same app. + self.retain_recordings(|db| db.app_id() != Some(app_id)); - if self.active_application_id.as_ref() == Some(app_id) { - self.active_application_id = None; + if &self.active_app_id == app_id { + self.active_app_id = Self::welcome_screen_app_id(); self.active_rec_id = None; } - self.default_blueprint_by_app_id.remove(app_id); - self.active_blueprint_by_app_id.remove(app_id); - } - - #[inline] - pub fn active_app(&self) -> Option<&ApplicationId> { - self.active_application_id.as_ref() + self.ensure_active_blueprint(); } // --------------------- @@ -390,7 +470,9 @@ impl StoreHub { // Default blueprint pub fn default_blueprint_id_for_app(&self, app_id: &ApplicationId) -> Option<&StoreId> { - self.default_blueprint_by_app_id.get(app_id) + self.app_blueprints + .get(app_id) + .and_then(|app_blueprints| app_blueprints.default.as_ref()) } pub fn default_blueprint_for_app(&self, app_id: &ApplicationId) -> Option<&EntityDb> { @@ -406,14 +488,14 @@ impl StoreHub { blueprint_id: &StoreId, ) { re_log::debug!("Switching default blueprint for {app_id} to {blueprint_id}"); - self.default_blueprint_by_app_id - .insert(app_id.clone(), blueprint_id.clone()); + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); + app_blueprints.default = Some(blueprint_id.clone()); } - /// Clear the current default blueprint + /// Clear the current default blueprint. This cannot be undone. 
pub fn clear_default_blueprint(&mut self) { - if let Some(app_id) = &self.active_application_id { - if let Some(blueprint_id) = self.default_blueprint_by_app_id.remove(app_id) { + if let Some(app_blueprints) = self.app_blueprints.get_mut(&self.active_app_id) { + if let Some(blueprint_id) = app_blueprints.default.take() { self.remove(&blueprint_id); } } @@ -424,12 +506,13 @@ impl StoreHub { /// What is the active blueprint for the active application? pub fn active_blueprint_id(&self) -> Option<&StoreId> { - self.active_app() - .and_then(|app_id| self.active_blueprint_id_for_app(app_id)) + self.active_blueprint_id_for_app(&self.active_app_id) } pub fn active_blueprint_id_for_app(&self, app_id: &ApplicationId) -> Option<&StoreId> { - self.active_blueprint_by_app_id.get(app_id) + self.app_blueprints + .get(app_id) + .and_then(|app_blueprints| app_blueprints.active.as_ref()) } pub fn active_blueprint_for_app(&self, app_id: &ApplicationId) -> Option<&EntityDb> { @@ -461,24 +544,23 @@ impl StoreHub { self.store_bundle.insert(new_blueprint); - self.active_blueprint_by_app_id - .insert(app_id.clone(), new_id); + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); + app_blueprints.active = Some(new_id); Ok(()) } /// Is the given blueprint id the active blueprint for any app id? - pub fn is_active_blueprint(&self, blueprint_id: &StoreId) -> bool { - self.active_blueprint_by_app_id + pub fn is_active_blueprint_for_any_app(&self, blueprint_id: &StoreId) -> bool { + self.app_blueprints .values() - .any(|id| id == blueprint_id) + .any(|app_blueprints| app_blueprints.active.as_ref() == Some(blueprint_id)) } - /// Clear the currently active blueprint + /// Clear the currently active blueprint. This cannot be undone. pub fn clear_active_blueprint(&mut self) { - if let Some(app_id) = &self.active_application_id { - if let Some(blueprint_id) = self.active_blueprint_by_app_id.remove(app_id) { - re_log::debug!("Clearing blueprint for {app_id}: {blueprint_id}"); + if let Some(app_blueprints) = self.app_blueprints.get_mut(&self.active_app_id) { + if let Some(blueprint_id) = app_blueprints.active.take() { self.remove(&blueprint_id); } } @@ -490,7 +572,7 @@ impl StoreHub { /// Cloned blueprints are the ones the user has edited, /// i.e. NOT sent from the SDK. pub fn clear_all_cloned_blueprints(&mut self) { - self.retain(|db| match db.store_kind() { + self.retain_stores(|db| match db.store_kind() { StoreKind::Recording => true, StoreKind::Blueprint => db.cloned_from().is_none(), }); @@ -498,7 +580,14 @@ impl StoreHub { /// Remove any empty [`EntityDb`]s from the hub pub fn purge_empty(&mut self) { - self.retain(|entity_db| !entity_db.is_empty()); + self.retain_stores(|entity_db| { + if entity_db.is_empty() { + re_log::debug!("Removing empty store {:?}", entity_db.store_id()); + false + } else { + true + } + }); } /// Call [`EntityDb::purge_fraction_of_ram`] on every recording @@ -529,6 +618,7 @@ impl StoreHub { // No point keeping an empty recording around. if entity_db.is_empty() { + re_log::debug!("Removing empty store {store_id:?} to free up RAM"); self.remove(&store_id); return; } @@ -543,6 +633,7 @@ impl StoreHub { // log new things anyhow. let num_recordings = store_bundle.recordings().count(); if store_size_before == store_size_after && num_recordings > 1 { + re_log::debug!("Removing oldest store {store_id:?} to free up RAM"); self.remove(&store_id); } @@ -557,7 +648,8 @@ impl StoreHub { /// Remove any recordings with a network source pointing at this `uri`. 
pub fn remove_recording_by_uri(&mut self, uri: &str) { - self.retain(|db| { + // Note: we keep the blueprints around in case the user re-opens the same app. + self.retain_recordings(|db| { let Some(data_source) = &db.data_source else { // no data source, keep return true; @@ -576,70 +668,75 @@ impl StoreHub { }); } - pub fn gc_blueprints(&mut self, app_options: &AppOptions) { + pub fn gc_blueprints(&mut self) { re_tracing::profile_function!(); - if app_options.blueprint_gc { - for blueprint_id in self - .active_blueprint_by_app_id - .values() - .chain(self.default_blueprint_by_app_id.values()) - { - if let Some(blueprint) = self.store_bundle.get_mut(blueprint_id) { - if self.blueprint_last_gc.get(blueprint_id) == Some(&blueprint.generation()) { - continue; // no change since last gc - } - - // TODO(jleibs): Decide a better tuning for this. Would like to save a - // reasonable amount of history, or incremental snapshots. - blueprint.gc_everything_but_the_latest_row(); - self.blueprint_last_gc - .insert(blueprint_id.clone(), blueprint.generation()); + for blueprint_id in self + .app_blueprints + .values() + .flat_map(|app_blueprints| app_blueprints.iter()) + { + if let Some(blueprint) = self.store_bundle.get_mut(blueprint_id) { + if self.blueprint_last_gc.get(blueprint_id) == Some(&blueprint.generation()) { + continue; // no change since last gc } + + // TODO(jleibs): Decide a better tuning for this. Would like to save a + // reasonable amount of history, or incremental snapshots. + blueprint.gc_everything_but_the_latest_row(); + + self.blueprint_last_gc + .insert(blueprint_id.clone(), blueprint.generation()); } } } /// Persist any in-use blueprints to durable storage. - // TODO(#2579): implement persistence for web - #[allow(clippy::unnecessary_wraps)] - pub fn gc_and_persist_app_blueprints( - &mut self, - app_options: &AppOptions, - ) -> anyhow::Result<()> { - re_tracing::profile_function!(); + pub fn save_app_blueprints(&mut self) -> anyhow::Result<()> { + let Some(saver) = &self.persistence.saver else { + return Ok(()); + }; - // Because we save blueprints based on their `ApplicationId`, we only - // save the blueprints referenced by `blueprint_by_app_id`, even though - // there may be other Blueprints in the Hub. 
+ re_tracing::profile_function!(); - for (app_id, blueprint_id) in &self.active_blueprint_by_app_id { - let Some(blueprint) = self.store_bundle.get_mut(blueprint_id) else { - re_log::debug!("Failed to find blueprint {blueprint_id}."); - continue; - }; - if self.blueprint_last_save.get(blueprint_id) == Some(&blueprint.generation()) { - continue; // no change since last save + for (app_id, app_blueprints) in &self.app_blueprints { + if app_id == &Self::welcome_screen_app_id() { + continue; // Don't save changes to the welcome screen } - if app_options.blueprint_gc { - blueprint.gc_everything_but_the_latest_row(); - self.blueprint_last_gc - .insert(blueprint_id.clone(), blueprint.generation()); - } + if let Some(blueprint_id) = &app_blueprints.active { + let Some(blueprint) = self.store_bundle.get_mut(blueprint_id) else { + re_log::debug!("Failed to find blueprint {blueprint_id}."); + continue; + }; + if self.blueprint_last_save.get(blueprint_id) == Some(&blueprint.generation()) { + continue; // no change since last save + } - if blueprint.app_id() == Some(&Self::welcome_screen_app_id()) { - // Don't save changes to the welcome screen - } else if let Some(saver) = &self.persistence.saver { - (saver)(app_id, blueprint)?; + let messages = blueprint.to_messages(None)?; + (saver)(app_id, &messages)?; self.blueprint_last_save .insert(blueprint_id.clone(), blueprint.generation()); + } else { + // Save an empty blueprint file for this app. + // This is important for the case when the user has reset/cleared/deleted the active blueprint, + // in which case we want to over-write the old blueprint file. + re_log::debug!("Saving empty active blueprint for {app_id}"); + let messages = []; + (saver)(app_id, &messages)?; } } Ok(()) } + fn load_persisted_blueprint(&mut self, app_id: &ApplicationId) { + self.try_to_load_persisted_blueprint(app_id) + .unwrap_or_else(|err| { + re_log::warn!("Failed to load persisted blueprint: {err}"); + }); + } + /// Try to load the persisted blueprint for the given `ApplicationId`. /// Note: If no blueprint exists at the expected path, the result is still considered `Ok`. /// It is only an `Error` if a blueprint exists but fails to load. @@ -651,6 +748,12 @@ impl StoreHub { }; if let Some(mut bundle) = (loader)(app_id)? { + let app_blueprints = self.app_blueprints.entry(app_id.clone()).or_default(); + + if bundle.is_empty() { + re_log::debug!("Found empty blueprint file with no stores in it for {app_id}"); + } + for store in bundle.drain_entity_dbs() { match store.store_kind() { StoreKind::Recording => { @@ -676,8 +779,7 @@ impl StoreHub { "Activating new blueprint {} for {app_id}; loaded from disk", store.store_id(), ); - self.active_blueprint_by_app_id - .insert(app_id.clone(), store.store_id().clone()); + app_blueprints.active = Some(store.store_id().clone()); self.blueprint_last_save .insert(store.store_id().clone(), store.generation()); self.store_bundle.insert(store); @@ -694,18 +796,15 @@ impl StoreHub { pub fn stats(&self, detailed_cache_stats: bool) -> StoreHubStats { re_tracing::profile_function!(); - // If we have an app-id, then use it to look up the blueprint. 
- let blueprint = self - .active_application_id - .as_ref() - .and_then(|app_id| self.active_blueprint_by_app_id.get(app_id)) + let active_blueprint = self + .active_blueprint_id() .and_then(|blueprint_id| self.store_bundle.get(blueprint_id)); - let blueprint_stats = blueprint + let blueprint_stats = active_blueprint .map(|entity_db| DataStoreStats::from_store(entity_db.store())) .unwrap_or_default(); - let blueprint_config = blueprint + let blueprint_config = active_blueprint .map(|entity_db| entity_db.store().config().clone()) .unwrap_or_default(); diff --git a/crates/re_viewport/src/viewport_blueprint_ui.rs b/crates/re_viewport/src/viewport_blueprint_ui.rs index 507326724fae..aa57a27e67b1 100644 --- a/crates/re_viewport/src/viewport_blueprint_ui.rs +++ b/crates/re_viewport/src/viewport_blueprint_ui.rs @@ -573,40 +573,38 @@ impl Viewport<'_, '_> { let subdued = !space_view_visible || !visible; - let list_item = ListItem::new(ctx.re_ui, item_label) + let mut list_item = ListItem::new(ctx.re_ui, item_label) .selected(is_selected) .with_icon(guess_instance_path_icon( ctx, &InstancePath::from(entity_path.clone()), )) .subdued(subdued) - .force_hovered(is_item_hovered) - .with_buttons(|re_ui: &_, ui: &mut egui::Ui| { - let vis_response = if !empty_origin { - let mut visible_after = visible; - let vis_response = - visibility_button_ui(re_ui, ui, space_view_visible, &mut visible_after); - if visible_after != visible { - if let Some(data_result_node) = data_result_node { - data_result_node - .data_result - .save_recursive_override_or_clear_if_redundant( - ctx, - &query_result.tree, - &Visible(visible_after), - ); - } + .force_hovered(is_item_hovered); + + // We force the origin to be displayed, even if it's fully empty, in which case it can be + // neither shown/hidden nor removed. + if !empty_origin { + list_item = list_item.with_buttons(|re_ui: &_, ui: &mut egui::Ui| { + let mut visible_after = visible; + let vis_response = + visibility_button_ui(re_ui, ui, space_view_visible, &mut visible_after); + if visible_after != visible { + if let Some(data_result_node) = data_result_node { + data_result_node + .data_result + .save_recursive_override_or_clear_if_redundant( + ctx, + &query_result.tree, + &Visible(visible_after), + ); } + } - Some(vis_response) - } else { - None - }; - - let mut response = remove_button_ui( + let response = remove_button_ui( re_ui, ui, - "Remove group and all its children from the space view", + "Remove this entity and all its children from the space view", ); if response.clicked() { space_view @@ -614,12 +612,9 @@ impl Viewport<'_, '_> { .remove_subtree_and_matching_rules(ctx, entity_path.clone()); } - if let Some(vis_response) = vis_response { - response |= vis_response; - } - - response + response | vis_response }); + } // If there's any children on the data result nodes, show them, otherwise we're good with this list item as is. 
let has_children = data_result_node.map_or(false, |n| !n.children.is_empty()); diff --git a/docs/content/concepts/annotation-context.md b/docs/content/concepts/annotation-context.md index 492f609bd03e..4a4d97f1a760 100644 --- a/docs/content/concepts/annotation-context.md +++ b/docs/content/concepts/annotation-context.md @@ -1,6 +1,6 @@ --- title: Annotation Context -order: 4 +order: 800 --- ## Overview diff --git a/docs/content/concepts/apps-and-recordings.md b/docs/content/concepts/apps-and-recordings.md index c1df8db97876..a336b8788e28 100644 --- a/docs/content/concepts/apps-and-recordings.md +++ b/docs/content/concepts/apps-and-recordings.md @@ -1,6 +1,6 @@ --- title: Application IDs and Recording IDs -order: 8 +order: 500 --- ## Application ID diff --git a/docs/content/concepts/batches.md b/docs/content/concepts/batches.md index 14e8444431c9..93bb06d05314 100644 --- a/docs/content/concepts/batches.md +++ b/docs/content/concepts/batches.md @@ -1,6 +1,6 @@ --- title: Batch Data -order: 5 +order: 700 --- Rerun has built-in support for batch data. Whenever you have a collection of things that all have the same type, rather @@ -16,7 +16,6 @@ In the Python APIs, the majority of archetypes are named with the plural form, f - An *entity* is a collection of *components* (see [Entities and Components](entity-component.md)). - When an entity is batched, it's components individual elements are called *instances*. -- Each instance is identified within the entity by its *instance key*. - When every instance within an entity has the same value for a component, that component is called a *splat*. This is a common pattern and has dedicated support for it (see the [Splats](#splats) section below). For instance, you can set all the colors of a point cloud to the same color by passing a single color value to the @@ -45,13 +44,12 @@ This can be quite convenient since updating different components at different ti organization of your code. It even means if a component on an entity is static, you only need to log it once. However, if both a batch of colors and a batch of positions have been logged at two different points in time, we need a way -to know which point receives which color. This is what Rerun uses the instance keys for. When a component batch is -logged it is always assigned a set of instance keys. By default, this key is based on the sequential index within the -logged array. When querying a batched component, the component-values are joined together based on these keys. +to know which point receives which color. +For that, Rerun uses the index of the instance. +When querying a batched component, the component-values are joined together based on this index. Logically, this happens as a *left-join* using the primary component for the entity. For example, if you log 3 points and then later log 5 colors, you will still only see 3 points in the viewer. -In the future you will be able to specify the instance keys manually while logging ([#1309](https://github.com/rerun-io/rerun/issues/1309)). ## Splats diff --git a/docs/content/concepts/blueprint.md b/docs/content/concepts/blueprint.md index 46f871ad48f3..6b8668facefe 100644 --- a/docs/content/concepts/blueprint.md +++ b/docs/content/concepts/blueprint.md @@ -1,10 +1,13 @@ --- title: Blueprint -order: 9 +order: 600 --- ## Blueprints and recordings + + + When you are working with the Rerun viewer, there are two separate pieces that combine to produce what you see: the "recording" and the "blueprint." @@ -21,8 +24,8 @@ for the viewer to display. 
 ## Loose coupling
 
 The blueprint and the recording are only loosely coupled. Rerun uses the
-"application ID" to determine whether a blueprint and a recording should be used
-together, but they are not directly linked beyond that.
+[application ID](apps-and-recordings.md) to determine whether a blueprint and a
+recording should be used together, but they are not directly linked beyond that.
 
 This means that either can be changed independently of the other. Keeping the
 blueprint constant while changing the recording will allow you to compare
@@ -42,6 +45,17 @@ viewer, you are actually modifying the blueprint. (Note that while there may be
 some exceptions to this rule at the moment, the intent is to eventually migrate
 all state to the blueprint.)
 
+## Current, default, and heuristic blueprints
+
+
+
+Blueprints may originate from multiple sources.
+
+- The "current blueprint" for a given application ID is the one that is used by the viewer to display data at any given time. It is updated for each change made to the visualization within the viewer, and may be saved to a blueprint file at any time.
+- The "default blueprint" is a snapshot that is set or updated when a blueprint is received from code or loaded from a file. The current blueprint may be reset to the default blueprint at any time by using the "reset" button in the blueprint panel's header.
+- The "heuristic blueprint" is an automatically-produced blueprint based on the recording data. When no default blueprint is available, the heuristic blueprint is used when resetting the current blueprint. It is also possible to reset to the heuristic blueprint in the selection panel after selecting an application.
+
 ## What is a blueprint
 
 Under the hood, the blueprint is just data. It is represented by a
diff --git a/docs/content/concepts/entity-component.md b/docs/content/concepts/entity-component.md
index e3c7925d31d8..e91785c4784e 100644
--- a/docs/content/concepts/entity-component.md
+++ b/docs/content/concepts/entity-component.md
@@ -1,6 +1,6 @@
 ---
 title: Entities and Components
-order: 1
+order: 100
 ---
 
 ## Data model
diff --git a/docs/content/concepts/entity-path.md b/docs/content/concepts/entity-path.md
index e9f1473402c6..bffb28c6efd7 100644
--- a/docs/content/concepts/entity-path.md
+++ b/docs/content/concepts/entity-path.md
@@ -1,6 +1,6 @@
 ---
 title: The Entity Path Hierarchy
-order: 1
+order: 200
 ---
 
 ## Entity paths
diff --git a/docs/content/concepts/spaces-and-transforms.md b/docs/content/concepts/spaces-and-transforms.md
index ecd98567c404..674ca0d3c4a9 100644
--- a/docs/content/concepts/spaces-and-transforms.md
+++ b/docs/content/concepts/spaces-and-transforms.md
@@ -1,6 +1,6 @@
 ---
 title: Spaces and Transforms
-order: 2
+order: 300
 ---
 
 ## The definition of a space
diff --git a/docs/content/concepts/timelines.md b/docs/content/concepts/timelines.md
index 7237c1d408be..5c4ae16bf3ba 100644
--- a/docs/content/concepts/timelines.md
+++ b/docs/content/concepts/timelines.md
@@ -1,12 +1,15 @@
 ---
 title: Events and Timelines
-order: 3
+order: 400
 ---
 
 ## Timelines
 
 Each piece of logged data is associated with one or more timelines.
-By default, each log is added to the `log_time` timeline, with a timestamp assigned by the SDK.
+ +The logging SDK always creates two timelines for you: +* `log_tick` - a sequence timeline with the sequence number of the log call +* `log_time` - a temporal timeline with the time of the log call You can use the _set time_ functions (Python reference: [set_time_sequence](https://ref.rerun.io/docs/python/stable/common/logging_functions/#rerun.set_time_sequence), [set_time_seconds](https://ref.rerun.io/docs/python/stable/common/logging_functions/#rerun.set_time_seconds), [set_time_nanos](https://ref.rerun.io/docs/python/stable/common/logging_functions/#rerun.set_time_nanos)) to associate logs with other timestamps on other timelines. For example: diff --git a/docs/content/getting-started/configure-the-viewer.md b/docs/content/getting-started/configure-the-viewer.md index 2d47dd5bb0c6..21fc71ae590d 100644 --- a/docs/content/getting-started/configure-the-viewer.md +++ b/docs/content/getting-started/configure-the-viewer.md @@ -1,11 +1,11 @@ --- -title: Configure the Viewer +title: Configure the viewer order: 5 --- -Although the Rerun Viewer tries to do a reasonable job of using heuristics to automatically determine -an appropriate layout given the data that you provide, there will always be situations where the heuristic -results don't match the needs of a particular use-case. +By default, the Rerun viewer uses heuristics to automatically determine an appropriate +layout given the data that you provide. However, there will always be situations +where the heuristic results don't match the needs of a particular use-case. Fortunately, almost all aspects of the viewer can be configured via the [Blueprint](../reference/viewer/blueprint.md). diff --git a/docs/content/getting-started/configure-the-viewer/navigating-the-viewer-continued.md b/docs/content/getting-started/configure-the-viewer/navigating-the-viewer-continued.md deleted file mode 100644 index 91c60cb97ce7..000000000000 --- a/docs/content/getting-started/configure-the-viewer/navigating-the-viewer-continued.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Navigating the viewer (continued) -order: 4 ---- - -This guide builds on top of the previous tutorial: -[Navigating the viewer](../navigating-the-viewer.md). Please follow that tutorial first if you haven't already. - -This guide will familiarize you with the basics of using the Rerun Viewer with an example dataset. By the end you should -be comfortable with the following topics: - -- [Configuring views](#configuring-views) -- [Creating new views](#creating-new-views) - -## Configuring views - -Views in Rerun are configured by [Blueprints](../../reference/viewer/blueprint.md). We will now use blueprints to adjust -both an individual entity as well as the contents of a space view itself. - -### Adjusting entity properties - -First, click to select the entity named `points` in the `/ (Spatial)` view in the Blueprint panel. Now, look and the -selection panel -- in addition to the information about the data associated with that entity, you will see a "Blueprint" -section. - -Try toggling "visible" on and off and you will see that the points disappear and reappear. Next, click the control -labeled "visible history" and drag it to the right to increase the value. As you drag farther you will see more points -show up in the view. This is making historical points, from farther back in time visible within the time point of this -view. Because the points are logged in stationary 3D space, aggregating them here gives us a more complete view of the -car. Leave the visible history with a value of 50. 
- - - - - - - viewer walkthrough adjusting visible history screenshot - - -### Modifying the contents of a space view - -Now select the `/ (Spatial)` view itself. We will start by giving this space view a different name. At the very -top of the selection panel you will see a text box labeled "Space view:". Go ahead and change the name to -`Reconstruction`. The name will also update in the blueprint panel on the left. - -Like with the entity selection, you will see a Blueprint section within the Selection panel. This time, click on the -button labeled "Add/Remove Entities". This pop-up shows all of the entities that were logged as part of this session. -You can click on the "+" or "-" buttons to add or remove entities from this view. Go ahead and remove the entity called -"keypoints," and then add them back again. Unlike hiding an entity, you will notice that as you remove entities they -completely disappear from the blueprint panel on the left. Entities that are incompatible with the selected view will be -grayed out. For example, you cannot add a scalar to a spatial scene. - - - - - - - viewer walkthrough modifying contents of a space view screenshot - - -## Creating new views - -New views & view containers (grid, vertical, etc.) can be created using the "+" button at the top of the Blueprint panel or -from the selection panel when selecting a container. - -After creating a view you usually want to proceed to editing its origin and query (which entities are shown) in the selection panel. - -Your view layout might be feeling a little cluttered now. You can quickly hide views you're -not using from the blueprint panel by hovering over the view and then clicking the icon that looks like an eye. Go ahead -and hide the `image` and `avg_reproj_err` views, and collapse the expanded timeline panel using the button in the upper -right corner. Note that even with the timeline collapsed you still have access to timeline controls, including a slider. - - - - - - - viewer walkthrough toggle visibility screenshot - - -### Reusing what you've learned - -Finally, use what we covered in the previous section to change the contents of this view. Select the new `camera` view, -then choose "Add/remove entities." Remove the 2D "keypoints" and add in the 3D "points." Note that these points do not -have visible history turned on -- that's because the blueprint is part of the view and not part of the entity. -Select the points within this view by clicking on them in the blueprint or the view itself, and then give them visible -history as well. When you are done, your view should look like this: - - - - - - - viewer walkthrough camera view screenshot - - -Now move the slider back and forth and see what happens. Even though they are both views of the same camera and point -entities, they behave quite differently. On the top the camera moves relative to the car, while on the bottom the car -moves relative to the camera. This is because the new views have _different_ space roots, and Rerun uses the transform -system to transform or project all data into the space root for the given view. - -## Conclusion - -That brings us to the end of this walkthrough. To recap, you have learned how to: - -- Configure entity blueprint properties. -- Add and remove entities from views. -- Create and configure new views. -- And some basics of how transforms work. - -Again, if you ran into any issues following this guide, please don't hesitate to [open an issue](https://github.com/rerun-io/rerun/issues/new/choose). 
- -### Up next - -To get started with writing a program to logging data with the Rerun SDK see the [getting started guides](../quick-start). - -To see and explore other data, you can check out the [examples](/examples). - -For deeper context on the ideas covered here, consult the [Concept overview](../../concepts.md). diff --git a/docs/content/getting-started/configure-the-viewer/through-code-tutorial.md b/docs/content/getting-started/configure-the-viewer/through-code-tutorial.md index cdecb3f7a07b..7157fe07ac6d 100644 --- a/docs/content/getting-started/configure-the-viewer/through-code-tutorial.md +++ b/docs/content/getting-started/configure-the-viewer/through-code-tutorial.md @@ -3,14 +3,28 @@ title: Configure the viewer through code order: 3 --- -This tutorial will walk you through using the Blueprint APIs to better control the -layout and appearance of your data in the Rerun Viewer in Python. +This tutorial will walk you through using the +[Blueprint APIs](../../howto/configure-viewer-through-code.md) to better control +the layout and appearance of your data in the Rerun Viewer in Python. This walkthrough is based on the [stock charts](https://github.com/rerun-io/rerun/tree/main/examples/python/blueprint_stocks?speculative-link) example. The main differences between this tutorial and the linked example are related to additional processing of command-line flags, which are omitted here for simplicity. -## Create an environment for you example +All of the examples in this tutorial use the exact same data. However, by changing the blueprint using +small statements such as: +```python +rrb.Blueprint( + rrb.Vertical( + rrb.TextDocumentView(name="Info", origin="/stocks/AAPL/info"), + rrb.TimeSeriesView(name="Chart", origin="/stocks/AAPL"), + row_shares=[1, 4], + ) +) +``` +we will completely change the way the data is presented. + +## Create an environment for the example We start by creating a new virtual environment and installing the Rerun SDK along with the dependencies we will use in this example. diff --git a/docs/content/getting-started/data-in/cpp.md b/docs/content/getting-started/data-in/cpp.md index b52dfe809b10..c2d7bbfb8cd1 100644 --- a/docs/content/getting-started/data-in/cpp.md +++ b/docs/content/getting-started/data-in/cpp.md @@ -7,8 +7,7 @@ In this section we'll log and visualize our first non-trivial dataset, putting m In a few lines of code, we'll go from a blank sheet to something you don't see every day: an animated, interactive, DNA-shaped abacus: This guide aims to go wide instead of deep. diff --git a/docs/content/getting-started/data-in/python.md b/docs/content/getting-started/data-in/python.md index 73d73b6e40b5..7b587def3bbe 100644 --- a/docs/content/getting-started/data-in/python.md +++ b/docs/content/getting-started/data-in/python.md @@ -7,8 +7,7 @@ In this section we'll log and visualize our first non-trivial dataset, putting m In a few lines of code, we'll go from a blank sheet to something you don't see every day: an animated, interactive, DNA-shaped abacus: This guide aims to go wide instead of deep. @@ -27,9 +26,16 @@ Start by opening your editor of choice and creating a new file called `dna_examp The first thing we need to do is to import `rerun` and initialize the SDK by calling [`rr.init`](https://ref.rerun.io/docs/python/stable/common/initialization_functions/#rerun.init). This init call is required prior to using any of the global logging calls, and allows us to name our recording using an `ApplicationId`. 
+We also import some other utilities we will use later in the example. + ```python import rerun as rr +from math import tau +import numpy as np +from rerun.utilities import build_color_spiral +from rerun.utilities import bounce_lerp + rr.init("rerun_example_dna_abacus") ``` @@ -66,30 +72,12 @@ And with that, we're ready to start sending out data: By default, the SDK will start a viewer in another process and automatically pipe the data through. There are other means of sending data to a viewer as we'll see at the end of this section, but for now this default will work great as we experiment. ---- - -The following sections will require importing a few different things to your script. -We will do so incrementally, but if you just want to update your imports once and call it a day, feel free to add the following to the top of your script: - -```python -from math import tau -import numpy as np -from rerun.utilities import build_color_spiral -from rerun.utilities import bounce_lerp -``` - ---- - ## Logging our first points The core structure of our DNA looking shape can easily be described using two point clouds shaped like spirals. Add the following to your file: ```python -# new imports -from rerun.utilities import build_color_spiral -from math import tau - NUM_POINTS = 100 # points and colors are both np.array((NUM_POINTS, 3)) @@ -167,10 +155,6 @@ rr.log( Which only leaves the beads: ```python -# new imports -import numpy as np -from rerun.utilities import bounce_lerp - offsets = np.random.rand(NUM_POINTS) beads = [bounce_lerp(points1[n], points2[n], offsets[n]) for n in range(NUM_POINTS)] colors = [[int(bounce_lerp(80, 230, offsets[n] * 2))] for n in range(NUM_POINTS)] @@ -215,10 +199,8 @@ Rerun has rich support for time: whether you want concurrent or disjoint timelin Let's add our custom timeline: ```python -# new imports -from rerun.utilities import bounce_lerp - time_offsets = np.random.rand(NUM_POINTS) + for i in range(400): time = i * 0.01 rr.set_time_seconds("stable_time", time) diff --git a/docs/content/getting-started/data-in/rust.md b/docs/content/getting-started/data-in/rust.md index 7ef473309956..c6c3ee682228 100644 --- a/docs/content/getting-started/data-in/rust.md +++ b/docs/content/getting-started/data-in/rust.md @@ -7,8 +7,7 @@ In this section we'll log and visualize our first non-trivial dataset, putting m In a few lines of code, we'll go from a blank sheet to something you don't see every day: an animated, interactive, DNA-shaped abacus: This guide aims to go wide instead of deep. diff --git a/docs/content/getting-started/navigating-the-viewer.md b/docs/content/getting-started/navigating-the-viewer.md index 65b0119856a1..f3f7fbfe2390 100644 --- a/docs/content/getting-started/navigating-the-viewer.md +++ b/docs/content/getting-started/navigating-the-viewer.md @@ -244,12 +244,7 @@ Again, if you ran into any issues following this guide, please don't hesitate to ### Up next -The followup to this tutorial involves further configuring how the viewer displays the data. - -- See: [Configure the viewer interactively](./configure-the-viewer/interactively.md) - -To get started with writing a program to logging data with the Rerun SDK see the [getting started guides](./quick-start). - -To see and explore other data, you can check out the [examples](/examples). - -For deeper context on the ideas covered here, consult the [Concept overview](../concepts.md). +- [Get started](./quick-start) by writing a program to log data with the Rerun SDK. 
+- Learn how to further [configure the viewer](./configure-the-viewer) to suit your data. +- Explore other [examples of using Rerun](/examples). +- Consult the [concept overview](../concepts.md) for more context on the ideas covered here. diff --git a/docs/content/getting-started/quick-start.md b/docs/content/getting-started/quick-start.md index cb8b97e1988e..378e620051bb 100644 --- a/docs/content/getting-started/quick-start.md +++ b/docs/content/getting-started/quick-start.md @@ -3,7 +3,12 @@ title: Quick start order: 1 --- -Dive right into using the Rerun SDK with your favorite programming language! +Rerun is an SDK and engine for visualizing and interacting with multimodal data +streams. + +The primary way to get data into the Rerun viewer is to write code in +one of the supported languages. To learn more about installing Rerun and +the basics of getting started, choose your language: * [C++](./quick-start/cpp.md) * [Python](./quick-start/python.md) diff --git a/docs/content/getting-started/what-is-rerun.md b/docs/content/getting-started/what-is-rerun.md index 7b5546aabf54..bc40a17dd238 100644 --- a/docs/content/getting-started/what-is-rerun.md +++ b/docs/content/getting-started/what-is-rerun.md @@ -2,13 +2,11 @@ title: What is Rerun? order: 0 --- -To get a feeling of what you can do with Rerun -- browse the [example gallery](/examples) or -- try Rerun directly [in your browser](/viewer?speculative-link). -## What is Rerun? - -Rerun is an SDK and engine for visualizing and interacting with multimodal data streams. +Rerun is an SDK and viewer for visualizing and interacting with multimodal data streams. +The SDK lets you send data from anywhere, and the viewer, +which consists of an in-memory database and a visualization engine, +collects the data and aligns it so that you can scroll back and forth in time to understand what happened. Rerun is - Free to use @@ -18,23 +16,21 @@ Rerun is - Built in Rust to be cross platform and fast - Open source, dual licensed under MIT and Apache 2 -Rerun is used by engineers and researchers in fields like computer vision and robotics -to verify, debug, and demo. - -For a list of built-in data types, see the [Types](../reference/types.md) section. +Rerun is used by engineers and researchers in fields like robotics, +spatial computing, 2D/3D simulation, and finance to verify, debug, and demo. ## How do you use it? - - - - - + + + + + 1. Stream multimodal data from your code by logging it with the Rerun SDK 2. Visualize and interact with live or recorded streams, whether local or remote -3. Interactively build layouts and customize visualizations +3. Build layouts and customize visualizations interactively in the UI or through the SDK 4. Extend Rerun when you need to ## How does it work? @@ -43,7 +39,6 @@ Rerun goes to extreme lengths to make handling and visualizing multimodal data streams easy and performant. - ## Can't find what you're looking for? - Join us in the [Rerun Community Discord](https://discord.gg/xwcxHUjD35) diff --git a/docs/content/howto/extend/custom-data.md b/docs/content/howto/extend/custom-data.md index d5bb8d0c9830..bdc0b3df3e6f 100644 --- a/docs/content/howto/extend/custom-data.md +++ b/docs/content/howto/extend/custom-data.md @@ -5,7 +5,18 @@ description: How to use Rerun with custom data --- Rerun comes with many pre-built [Types](../../reference/types.md) that you can use out of the box. 
 As long as your own data can be decomposed into Rerun [components](../../reference/types/components.md) or can be serialized with [Apache Arrow](https://arrow.apache.org/), you can log it directly without needing to recompile Rerun.
-All you need to do is implement the `AsComponents` [Python protocol](https://ref.rerun.io/docs/python/0.9.0/common/interfaces/#rerun.AsComponents) or [Rust trait](https://docs.rs/rerun/latest/rerun/trait.AsComponents.html), which means implementing the function, `as_component_batches()`.
+For Python we have a helper for this, called [`AnyValues`](https://ref.rerun.io/docs/python/main/common/custom_data/), allowing you to easily attach custom values to any entity instance:
+
+```python
+rr.log(
+    "my_entity",
+    rr.AnyValues(
+        confidence=[1.2, 3.4, 5.6],
+        description="Bla bla bla…",
+    ),
+)
+```
+
+You can also create your own component by implementing the `AsComponents` [Python protocol](https://ref.rerun.io/docs/python/0.9.0/common/interfaces/#rerun.AsComponents) or [Rust trait](https://docs.rs/rerun/latest/rerun/trait.AsComponents.html), which means implementing the function `as_component_batches()`.
 
 ## Remapping to a Rerun archetype
 Let's start with a simple example where you have your own point cloud class that is perfectly representable as a Rerun archetype.
diff --git a/docs/content/reference/migration.md b/docs/content/reference/migration.md
index a32edd8a4283..e5e473e4a2d2 100644
--- a/docs/content/reference/migration.md
+++ b/docs/content/reference/migration.md
@@ -1,5 +1,5 @@
 ---
 title: Migration Guides
 order: 900
-redirect: reference/migration/migration-0-13
+redirect: reference/migration/migration-0-15
 ---
diff --git a/docs/content/reference/migration/migration-0-13.md b/docs/content/reference/migration/migration-0-13.md
index 26a6908f8f68..bc1f9ac2b619 100644
--- a/docs/content/reference/migration/migration-0-13.md
+++ b/docs/content/reference/migration/migration-0-13.md
@@ -1,6 +1,6 @@
 ---
 title: Migrating from 0.12 to 0.13
-order: 11
+order: 130
 ---
 
 ## `TimeSeriesScalar` deprecated in favor of [Scalar](../types/archetypes/scalar.md) & [SeriesLine](../types/archetypes/series_line.md)/[SeriesPoint](../types/archetypes/series_point.md)
diff --git a/docs/content/reference/migration/migration-0-15.md b/docs/content/reference/migration/migration-0-15.md
new file mode 100644
index 000000000000..7f08871e6ddd
--- /dev/null
+++ b/docs/content/reference/migration/migration-0-15.md
@@ -0,0 +1,32 @@
+---
+title: Migrating from 0.14 to 0.15
+order: 150
+---
+
+## `InstanceKey` removed from our logging APIs
+In PR [#5395](https://github.com/rerun-io/rerun/pull/5395) we removed the `InstanceKey` component from all our archetypes.
+
+What were instance keys?
+
+In Rerun, each entity can be a batch of _instances_.
+For instance, a point cloud is usually logged as one entity where each point is an instance of that entity.
+An entity is made up of several components (e.g. position, color, …), and you may log these different components separately.
+For example, this lets you update only the colors of a point cloud, keeping the same positions.
+
+Instance keys were a way to assign identities to these instances so that you could update the components of a specific set of instances.
+This was a complicated and little-used feature that caused far more complexity in our code than it was worth.
+
+For now the `InstanceKey` component as such still remains, but is always assigned the integer index at log time (i.e. `0, 1, 2, 3, …`).
+This means that if you first log the positions `A, B, C` and then later log the colors `red, green, blue` to the same entity, they will always be matched as `(A, red), (B, green), (C, blue)`. +We still support _splatting_, where you log one single color for the whole point cloud. + +If you were relying on `InstanceKey` solely to identify your instances when inspecting them in the viewer, then you can replace it with a custom value using [custom data](../../howto/extend/custom-data.md): + +```py +rr.log( + "my/points", + rr.AnyValues(point_id=[17, 42, 103]), +) +``` + +In the future we plan on introducing a better way to identify and track instances over time. diff --git a/docs/content/reference/migration/migration-0-9.md b/docs/content/reference/migration/migration-0-9.md index f36080950ca8..3a65d3bcbde2 100644 --- a/docs/content/reference/migration/migration-0-9.md +++ b/docs/content/reference/migration/migration-0-9.md @@ -1,6 +1,6 @@ --- title: Migrating from 0.8 to 0.9 -order: 12 +order: 90 --- Rerun-0.9 introduces a new set of type-oriented logging APIs built on top of an updated, more concrete, diff --git a/docs/content/reference/roadmap.md b/docs/content/reference/roadmap.md index da56878ad4bd..24547d74dcb5 100644 --- a/docs/content/reference/roadmap.md +++ b/docs/content/reference/roadmap.md @@ -16,20 +16,22 @@ This page is meant to give an high level overview of ongoing and planned work. ## Roadmap of major feature areas -### Early January 2024: Release 0.12 -- Parallelized rendering and processing for all views -- Plugin system for loading any file into Rerun - -### Near term: Now - Q1 2024 -- End to end performance for high frequency time series logging -- Layout and configuration from code (blueprint) -- Datasets that are bigger than RAM for the native viewer -- CLI for manipulating and exporting data from rrd files - -### Medium term (Q2-3 2024) -- Broader coverage of robotics data types +### Early April 2024: Release 0.15 +- Layout and viewport content from code (blueprint part 1) +- Data-loader plugins callable from the SDK +- Linux ARM64 support in pre-built artifacts + +### Near term: Now - end of Q2 2024 +- Property overrides from code (blueprint part 2) + - Includes setting visible time range from code +- Broader coverage of robotics and spatial computing data types - Extension mechanisms for dynamically reading from external storage - For example files like: VRS, MCAP, or mp4 + - Also brings support for datasets that are bigger than RAM in the native viewer + +### Medium term (Q3-4 2024) +- Make Rerun easier to use when training and evaluating ML models +- Deeper support for modalities like text and audio - Callbacks and the ability to build interactive applications with Rerun - For example: UI for tweaking configs, custom data annotation tools, etc diff --git a/docs/content/reference/types/components/instance_key.md b/docs/content/reference/types/components/instance_key.md index aa626ad7f7ff..099005b47318 100644 --- a/docs/content/reference/types/components/instance_key.md +++ b/docs/content/reference/types/components/instance_key.md @@ -4,6 +4,13 @@ title: "InstanceKey" A unique numeric identifier for each individual instance within a batch. +Instance keys are automatically assigned by the `rerun` library and should not be set manually. + +The instance key is just the index of the instance within the batch, +i.e. the first point in a point cloud has `InstanceKey = 0`, the second `InstanceKey = 1`, and so on. 
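+
+As an illustration only, here is a minimal Python sketch of this index-based pairing. The
+entity path and values are arbitrary, and the bare component-batch call at the end is an
+assumption about the Python SDK surface, so adapt it to your SDK version:
+
+```python
+import rerun as rr
+
+rr.init("rerun_example_instance_key", spawn=True)
+
+# Three instances are created; they implicitly get the instance keys 0, 1, 2.
+rr.log("points", rr.Points3D([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]))
+
+# Components logged later to the same entity are joined by those same indices:
+# instance 0 → red, instance 1 → green, instance 2 → blue.
+# NOTE: logging a bare component batch like this is illustrative; the exact API may differ.
+rr.log("points", [rr.components.ColorBatch([[255, 0, 0], [0, 255, 0], [0, 0, 255]])])
+```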
+ +We plan to remove the `InstanceKey` component in the near future. + ## Fields * value: `u64` diff --git a/docs/content/reference/viewer/blueprint.md b/docs/content/reference/viewer/blueprint.md index 234a1a7bcdee..e4b1c40593bd 100644 --- a/docs/content/reference/viewer/blueprint.md +++ b/docs/content/reference/viewer/blueprint.md @@ -12,7 +12,8 @@ Everything visible in the [Viewport](viewport.md) has a representation here, making it an easy way to select a Space View and the [Entities](../../concepts/entity-component.md) it shows. - screenshot of the blueprint view + + diff --git a/docs/cspell.json b/docs/cspell.json index a2e0288db9dc..01be2bbadcd0 100644 --- a/docs/cspell.json +++ b/docs/cspell.json @@ -25,6 +25,7 @@ "arrowified", "artefacts", "astype", + "atexit", "Aubry", "authkey", "autogen'd", @@ -48,10 +49,10 @@ "binsearching", "binstall", "binutils", - "blendshape", - "blendshapes", "Birger", "Birkl", + "blendshape", + "blendshapes", "booktitle", "braindump", "bringup", @@ -88,10 +89,10 @@ "DCMAKE", "deallocate", "deallocation", - "denoising", - "Denoising", "debuginfo", "dedup", + "denoising", + "Denoising", "depgraph", "deskewed", "deskewing", @@ -99,6 +100,7 @@ "Dhariwal", "dicom", "Dilara", + "discoverability", "docstring", "docstrings", "Doersch", @@ -125,9 +127,9 @@ "everytime", "ewebsock", "extrinsics", - "farbfeld", - "FACEMESH", "facemesh", + "FACEMESH", + "farbfeld", "Farooq", "Feichtenhofer", "fieldname", @@ -139,6 +141,7 @@ "frontmatter", "gcloud", "Georgios", + "getpid", "Girshick", "Gkioxari", "glb", @@ -181,6 +184,7 @@ "keypoint", "keypointid", "keypoints", + "Kinect", "Kirillov", "klass", "kpreid", @@ -233,6 +237,7 @@ "MΓΌller", "multimodal", "multiview", + "murgeljm", "mydata", "myfile", "myfiles", @@ -251,6 +256,8 @@ "noqa", "numpages", "numpy", + "nusc", + "nuscene", "nuScenes", "nyud", "obbs", @@ -275,6 +282,7 @@ "Piotr", "pipx", "pixi", + "playroooms", "pngcrush", "pointcloud", "Pollefeys", @@ -291,11 +299,12 @@ "pypi", "pyright", "pytest", - "pytz", "Pythonic", + "pytz", "quickstart", "randn", "randr", + "randrange", "rasterizer", "Ravi", "readback", @@ -312,6 +321,7 @@ "repr", "reproj", "reproject", + "reprojection", "reqwest", "RGBA", "rgbd", @@ -335,8 +345,9 @@ "SCCACHE", "scipy", "scrollwheel", + "segmentations", + "segs", "segs", - "Segmentations", "serde", "Shaohui", "Shap", @@ -364,8 +375,8 @@ "subsampled", "superquadrics", "tableofcontents", - "tensorboard", "taplo", + "tensorboard", "Tete", "Tewari", "Texcoord", @@ -384,6 +395,7 @@ "Tpng", "tqdm", "trackpad", + "traversaro", "trimesh", "Trimesh", "trimleft", // doxygen command @@ -391,17 +403,18 @@ "tungstenite", "turtlebot", "TURTLEBOT", + "tvec", "UI's", + "umap", + "UMAP", "uncollapsed", - "unmultiplied", "uncond", + "unmultiplied", "Unorm", "unsetting", "upcasting", "upsampling", "upvote", - "UMAP", - "umap", "urdf", "URDF", "ureq", @@ -414,8 +427,10 @@ "Viktor", "virtualenv", "visualizability", - "voxels", "Vizzo", + "voxel", + "voxels", + "voxvol", "vstack", "vsuryamurthy", "vulkan", diff --git a/docs/snippets/all/any_values.py b/docs/snippets/all/any_values.py index 5dc11bef1865..36496ee0d8d0 100644 --- a/docs/snippets/all/any_values.py +++ b/docs/snippets/all/any_values.py @@ -6,7 +6,7 @@ rr.log( "any_values", rr.AnyValues( - foo=[1.2, 3.4, 5.6], - bar="hello world", + confidence=[1.2, 3.4, 5.6], + description="Bla bla bla…", ), ) diff --git a/examples/README.md b/examples/README.md index 2be0e8cdc9eb..a668e06f8c47 100644 --- a/examples/README.md +++ b/examples/README.md @@ -28,26 +28,21 @@ 
examples/
      main.rs
  ```
 
-The important part is that each example has a `README.md` file. This file contains a brief description of the example, as well as installation/usage instructions. The `README.md` file also contains metadata in the form of frontmatter:
-```
----
-title: Text Logging
-python: https://github.com/rerun-io/rerun/tree/latest/examples/python/detect_and_track_objects/main.py
-tags: [2D, huggingface, object-detection, object-tracking, opencv]
----
+The important part is that each example has a `README.md` file. The contents of this `README.md` are used to render the examples in [the documentation](https://rerun.io/examples).
+Check out [`examples/python/template/README.md`](python/template/README.md) to see its format.
 
-…
+You are also encouraged to add a _short_ `DESCRIPTION = """…"""` markdown to the top of the `main.py` and then log it with:
+```py
+rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True)
 ```
 
-The contents of this `README.md` file and its frontmatter are used to render the examples in [the documentation](https://rerun.io/examples). Individual examples are currently "stitched together" to form one large markdown file for every category of examples (`artificial-data`, `real-data`).
-
-The `manifest.toml` file describes the structure of the examples contained in this repository. Only the examples which appear in the manifest are included in the [generated documentation](https://rerun.io/examples). The file contains a description of its own format.
-
 ## Adding a new example
 
 You can base your example off of `python/template` or `rust/template`.
 Once it's ready to be displayed in the docs, add it to the [manifest](./manifest.toml).
 
+The `manifest.toml` file describes the structure of the examples contained in this repository. Only the examples which appear in the manifest are included in the [generated documentation](https://rerun.io/examples). The file contains a description of its own format.
+
 If you want to run the example on CI and include it in the in-viewer example page,
 add a `channel` entry to its README frontmatter.
 The available channels right now are:
 - `main` for simple/fast examples built on each PR and the `main` branch
diff --git a/examples/cpp/dna/README.md b/examples/cpp/dna/README.md
index 0919fea73036..a9cb282cd91c 100644
--- a/examples/cpp/dna/README.md
+++ b/examples/cpp/dna/README.md
@@ -1,12 +1,12 @@
+Simple example of logging point and line primitives to draw a 3D helix.
@@ -16,9 +16,6 @@ channel = "main"
 
-Simple example of logging point and line primitives to draw a 3D helix.
-
-
 To build it from a checkout of the repository (requires a Rust toolchain):
 ```bash
 cmake .
diff --git a/examples/cpp/eigen_opencv/README.md b/examples/cpp/eigen_opencv/README.md
index 0498388e78d0..d13a57e48bc1 100644
--- a/examples/cpp/eigen_opencv/README.md
+++ b/examples/cpp/eigen_opencv/README.md
@@ -6,9 +6,7 @@ thumbnail = "https://static.rerun.io/eigen-and-opencv-c-integration/5d271725bb92
 thumbnail_dimensions = [480, 480]
 -->
 
-This is a minimal CMake project that shows how to use Rerun in your code in conjunction with [Eigen](https://eigen.tuxfamily.org/) and [OpenCV](https://opencv.org/).
-You can find the example at <https://github.com/rerun-io/cpp-example-opencv-eigen>.
 
@@ -17,3 +15,61 @@ You can find the example at
+
+This is a minimal CMake project that shows how to use Rerun in your code in conjunction with [Eigen](https://eigen.tuxfamily.org/) and [OpenCV](https://opencv.org/).
+
+
+# Used Rerun types
+[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d)
+
+# Background
+This C++ example demonstrates the integration of Rerun with the Eigen and OpenCV libraries.
+Eigen handles 3D point calculations and camera orientations, while OpenCV assists with image processing tasks like reading and converting images.
+
+# Logging and visualizing with Rerun
+
+The visualizations in this example were created with the following Rerun code:
+
+
+## 3D points
+The positions of 3D points are logged to the "world/points_from_vector" and "world/points_from_matrix" entities using the [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype.
+```cpp
+rec.log("world/points_from_vector", rerun::Points3D(points3d_vector));
+```
+
+```cpp
+rec.log("world/points_from_matrix", rerun::Points3D(points3d_matrix));
+```
+
+## Pinhole camera
+A pinhole camera is logged to "world/camera" using the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) archetype.
+Additionally, the 3D transformation of the camera, including its position and orientation, is logged using the [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetype.
+```cpp
+rec.log(
+    "world/camera",
+    rerun::Pinhole::from_focal_length_and_resolution({500.0, 500.0}, {640.0, 480.0})
+);
+```
+
+```cpp
+rec.log(
+    "world/camera",
+    rerun::Transform3D(
+        rerun::Vec3D(camera_position.data()),
+        rerun::Mat3x3(camera_orientation.data())
+    )
+);
+```
+
+## Images
+Images are logged using the [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) archetype. Two methods are demonstrated: logging images with a tensor buffer and logging images by passing a pointer to the image data.
+```cpp
+// Log image to rerun using the tensor buffer adapter defined in `collection_adapters.hpp`.
+rec.log("image0", rerun::Image(tensor_shape(img), rerun::TensorBuffer::u8(img)));
+
+// Or by passing a pointer to the image data.
+rec.log("image1", rerun::Image(tensor_shape(img), reinterpret_cast<const uint8_t*>(img.data)));
+```
+
+# Run the code
+You can find the build instructions here: [C++ Example with OpenCV and Eigen](https://github.com/rerun-io/cpp-example-opencv-eigen/blob/main/README.md)
diff --git a/examples/cpp/incremental_logging/README.md b/examples/cpp/incremental_logging/README.md
index 90c8dce2bdfa..39db773b8f6b 100644
--- a/examples/cpp/incremental_logging/README.md
+++ b/examples/cpp/incremental_logging/README.md
@@ -1,11 +1,11 @@
+Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames.
@@ -15,8 +15,8 @@ thumbnail_dimensions = [480, 301]
 
-Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames.
+## Run the code
 To build it from a checkout of the repository (requires a Rust toolchain):
 ```bash
diff --git a/examples/cpp/kiss-icp/README.md b/examples/cpp/kiss-icp/README.md
index 9096b859cb5f..cbeb1326bd00 100644
--- a/examples/cpp/kiss-icp/README.md
+++ b/examples/cpp/kiss-icp/README.md
@@ -2,11 +2,12 @@
 title = "KISS-ICP"
 tags = ["3D", "Point cloud"]
 source = "https://github.com/rerun-io/kiss-icp"
-description = "Visualizes the KISS-ICP LiDAR odometry pipeline on the NCLT dataset."
thumbnail = "https://static.rerun.io/kiss-icp-screenshot/881ec7c7c0a0e50ec5d78d82875efaf3bb3c6e01/480w.png" thumbnail_dimensions = [480, 288] --> +Visualizes the KISS-ICP LiDAR odometry pipeline on the NCLT dataset. + Estimating the odometry is a common problem in robotics and in the [2023, "KISS-ICP: In Defense of Point-to-Point ICP -- Simple, Accurate, and Robust Registration If Done the Right Way" Ignacio Vizzo et al.](https://arxiv.org/abs/2209.15397) they show how one can use an ICP (iterative closest point) algorithm to robustly and accurately estimate poses from LiDAR data. We will demonstrate the KISS-ICP pipeline on the [NCLT dataset](http://robots.engin.umich.edu/nclt/) along with some brief explanations, for a more detailed explanation you should look at the [original paper](https://arxiv.org/abs/2209.15397). diff --git a/examples/cpp/ros_bridge/README.md b/examples/cpp/ros_bridge/README.md index 5ed6f6115946..82c4221e1b16 100644 --- a/examples/cpp/ros_bridge/README.md +++ b/examples/cpp/ros_bridge/README.md @@ -1,5 +1,5 @@ - -This is an example that shows how to use [Rerun](https://github.com/rerun-io/rerun)'s C++ API to log and view [VRS](https://github.com/facebookresearch/vrs) files. - -> VRS is a file format optimized to record & playback streams of sensor data, such as images, audio samples, and any other discrete sensors (IMU, temperature, etc), stored in per-device streams of time-stamped records. - -You can find the example at . - + +This is an example that shows how to use Rerun's C++ API to log and view [VRS](https://github.com/facebookresearch/vrs) files. + + +# Used Rerun types + +[`Arrows3D`](https://www.rerun.io/docs/reference/types/archetypes/arrows3d), [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) + +# Background +This C++ example demonstrates how to visualize VRS files with Rerun. +VRS is a file format optimized to record & playback streams of sensor data, such as images, audio samples, and any other discrete sensors (IMU, temperature, etc), stored in per-device streams of time-stamped records. 
+ +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +## 3D arrows +```cpp +void IMUPlayer::log_accelerometer(const std::array& accelMSec2) { + _rec->log(_entity_path + "/accelerometer", rerun::Arrows3D::from_vectors({accelMSec2})); + // … existing code for scalars … +} +``` + +## Scalars +```cpp +void IMUPlayer::log_accelerometer(const std::array& accelMSec2) { + // … existing code for Arrows3D … + _rec->log(_entity_path + "/accelerometer/x", rerun::Scalar(accelMSec2[0])); + _rec->log(_entity_path + "/accelerometer/y", rerun::Scalar(accelMSec2[1])); + _rec->log(_entity_path + "/accelerometer/z", rerun::Scalar(accelMSec2[2])); +} +``` + +```cpp +void IMUPlayer::log_gyroscope(const std::array& gyroRadSec) { + _rec->log(_entity_path + "/gyroscope/x", rerun::Scalar(gyroRadSec[0])); + _rec->log(_entity_path + "/gyroscope/y", rerun::Scalar(gyroRadSec[1])); + _rec->log(_entity_path + "/gyroscope/z", rerun::Scalar(gyroRadSec[2])); +} +``` + +```cpp +void IMUPlayer::log_magnetometer(const std::array& magTesla) { + _rec->log(_entity_path + "/magnetometer/x", rerun::Scalar(magTesla[0])); + _rec->log(_entity_path + "/magnetometer/y", rerun::Scalar(magTesla[1])); + _rec->log(_entity_path + "/magnetometer/z", rerun::Scalar(magTesla[2])); +} +``` + +## Images +```cpp +_rec->log( + _entity_path, + rerun::Image({ + frame->getHeight(), + frame->getWidth(), + frame->getSpec().getChannelCountPerPixel()}, + frame->getBuffer() + ) +); +``` + +## Text document +```cpp +_rec->log_timeless(_entity_path + "/configuration", rerun::TextDocument(layout_str)); +``` + +# Run the code +You can find the build instructions here: [C++ Example: VRS Viewer](https://github.com/rerun-io/cpp-example-vrs) diff --git a/examples/python/arflow/README.md b/examples/python/arflow/README.md index c9a0cc0935af..a9be17a4c53f 100644 --- a/examples/python/arflow/README.md +++ b/examples/python/arflow/README.md @@ -6,6 +6,10 @@ thumbnail = "https://static.rerun.io/arflow/a6b509af10a42b3c7ad3909d44e972a3cb1a thumbnail_dimensions = [480, 480] --> +This is an external project that uses Rerun as a core component. + +## External project presentation + [Paper](https://doi.org/10.1145/3638550.3643617) | [BibTeX](#bibtex) | [Project Page](https://cake.wpi.edu/ARFlow/) | [Video](https://youtu.be/mml8YrCgfTk) diff --git a/examples/python/arkit_scenes/README.md b/examples/python/arkit_scenes/README.md index f640d1a2d68d..f205e3f193ba 100644 --- a/examples/python/arkit_scenes/README.md +++ b/examples/python/arkit_scenes/README.md @@ -1,12 +1,13 @@ +This example visualizes the [ARKitScenes dataset](https://github.com/apple/ARKitScenes/) using Rerun. +The dataset contains color images, depth images, the reconstructed mesh, and labeled bounding boxes around furniture. @@ -16,9 +17,6 @@ channel = "main" ARKit Scenes screenshot -This example visualizes the [ARKitScenes dataset](https://github.com/apple/ARKitScenes/) using Rerun. The dataset -contains color images, depth images, the reconstructed mesh, and labeled bounding boxes around furniture. 
- ## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), diff --git a/examples/python/arkit_scenes/main.py b/examples/python/arkit_scenes/main.py index ff0cbbda27a9..914b7869a55e 100755 --- a/examples/python/arkit_scenes/main.py +++ b/examples/python/arkit_scenes/main.py @@ -16,6 +16,15 @@ from scipy.spatial.transform import Rotation as R from tqdm import tqdm +DESCRIPTION = """ +# ARKitScenes +This example visualizes the [ARKitScenes dataset](https://github.com/apple/ARKitScenes/) using Rerun. The dataset +contains color images, depth images, the reconstructed mesh, and labeled bounding boxes around furniture. + +The full source code for this example is available +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/arkit_scenes). +""".strip() + Color = Tuple[float, float, float, float] # hack for now since dataset does not provide orientation information, only known after initial visual inspection @@ -30,58 +39,6 @@ assert len(ORIENTATION) == len(AVAILABLE_RECORDINGS) assert set(ORIENTATION.keys()) == set(AVAILABLE_RECORDINGS) -DESCRIPTION = """ -# ARKit Scenes -This example visualizes the [ARKitScenes dataset](https://github.com/apple/ARKitScenes/) using Rerun. The dataset -contains color images, depth images, the reconstructed mesh, and labeled bounding boxes around furniture. - -## How it was made -The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/arkit_scenes/main.py). - -### Moving RGB-D camera -To log a moving RGB-D camera we need to log four objects: the pinhole camera (intrinsics), the camera pose -(extrinsics), the color image and the depth image. - -The [rr.Pinhole archetype](https://www.rerun.io/docs/reference/types/archetypes/pinhole) is logged to -[world/camera_lowres](recording://world/camera_lowres) to define the intrinsics of the camera. This -determines how to go from the 3D camera frame to the 2D image plane. The extrinsics are logged as an -[rr.Transform3D archetype](https://www.rerun.io/docs/reference/types/archetypes/transform3d) to the -[same entity world/camera_lowres](recording://world/camera_lowres). Note that we could also log the extrinsics to -`world/camera` and the intrinsics to `world/camera/image` instead. Here, we log both on the same entity path to keep -the paths shorter. - -The RGB image is logged as an -[rr.Image archetype](https://www.rerun.io/docs/reference/types/archetypes/image) to the -[world/camera_lowres/rgb entity](recording://world/camera_lowres/rgb) as a child of the intrinsics + extrinsics -entity described in the previous paragraph. Similarly the depth image is logged as an -[rr.DepthImage archetype](https://www.rerun.io/docs/reference/types/archetypes/depth_image) to -[world/camera_lowres/depth](recording://world/camera_lowres/depth). - -### Ground-truth mesh -The mesh is logged as an [rr.Mesh3D archetype](https://www.rerun.io/docs/reference/types/archetypes/mesh3d). -In this case the mesh is composed of mesh vertices, indices (i.e., which vertices belong to the same face), and vertex -colors. 
Given a `trimesh.Trimesh` the following call is used to log it to Rerun -```python -rr.log( - "world/mesh", - rr.Mesh3D( - vertex_positions=mesh.vertices, - vertex_colors=mesh.visual.vertex_colors, - indices=mesh.faces, - ), - timeless=True, -) -``` -Here, the mesh is logged to the [world/mesh entity](recording://world/mesh) and is marked as timeless, since it does not -change in the context of this visualization. - -### 3D bounding boxes -The bounding boxes around the furniture is visualized by logging the -[rr.Boxes3D archetype](https://www.rerun.io/docs/reference/types/archetypes/boxes3d). In this example, each -bounding box is logged as a separate entity to the common [world/annotations](recording://world/annotations) parent. -""".strip() - LOWRES_POSED_ENTITY_PATH = "world/camera_lowres" HIGHRES_ENTITY_PATH = "world/camera_highres" @@ -349,6 +306,7 @@ def main() -> None: name="2D", ), rrb.TextDocumentView(name="Readme"), + row_shares=[2, 1], ), ) diff --git a/examples/python/blueprint_stocks/README.md b/examples/python/blueprint_stocks/README.md index e4f296fb2a2e..094026f4572d 100644 --- a/examples/python/blueprint_stocks/README.md +++ b/examples/python/blueprint_stocks/README.md @@ -1,11 +1,13 @@ +This example fetches the last 5 days of stock data for a few different stocks. +We show how Rerun blueprints can then be used to present many different views of the same data. + @@ -14,8 +16,6 @@ thumbnail_dimensions = [480, 270] -This example fetches the last 5 days of stock data for a few different stocks. -We show how Rerun blueprints can then be used to present many different views of the same data. ```bash pip install -r examples/python/blueprint_stocks/requirements.txt diff --git a/examples/python/clock/README.md b/examples/python/clock/README.md index d3a4b5c189d2..de59a6e27af6 100644 --- a/examples/python/clock/README.md +++ b/examples/python/clock/README.md @@ -1,5 +1,6 @@ @@ -15,6 +16,62 @@ thumbnail_dimensions = [480, 480] An example visualizing an analog clock with hour, minute and seconds hands using Rerun Arrow3D primitives. +# Used Rerun types + +[`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Arrows3D`](https://www.rerun.io/docs/reference/types/archetypes/arrows3d) + +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +The clock's frame is logged as a 3D box using [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d) archetype. + ```python +rr.log( + "world/frame", + rr.Boxes3D(half_sizes=[LENGTH_S, LENGTH_S, 1.0], centers=[0.0, 0.0, 0.0]), + timeless=True, +) + ``` + +Then, the positions and colors of points and arrows representing the hands of a clock for seconds, minutes, and hours are logged in each simulation time. +It first sets the simulation time using [`timelines`](https://www.rerun.io/docs/concepts/timelines), calculates the data for each hand, and logs it using [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) and [`Arrows3D`](https://www.rerun.io/docs/reference/types/archetypes/arrows3d) archetypes. +This enables the visualization of the clock's movement over time. 
+ + ```python +for step in range(steps): + rr.set_time_seconds("sim_time", t_secs) + + # … calculating seconds … + rr.log("world/seconds_pt", rr.Points3D(positions=point_s, colors=color_s)) + rr.log("world/seconds_hand", rr.Arrows3D(vectors=point_s, colors=color_s, radii=WIDTH_S)) + + # … calculating minutes … + rr.log("world/minutes_pt", rr.Points3D(positions=point_m, colors=color_m)) + rr.log("world/minutes_hand", rr.Arrows3D(vectors=point_m, colors=color_m, radii=WIDTH_M)) + + # … calculating hours … + rr.log("world/hours_pt", rr.Points3D(positions=point_h, colors=color_h)) + rr.log("world/hours_hand", rr.Arrows3D(vectors=point_h, colors=color_h, radii=WIDTH_H)) + ``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/clock/requirements.txt +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/clock/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: ```bash -python examples/python/clock/main.py +python examples/python/clock/main.py --help ``` diff --git a/examples/python/controlnet/README.md b/examples/python/controlnet/README.md index 29710a24ab0c..c4e5d9d3c38c 100644 --- a/examples/python/controlnet/README.md +++ b/examples/python/controlnet/README.md @@ -1,11 +1,12 @@ +Use [Hugging Face's ControlNet](https://huggingface.co/docs/diffusers/using-diffusers/controlnet#controlnet) to generate an image from text, conditioned on detected edges from another image. + @@ -14,9 +15,6 @@ thumbnail_dimensions = [480, 480] -Use [Hugging Face's ControlNet](https://huggingface.co/docs/diffusers/using-diffusers/controlnet#controlnet) to generate an image from text, conditioned on detected edges from another image. - - ## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) @@ -27,10 +25,10 @@ Use [Hugging Face's ControlNet](https://huggingface.co/docs/diffusers/using-diff https://vimeo.com/870289439?autoplay=1&loop=1&autopause=0&background=1&muted=1&ratio=1440:1080 -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code. -## Images +### Images ```python rr.log("input/raw", rr.Image(image), timeless=True) rr.log("input/canny", rr.Image(canny_image), timeless=True) @@ -41,14 +39,14 @@ Timeless entities belong to all timelines (existing ones, and ones not yet creat This designation ensures their constant availability across all timelines in Rerun, aiding in consistent comparison and documentation. -## Prompts +### Prompts ```python rr.log("positive_prompt", rr.TextDocument(prompt), timeless=True) rr.log("negative_prompt", rr.TextDocument(negative_prompt), timeless=True) ``` The positive and negative prompt used for generation is logged to Rerun. 
-## Custom diffusion step callback +### Custom diffusion step callback We use a custom callback function for ControlNet that logs the output and the latent values at each timestep, which makes it possible for us to view all timesteps of the generation in Rerun. ```python def controlnet_callback( @@ -60,13 +58,13 @@ def controlnet_callback( rr.log("latent", rr.Tensor(latents.squeeze(), dim_names=["channel", "height", "width"])) ``` -## Output image +### Output image ```python rr.log("output", rr.Image(images)) ``` Finally we log the output image generated by ControlNet. -# Run the code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash diff --git a/examples/python/depth_guided_stable_diffusion/README.md b/examples/python/depth_guided_stable_diffusion/README.md index b3761753f2ca..ac014c2628b1 100644 --- a/examples/python/depth_guided_stable_diffusion/README.md +++ b/examples/python/depth_guided_stable_diffusion/README.md @@ -1,11 +1,12 @@ +Leverage [Depth Guided Stable Diffusion](https://github.com/Stability-AI/stablediffusion?tab=readme-ov-file#depth-conditional-stable-diffusion) to generate images with enhanced depth perception. This method integrates depth maps to guide the Stable Diffusion model, creating more visually compelling and contextually accurate images. + @@ -14,25 +15,23 @@ thumbnail_dimensions = [480, 266] Depth-guided stable diffusion example -Leverage [Depth Guided Stable Diffusion](https://github.com/Stability-AI/stablediffusion?tab=readme-ov-file#depth-conditional-stable-diffusion) to generate images with enhanced depth perception. This method integrates depth maps to guide the Stable Diffusion model, creating more visually compelling and contextually accurate images. - ## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor), [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document),[`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log)[`BarChart`](https://www.rerun.io/docs/reference/types/archetypes/bar_chart) ## Background Depth Guided Stable Diffusion enriches the image generation process by incorporating depth information, providing a unique way to control the spatial composition of generated images. This approach allows for more nuanced and layered creations, making it especially useful for scenes requiring a sense of three-dimensionality. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the Rerun SDK, demonstrating the integration of depth information in the Stable Diffusion image generation process. Here is the code for generating the visualization in Rerun. -## Prompt +### Prompt Visualizing the prompt and negative prompt ```python rr.log("prompt/text", rr.TextLog(prompt)) rr.log("prompt/text_negative", rr.TextLog(negative_prompt)) ``` -## Text +### Text Visualizing the text input ids, the text attention mask and the unconditional input ids ```python rr.log("prompt/text_input/ids", rr.BarChart(text_input_ids)) @@ -40,14 +39,14 @@ rr.log("prompt/text_input/attention_mask", rr.BarChart(text_inputs.attention_mas rr.log("prompt/uncond_input/ids", rr.Tensor(uncond_input.input_ids)) ``` -## Text embeddings +### Text embeddings Visualizing the text embeddings. 
The text embeddings are generated in response to the specific prompts used while the unconditional text embeddings represent a neutral or baseline state without specific input conditions. ```python rr.log("prompt/text_embeddings", rr.Tensor(text_embeddings)) rr.log("prompt/uncond_embeddings", rr.Tensor(uncond_embeddings)) ``` -## Depth map +### Depth map Visualizing the pixel values of the depth estimation, estimated depth image, interpolated depth image and normalized depth image ```python rr.log("depth/input_preprocessed", rr.Tensor(pixel_values)) @@ -56,13 +55,13 @@ rr.log("depth/interpolated", rr.DepthImage(depth_map)) rr.log("depth/normalized", rr.DepthImage(depth_map)) ``` -## Latents +### Latents Log the latents, the representation of the images in the format used by the diffusion model. ```python rr.log("diffusion/latents", rr.Tensor(latents, dim_names=["b", "c", "h", "w"])) ``` -## Denoising loop +### Denoising loop For each step in the denoising loop we set a time sequence with step and timestep and log the latent model input, noise predictions, latents and image. This make is possible for us to see all denoising steps in the Rerun viewer. ```python rr.set_time_sequence("step", i) @@ -73,14 +72,14 @@ rr.log("diffusion/latents", rr.Tensor(latents, dim_names=["b", "c", "h", "w"])) rr.log("image/diffused", rr.Image(image)) ``` -## Diffused image +### Diffused image Finally we log the diffused image generated by the model. ```python rr.log("image/diffused", rr.Image(image_8)) ``` -# Run the code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash diff --git a/examples/python/detect_and_track_objects/README.md b/examples/python/detect_and_track_objects/README.md index 664dc1991bb7..26db52356a92 100644 --- a/examples/python/detect_and_track_objects/README.md +++ b/examples/python/detect_and_track_objects/README.md @@ -1,13 +1,12 @@ - +Visualize object detection and segmentation using the [Huggingface's Transformers](https://huggingface.co/docs/transformers/index) and [CSRT](https://arxiv.org/pdf/1611.08461.pdf) from OpenCV. @@ -17,28 +16,26 @@ channel = "release" -Visualize object detection and segmentation using the [Huggingface's Transformers](https://huggingface.co/docs/transformers/index) and [CSRT](https://arxiv.org/pdf/1611.08461.pdf) from OpenCV. - -# Used Rerun Types +## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`SegmentationImage`](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d), [`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log) -# Background +## Background In this example, CSRT (Channel and Spatial Reliability Tracker), a tracking API introduced in OpenCV, is employed for object detection and tracking across frames. Additionally, the example showcases basic object detection and segmentation on a video using the Huggingface transformers library. -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code. -## Timelines +### Timelines For each processed video frame, all data sent to Rerun is associated with the [`timelines`](https://www.rerun.io/docs/concepts/timelines) `frame_idx`. 
```python rr.set_time_sequence("frame", frame_idx) ``` -## Video +### Video The input video is logged as a sequence of [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) to the `image` entity. ```python @@ -58,7 +55,7 @@ rr.log( ) ``` -## Segmentations +### Segmentations The segmentation results is logged through a combination of two archetypes. The segmentation image itself is logged as an [`SegmentationImage`](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image) and @@ -86,10 +83,10 @@ rr.log( ) ``` -## Detections +### Detections The detections and tracked bounding boxes are visualized by logging the [`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d) to Rerun. -### Detections +#### Detections ```python rr.log( "segmentation/detections/things", @@ -111,7 +108,7 @@ rr.log( ), ) ``` -### Tracked bounding boxes +#### Tracked bounding boxes ```python rr.log( "segmentation/detections/background", @@ -131,7 +128,7 @@ same color. Note that it is also possible to log multiple annotation contexts should different colors and / or labels be desired. The annotation context is resolved by seeking up the entity hierarchy. -## Text Log +### Text log Rerun integrates with the [Python logging module](https://docs.python.org/3/library/logging.html). Through the [`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log#textlogintegration) text at different importance level can be logged. After an initial setup that is described on the [`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log#textlogintegration), statements @@ -151,7 +148,7 @@ def main() -> None: ``` In the viewer you can adjust the filter level and look at the messages time-synchronized with respect to other logged data. -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/detect_and_track_objects/main.py b/examples/python/detect_and_track_objects/main.py index f2d386ffc9d6..404a726155e0 100755 --- a/examples/python/detect_and_track_objects/main.py +++ b/examples/python/detect_and_track_objects/main.py @@ -17,6 +17,18 @@ import rerun as rr # pip install rerun-sdk from PIL import Image +DESCRIPTION = """ +# Detect and track objects + +This is a more elaborate example applying simple object detection and segmentation on a video using the Huggingface +`transformers` library. Tracking across frames is performed using [CSRT](https://arxiv.org/abs/1611.08461) from +OpenCV. The results are visualized using Rerun. + +The full source code for this example is available +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/detect_and_track_objects). +""".strip() + + EXAMPLE_DIR: Final = Path(os.path.dirname(__file__)) DATASET_DIR: Final = EXAMPLE_DIR / "dataset" / "tracking_sequences" DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/tracking_sequences" @@ -36,55 +48,6 @@ DetrForSegmentation, ) -DESCRIPTION = """ -# Detect and Track Objects - -This is a more elaborate example applying simple object detection and segmentation on a video using the Huggingface -`transformers` library. Tracking across frames is performed using [CSRT](https://arxiv.org/abs/1611.08461) from -OpenCV. The results are visualized using Rerun. 
- -## How it was made -The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/detect_and_track_objects/main.py). - -### Input Video -The input video is logged as a sequence of -[rr.Image objects](https://www.rerun.io/docs/reference/types/archetypes/image) to the -[image entity](recording://image). Since the detection and segmentation model operates on smaller images the -resized images are logged to the separate [segmentation/rgb_scaled entity](recording://segmentation/rgb_scaled). This allows us to -subsequently visualize the segmentation mask on top of the video. - -### Segmentations -The [segmentation result](recording://image_segmentation/segmentation) is logged through a combination of two archetypes. -The segmentation image itself is logged as an -[rr.SegmentationImage archetype](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image) and -contains the id for each pixel. It is logged to the [segmentation entity](recording://segmentation). - -The color and label for each class is determined by the -[rr.AnnotationContext archetype](https://www.rerun.io/docs/reference/types/archetypes/annotation_context) which is -logged to the root entity using `rr.log("/", …, timeless=True` as it should apply to the whole sequence and all -entities that have a class id. - -### Detections -The detections and tracked bounding boxes are visualized by logging the -[rr.Boxes2D archetype](https://www.rerun.io/docs/reference/types/archetypes/boxes2d) to Rerun. - -The color and label of the bounding boxes is determined by their class id, relying on the same -[rr.AnnotationContext archetype](https://www.rerun.io/docs/reference/types/archetypes/annotation_context) as the -segmentation images. This ensures that a bounding box and a segmentation image with the same class id will also have the -same color. - -Note that it is also possible to log multiple annotation contexts should different colors and / or labels be desired. -The annotation context is resolved by seeking up the entity hierarchy. - -### Text Log -Through the [rr.TextLog archetype] text at different importance level can be logged. Rerun integrates with the -[Python logging module](https://docs.python.org/3/library/logging.html). After an initial setup that is described on the -[rr.TextLog page](https://www.rerun.io/docs/reference/types/archetypes/text_log#textlogintegration), statements -such as `logging.info("...")`, `logging.debug("...")`, etc. will show up in the Rerun viewer. In the viewer you can -adjust the filter level and look at the messages time-synchronized with respect to other logged data. -""".strip() - @dataclass class Detection: diff --git a/examples/python/dicom_mri/README.md b/examples/python/dicom_mri/README.md index cf7fc26b35ee..4f4c6b3f56ae 100644 --- a/examples/python/dicom_mri/README.md +++ b/examples/python/dicom_mri/README.md @@ -1,12 +1,12 @@ +Visualize a [DICOM](https://en.wikipedia.org/wiki/DICOM) MRI scan. This demonstrates the flexible tensor slicing capabilities of the Rerun viewer. @@ -16,15 +16,13 @@ channel = "main" -Visualize a [DICOM](https://en.wikipedia.org/wiki/DICOM) MRI scan. This demonstrates the flexible tensor slicing capabilities of the Rerun viewer. 
- -# Used Rerun Types +## Used Rerun types [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) -# Background +## Background Digital Imaging and Communications in Medicine (DICOM) serves as a technical standard for the digital storage and transmission of medical images. In this instance, an MRI scan is visualized using Rerun. -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with just the following line. ```python @@ -39,7 +37,7 @@ give semantic meaning to each axis. After selecting the tensor view, you can adj settings on the right-hand side. For example, you can adjust the color map, the brightness, which dimensions to show as an image and which to select from, and more. -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/dicom_mri/main.py b/examples/python/dicom_mri/main.py index a207ebb4ed07..78b0fae77a3f 100755 --- a/examples/python/dicom_mri/main.py +++ b/examples/python/dicom_mri/main.py @@ -23,33 +23,22 @@ import requests import rerun as rr # pip install rerun-sdk -DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset" -DATASET_URL: Final = "https://storage.googleapis.com/rerun-example-datasets/dicom.zip" - DESCRIPTION = """ # Dicom MRI This example visualizes an MRI scan using Rerun. -## How it was made -The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/dicom_mri/main.py). - The visualization of the data consists of just the following line ```python rr.log("tensor", rr.Tensor(voxels_volume_u16, dim_names=["right", "back", "up"])) ``` -`voxels_volume_u16` is a `numpy.array` of shape `(512, 512, 512)` containing volumetric MRI intensities. We can -visualize such information in Rerun by logging the `numpy.array` as an -[rr.Tensor archetype](https://www.rerun.io/docs/reference/types/archetypes/tensor). Here the tensor is logged to -the [tensor entity](recording://tensor), however any other name for the entity could have been chosen. - -In the Rerun viewer you can inspect the data in detail. The `dim_names` provided in the above call to `rr.log` help to -give semantic meaning to each axis. After selecting the tensor view, you can adjust various settings in the Blueprint -settings on the right-hand side. For example, you can adjust the color map, the brightness, which dimensions to show as -an image and which to select from, and more. +The full source code for this example is available +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/dicom_mri). """ +DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset" +DATASET_URL: Final = "https://storage.googleapis.com/rerun-example-datasets/dicom.zip" + def extract_voxel_data( dicom_files: Iterable[Path], diff --git a/examples/python/differentiable_blocks_world/README.md b/examples/python/differentiable_blocks_world/README.md index bca0aafbbb51..2b6f4b2472af 100644 --- a/examples/python/differentiable_blocks_world/README.md +++ b/examples/python/differentiable_blocks_world/README.md @@ -6,6 +6,10 @@ thumbnail = "https://static.rerun.io/differentiable-blocks/42f3a5481162a0e75f1c5 thumbnail_dimensions = [480, 480] --> +This example is a visual walkthrough of the paper "Differentiable Block Worlds". 
+All the visualizations were created by editing the original source code to log data with the Rerun SDK. + +## Visual paper walkthrough Finding a textured mesh decomposition from a collection of posed images is a very challenging optimization problem. "Differentiable Block Worlds" by Tom Monnier et al. shows impressive results using differentiable rendering. Here we visualize how this optimization works using the Rerun SDK. diff --git a/examples/python/dna/README.md b/examples/python/dna/README.md index 4d0b0ead556a..6ef9d77836ad 100644 --- a/examples/python/dna/README.md +++ b/examples/python/dna/README.md @@ -1,12 +1,13 @@ +Simple example of logging point and line primitives to draw a 3D helix. + @@ -15,8 +16,6 @@ channel = "main" -Simple example of logging point and line primitives to draw a 3D helix. - ```bash python examples/python/dna/main.py ``` diff --git a/examples/python/dna/main.py b/examples/python/dna/main.py index e0168a453b4d..6bfb72a6ebb4 100755 --- a/examples/python/dna/main.py +++ b/examples/python/dna/main.py @@ -18,31 +18,8 @@ This is a minimal example that logs synthetic 3D data in the shape of a double helix. The underlying data is generated using numpy and visualized using Rerun. -## How it was made The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/dna/main.py). - -### Colored 3D points -The colored 3D points were added to the scene by logging the -[rr.Points3D archetype](https://www.rerun.io/docs/reference/types/archetypes/points3d) to the -[helix/structure/left](recording://helix/structure/left) and [helix/structure/right](recording://helix/structure/right) -entities. - -### 3D line strips -The 3D line strips connecting the 3D point pairs are logged as an -[rr.LineStrips3D archetype](https://www.rerun.io/docs/reference/types/archetypes/line_strips3d) to the -[helix/structure/scaffolding entity](recording://helix/structure/scaffolding). - -### Rotation -The whole structure is rotated over time by logging a -[rr.Transform3D archetype](https://www.rerun.io/docs/reference/types/archetypes/transform3d) to the -[helix/structure entity](recording://helix/structure:Transform3D) that changes over time. This transform determines the rotation of -the [structure entity](recording://helix/structure) relative to the [helix](recording://helix) entity. Since all other -entities are children of [helix/structure](recording://helix/structure) they will also rotate based on this transform. - -You can visualize this rotation by selecting the two entities on the left-hand side and activating `Show transform` in -the Blueprint settings on the right-hand side. You will see one static frame (i.e., the frame of -[helix](recording://helix)) and the rotating frame (i.e., the frame of [structure](recording://helix/structure)). +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/dna). """.strip() diff --git a/examples/python/face_tracking/README.md b/examples/python/face_tracking/README.md index 33549e5e3b0f..ead37fd1dd5d 100644 --- a/examples/python/face_tracking/README.md +++ b/examples/python/face_tracking/README.md @@ -1,11 +1,12 @@ +Use the [MediaPipe](https://google.github.io/mediapipe/) Face Detector and Landmarker solutions to detect and track a human face in image, video, and camera stream. 
+ @@ -15,13 +16,10 @@ thumbnail_dimensions = [480, 480] screenshot of the Rerun visualization of the MediaPipe Face Detector and Landmarker - -Use the [MediaPipe](https://google.github.io/mediapipe/) Face Detector and Landmarker solutions to detect and track a human face in image, video, and camera stream. - -# Used Rerun Types +## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) -# Background +## Background The face and face landmark detection technology aims to give the ability of the devices to interpret face movements and facial expressions as commands or inputs. At the core of this technology, a pre-trained machine-learning model analyses the visual input, locates face and identifies face landmarks and blendshape scores (coefficients representing facial expression). Human-Computer Interaction, Robotics, Gaming, and Augmented Reality are among the fields where this technology shows significant promise for applications. @@ -29,10 +27,10 @@ Human-Computer Interaction, Robotics, Gaming, and Augmented Reality are among th In this example, the [MediaPipe](https://developers.google.com/mediapipe/) Face and Face Landmark Detection solutions were utilized to detect human face, detect face landmarks and identify facial expressions. Rerun was employed to visualize the output of the Mediapipe solution over time to make it easy to analyze the behavior. -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code. -## Timelines +### Timelines For each processed video frame, all data sent to Rerun is associated with the two [`timelines`](https://www.rerun.io/docs/concepts/timelines) `time` and `frame_idx`. @@ -41,7 +39,7 @@ rr.set_time_seconds("time", bgr_frame.time) rr.set_time_sequence("frame_idx", bgr_frame.idx) ``` -## Video +### Video The input video is logged as a sequence of [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) objects to the 'Video' entity. ```python rr.log( @@ -50,7 +48,7 @@ rr.log( ) ``` -## Face Landmark Points +### Face landmark points Logging the face landmarks involves specifying connections between the points, extracting face landmark points and logging them to the Rerun SDK. The 2D points are visualized over the video/image for a better understanding and visualization of the face. The 3D points allows the creation of a 3D model of the face reconstruction for a more comprehensive representation of the face. @@ -60,7 +58,7 @@ The 2D and 3D points are logged through a combination of two archetypes. First, the keypoints. Defining these connections automatically renders lines between them. Second, the actual keypoint positions are logged in 2D and 3D as [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d) and [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetypes, respectively. -### Label Mapping and Keypoint Connections +#### Label mapping and keypoint connections An annotation context is logged with one class ID assigned per facial feature. 
The class description includes the connections between corresponding keypoints extracted from the MediaPipe face mesh solution. A class ID array is generated to match the class IDs in the annotation context with keypoint indices (to be utilized as the class_ids argument to rr.log). @@ -119,7 +117,7 @@ rr.log( timeless=True, ) ``` -### Bounding Box +#### Bounding box ```python rr.log( @@ -132,7 +130,7 @@ rr.log( ``` -### 2D Points +#### 2D points ```python rr.log( @@ -148,7 +146,7 @@ rr.log( ) ``` -### 3D Points +#### 3D points ```python rr.log( @@ -161,7 +159,7 @@ rr.log( ) ``` -## Scalar +### Scalar Blendshapes are essentially predefined facial expressions or configurations that can be detected by the face landmark detection model. Each blendshape typically corresponds to a specific facial movement or expression, such as blinking, squinting, smiling, etc. The blendshapes are logged along with their corresponding scores. @@ -171,7 +169,7 @@ for blendshape in blendshapes: rr.log(f"blendshapes/{i}/{blendshape.category_name}", rr.Scalar(blendshape.score)) ``` -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/face_tracking/requirements.txt b/examples/python/face_tracking/requirements.txt index 03268960a288..c018d69ff2e2 100644 --- a/examples/python/face_tracking/requirements.txt +++ b/examples/python/face_tracking/requirements.txt @@ -1,4 +1,7 @@ -mediapipe>=0.10.1 ; python_version <= '3.11' # no 3.12 version yet (https://pypi.org/project/mediapipe/) +# no 3.12 version yet (https://pypi.org/project/mediapipe/) +# 0.10.10 no longer supports the legacy Pose model: https://github.com/rerun-io/rerun/issues/5859 +mediapipe==0.10.9 ; python_version <= '3.11' + numpy opencv-python>4.6 # Avoid opencv-4.6 since it rotates images incorrectly (https://github.com/opencv/opencv/issues/22088) requests diff --git a/examples/python/gesture_detection/README.md b/examples/python/gesture_detection/README.md index d8d6e72d08aa..e3e855e0ac0a 100644 --- a/examples/python/gesture_detection/README.md +++ b/examples/python/gesture_detection/README.md @@ -1,11 +1,12 @@ +Use the [MediaPipe](https://google.github.io/mediapipe/) Hand Landmark and Gesture Detection solutions to +track hands and recognize gestures in images, video, and camera stream. @@ -15,13 +16,10 @@ thumbnail_dimensions = [480, 480] -Use the [MediaPipe](https://google.github.io/mediapipe/) Hand Landmark and Gesture Detection solutions to -track hands and recognize gestures in images, video, and camera stream. - -# Used rerun types +## Used rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`LineStrips2D`](https://www.rerun.io/docs/reference/types/archetypes/line_strips2d), [`ClassDescription`](https://www.rerun.io/docs/reference/types/datatypes/class_description), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) -# Background +## Background The hand tracking and gesture recognition technology aims to give the ability of the devices to interpret hand movements and gestures as commands or inputs. 
At the core of this technology, a pre-trained machine-learning model analyses the visual input and identifies hand landmarks and hand gestures. The real applications of such technology vary, as hand movements and gestures can be used to control smart devices. @@ -30,10 +28,10 @@ Human-Computer Interaction, Robotics, Gaming, and Augmented Reality are a few of In this example, the [MediaPipe](https://developers.google.com/mediapipe/) Gesture and Hand Landmark Detection solutions were utilized to detect and track hand landmarks and recognize gestures. Rerun was employed to visualize the output of the Mediapipe solution over time to make it easy to analyze the behavior. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code. -## Timelines +### Timelines For each processed video frame, all data sent to Rerun is associated with the two [`timelines`](https://www.rerun.io/docs/concepts/timelines) `time` and `frame_idx`. @@ -42,7 +40,7 @@ rr.set_time_sequence("frame_nr", frame_idx) rr.set_time_nanos("frame_time", frame_time_nano) ``` -## Video +### Video The input video is logged as a sequence of [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) objects to the `Media/Video` entity. ```python rr.log( @@ -51,7 +49,7 @@ rr.log( ) ``` -## Hand landmark points +### Hand landmark points Logging the hand landmarks involves specifying connections between the points, extracting pose landmark points and logging them to the Rerun SDK. The 2D points are visualized over the video and at a separate entity. Meanwhile, the 3D points allows the creation of a 3D model of the hand for a more comprehensive representation of the hand landmarks. @@ -62,7 +60,7 @@ As for the 3D points, the logging process involves two steps. First, a timeless the keypoints. Defining these connections automatically renders lines between them. Mediapipe provides the `HAND_CONNECTIONS` variable which contains the list of `(from, to)` landmark indices that define the connections. Second, the actual keypoint positions are logged in 3D [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype. -### Label Mapping and Keypoint Connections +#### Label mapping and keypoint connections ```python rr.log( @@ -79,7 +77,7 @@ rr.log( rr.log("Hand3D", rr.ViewCoordinates.LEFT_HAND_Y_DOWN, timeless=True) ``` -### 2D Points +#### 2D points ```python # Log points to the image and Hand Entity @@ -97,7 +95,7 @@ for log_key in ["Media/Connections", "Hand/Connections"]: ) ``` -### 3D points +#### 3D points ```python rr.log( @@ -111,7 +109,7 @@ rr.log( ) ``` -## Detection +### Detection To showcase gesture recognition, an image of the corresponding gesture emoji is displayed within a `TextDocument` under the `Detection` entity. 
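For reference, a minimal sketch of what such a call can look like (the `gesture_image_url` variable and the Markdown body are illustrative assumptions, not the example's exact code):

```python
# Hypothetical sketch: display the recognized gesture as a Markdown document
# that embeds an emoji image for the detected gesture category.
rr.log(
    "Detection",
    rr.TextDocument(
        f"![Gesture]({gesture_image_url})",
        media_type=rr.MediaType.MARKDOWN,
    ),
)
```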
@@ -123,7 +121,7 @@ rr.log( ) ``` -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/gesture_detection/requirements.txt b/examples/python/gesture_detection/requirements.txt index 23d673c2f837..aecc4bce71e3 100644 --- a/examples/python/gesture_detection/requirements.txt +++ b/examples/python/gesture_detection/requirements.txt @@ -1,4 +1,7 @@ -mediapipe==0.10.9 ; python_version <= '3.11' # no 3.12 version yet (https://pypi.org/project/mediapipe/) # For mac also you may need to run this: export SYSTEM_VERSION_COMPAT=0 +# no 3.12 version yet (https://pypi.org/project/mediapipe/) +# 0.10.10 no longer supports the legacy Pose model: https://github.com/rerun-io/rerun/issues/5859 +mediapipe==0.10.9 ; python_version <= '3.11' + numpy opencv-python>4.9 requests>=2.31,<3 diff --git a/examples/python/human_pose_tracking/README.md b/examples/python/human_pose_tracking/README.md index 04322df3c82b..ef8215607aec 100644 --- a/examples/python/human_pose_tracking/README.md +++ b/examples/python/human_pose_tracking/README.md @@ -1,12 +1,13 @@ +Use the [MediaPipe Pose Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) solution to detect and track a human pose in video. + @@ -15,14 +16,10 @@ channel = "main" -Use the [MediaPipe Pose Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) solution to detect and track a human pose in video. - - - -# Used Rerun types +## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`ClassDescription`](https://www.rerun.io/docs/reference/types/datatypes/class_description), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`SegmentationImage`](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image) -# Background +## Background Human pose tracking is a task in computer vision that focuses on identifying key body locations, analyzing posture, and categorizing movements. At the heart of this technology is a pre-trained machine-learning model to assess the visual input and recognize landmarks on the body in both image coordinates and 3D world coordinates. The use cases and applications of this technology include but are not limited to Human-Computer Interaction, Sports Analysis, Gaming, Virtual Reality, Augmented Reality, Health, etc. @@ -31,10 +28,10 @@ In this example, the [MediaPipe Pose Landmark Detection](https://developers.goog Rerun was employed to visualize the output of the Mediapipe solution over time to make it easy to analyze the behavior. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code. -## Timelines +### Timelines For each processed video frame, all data sent to Rerun is associated with the two [`timelines`](https://www.rerun.io/docs/concepts/timelines) `time` and `frame_idx`. @@ -43,7 +40,7 @@ rr.set_time_seconds("time", bgr_frame.time) rr.set_time_sequence("frame_idx", bgr_frame.idx) ``` -## Video +### Video The input video is logged as a sequence of [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) objects to the 'Video' entity. 
```python @@ -53,7 +50,7 @@ rr.log( ) ``` -## Segmentation mask +### Segmentation mask The segmentation result is logged through a combination of two archetypes. The segmentation image itself is logged as an @@ -62,22 +59,22 @@ contains the id for each pixel. The color is determined by the [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context) which is logged with `timeless=True` as it should apply to the whole sequence. -### Label mapping +#### Label mapping ```python rr.log( - "video/mask", - rr.AnnotationContext( - [ - rr.AnnotationInfo(id=0, label="Background"), - rr.AnnotationInfo(id=1, label="Person", color=(0, 0, 0)), - ] - ), - timeless=True, - ) + "video/mask", + rr.AnnotationContext( + [ + rr.AnnotationInfo(id=0, label="Background"), + rr.AnnotationInfo(id=1, label="Person", color=(0, 0, 0)), + ] + ), + timeless=True, +) ``` -### Segmentation image +#### Segmentation image ```python rr.log( @@ -86,7 +83,7 @@ rr.log( ) ``` -## Body pose points +### Body pose points Logging the body pose landmarks involves specifying connections between the points, extracting pose landmark points and logging them to the Rerun SDK. The 2D points are visualized over the image/video for a better understanding and visualization of the body pose. The 3D points allows the creation of a 3D model of the body posture for a more comprehensive representation of the human pose. @@ -99,7 +96,7 @@ Defining these connections automatically renders lines between them. Mediapipe p and 3D as [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d) and [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetypes, respectively. -### Label mapping and keypoint connections +#### Label mapping and keypoint connections ```python rr.log( @@ -115,7 +112,7 @@ rr.log( ) ``` -### 2D points +#### 2D points ```python rr.log( @@ -124,7 +121,7 @@ rr.log( ) ``` -### 3D points +#### 3D points ```python rr.log( @@ -133,7 +130,7 @@ rr.log( ) ``` -# Run the code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash diff --git a/examples/python/human_pose_tracking/main.py b/examples/python/human_pose_tracking/main.py index e10089c0131e..163b62f5bc5b 100755 --- a/examples/python/human_pose_tracking/main.py +++ b/examples/python/human_pose_tracking/main.py @@ -18,47 +18,19 @@ import rerun as rr # pip install rerun-sdk import rerun.blueprint as rrb -EXAMPLE_DIR: Final = Path(os.path.dirname(__file__)) -DATASET_DIR: Final = EXAMPLE_DIR / "dataset" / "pose_movement" -DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/pose_movement" - DESCRIPTION = """ -# Human Pose Tracking +# Human pose tracking This example uses Rerun to visualize the output of [MediaPipe](https://developers.google.com/mediapipe)-based tracking of a human pose in 2D and 3D. -## How it was made The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/human_pose_tracking/main.py). - -### Input Video -The input video is logged as a sequence of -[rr.Image objects](https://www.rerun.io/docs/reference/types/archetypes/image) to the [video entity](recording://video). - -### Segmentation -The [segmentation result](recording://video/mask) is logged through a combination of two archetypes. 
The segmentation -image itself is logged as an -[rr.SegmentationImage archetype](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image) and -contains the id for each pixel. The color is determined by the -[rr.AnnotationContext archetype](https://www.rerun.io/docs/reference/types/archetypes/annotation_context) which is -logged with `rr.log(…, timeless=True` as it should apply to the whole sequence. - -### Skeletons -The [2D](recording://video/pose/points) and [3D skeletons](recording://person/pose/points) are also logged through a -similar combination of two entities. - -First, a timeless -[rr.ClassDescription](https://www.rerun.io/docs/reference/types/datatypes/class_description) is logged (note, that -this is equivalent to logging an -[rr.AnnotationContext archetype](https://www.rerun.io/docs/reference/types/archetypes/annotation_context) as in the -segmentation case). The class description contains the information which maps keypoint ids to labels and how to connect -the keypoints to a skeleton. - -Second, the actual keypoint positions are logged in 2D -nd 3D as [rr.Points2D](https://www.rerun.io/docs/reference/types/archetypes/points2d) and -[rr.Points3D](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetypes, respectively. +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/human_pose_tracking). """.strip() +EXAMPLE_DIR: Final = Path(os.path.dirname(__file__)) +DATASET_DIR: Final = EXAMPLE_DIR / "dataset" / "pose_movement" +DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/pose_movement" + def track_pose(video_path: str, *, segment: bool, max_frame_count: int | None) -> None: mp_pose = mp.solutions.pose diff --git a/examples/python/human_pose_tracking/requirements.txt b/examples/python/human_pose_tracking/requirements.txt index b5a9a535e2eb..5bec67ecf1ad 100644 --- a/examples/python/human_pose_tracking/requirements.txt +++ b/examples/python/human_pose_tracking/requirements.txt @@ -1,4 +1,7 @@ -mediapipe>=0.10.9 ; python_version <= '3.11' # no 3.12 version yet (https://pypi.org/project/mediapipe/) +# no 3.12 version yet (https://pypi.org/project/mediapipe/) +# 0.10.10 no longer supports the legacy Pose model: https://github.com/rerun-io/rerun/issues/5859 +mediapipe==0.10.9 ; python_version <= '3.11' + numpy opencv-python>4.6 # Avoid opencv-4.6 since it rotates images incorrectly (https://github.com/opencv/opencv/issues/22088) requests>=2.31,<3 diff --git a/examples/python/incremental_logging/README.md b/examples/python/incremental_logging/README.md index c0c5d61f74e1..cb4333b32773 100644 --- a/examples/python/incremental_logging/README.md +++ b/examples/python/incremental_logging/README.md @@ -1,11 +1,11 @@ +Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames. @@ -15,9 +15,6 @@ thumbnail_dimensions = [480, 301] -Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames. 
- - To build it from a checkout of the repository (requires a Rust toolchain): ```bash python examples/python/incremental_logging/main.py diff --git a/examples/python/kiss-icp/README.md b/examples/python/kiss-icp/README.md index 9096b859cb5f..a5f345da8bcf 100644 --- a/examples/python/kiss-icp/README.md +++ b/examples/python/kiss-icp/README.md @@ -1,13 +1,14 @@ -Estimating the odometry is a common problem in robotics and in the [2023, "KISS-ICP: In Defense of Point-to-Point ICP -- Simple, Accurate, and Robust Registration If Done the Right Way" Ignacio Vizzo et al.](https://arxiv.org/abs/2209.15397) they show how one can use an ICP (iterative closest point) algorithm to robustly and accurately estimate poses from LiDAR data. We will demonstrate the KISS-ICP pipeline on the [NCLT dataset](http://robots.engin.umich.edu/nclt/) along with some brief explanations, for a more detailed explanation you should look at the [original paper](https://arxiv.org/abs/2209.15397). +Visualizes the KISS-ICP LiDAR odometry pipeline on the [NCLT dataset](http://robots.engin.umich.edu/nclt/). + +Estimating the odometry is a common problem in robotics and in the [2023, "KISS-ICP: In Defense of Point-to-Point ICP -- Simple, Accurate, and Robust Registration If Done the Right Way" Ignacio Vizzo et al.](https://arxiv.org/abs/2209.15397) they show how one can use an ICP (iterative closest point) algorithm to robustly and accurately estimate poses from LiDAR data. We will demonstrate the KISS-ICP pipeline on the NCLT dataset along with some brief explanations, for a more detailed explanation you should look at the [original paper](https://arxiv.org/abs/2209.15397). diff --git a/examples/python/lidar/README.md b/examples/python/lidar/README.md index 390da060f579..283fcc34a83f 100644 --- a/examples/python/lidar/README.md +++ b/examples/python/lidar/README.md @@ -1,11 +1,11 @@ +Visualize the LiDAR data from the [nuScenes dataset](https://www.nuscenes.org/). @@ -15,19 +15,17 @@ thumbnail_dimensions = [480, 480] -Visualize the LiDAR data from the [nuScenes dataset](https://www.nuscenes.org/). - -# Used Rerun Types +## Used Rerun types [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) -# Background +## Background This example demonstrates the ability to read and visualize LiDAR data from the nuScenes dataset, which is a public large-scale dataset specifically designed for autonomous driving. The scenes in this dataset encompass data collected from a comprehensive suite of sensors on autonomous vehicles, including 6 cameras, 1 LIDAR, 5 RADAR, GPS and IMU sensors. It's important to note that in this example, only the LiDAR data is visualized. For a more extensive example including other sensors and annotations check out the [nuScenes example](https://www.rerun.io/examples/real-data/nuscenes). -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The visualization in this example was created with just the following lines. @@ -40,7 +38,7 @@ rr.log("world/lidar", rr.Points3D(points, colors=point_colors)) # Log the 3D dat When logging data to Rerun, it's possible to associate it with specific time by using the Rerun's [`timelines`](https://www.rerun.io/docs/concepts/timelines). In the following code, we first establish the desired time frame and then proceed to log the 3D data points. 
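As a rough sketch of that pattern (the timeline name and variables are assumed from the surrounding snippet rather than copied from the example's source):

```python
# Hypothetical sketch: stamp the data with a timeline before logging it,
# so successive LiDAR sweeps can be scrubbed through in the viewer.
rr.set_time_seconds("timestamp", sample_timestamp)
rr.log("world/lidar", rr.Points3D(points, colors=point_colors))
```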
-# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/limap/README.md b/examples/python/limap/README.md index 795bd4b73253..ee8918b40a6f 100644 --- a/examples/python/limap/README.md +++ b/examples/python/limap/README.md @@ -6,6 +6,10 @@ thumbnail = "https://static.rerun.io/3d-line-mapping-revisited/be0a3b8ac08360368 thumbnail_dimensions = [480, 480] --> +This example is a visual walkthrough of the paper "3D Line Mapping Revisited". +All the visualizations were created by editing the original source code to log data with the Rerun SDK. + +## Visual paper walkthrough Human-made environments contain a lot of straight lines, which are currently not exploited by most mapping approaches. With their recent work "3D Line Mapping Revisited" Shaohui Liu et al. take steps towards changing that. diff --git a/examples/python/live_camera_edge_detection/README.md b/examples/python/live_camera_edge_detection/README.md index b197b9c6adca..dff535f818d8 100644 --- a/examples/python/live_camera_edge_detection/README.md +++ b/examples/python/live_camera_edge_detection/README.md @@ -1,11 +1,11 @@ +Visualize the [OpenCV Canny Edge Detection](https://docs.opencv.org/4.x/da/d22/tutorial_py_canny.html) results from a live camera stream. @@ -15,20 +15,18 @@ thumbnail_dimensions = [480, 480] Live Camera Edge Detection example screenshot -Visualize the [OpenCV Canny Edge Detection](https://docs.opencv.org/4.x/da/d22/tutorial_py_canny.html) results from a live camera stream. - -# Used Rerun Types +## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) -# Background +## Background In this example, the results of the [OpenCV Canny Edge Detection](https://docs.opencv.org/4.x/da/d22/tutorial_py_canny.html) algorithm are visualized. Canny Edge Detection is a popular edge detection algorithm, and can efficiently extract important structural information from visual objects while notably reducing the computational load. The process in this example involves converting the input image to RGB, then to grayscale, and finally applying the Canny Edge Detector for precise edge detection. -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The visualization in this example were created with the following Rerun code: -## RGB Image +### RGB image The original image is read and logged in RGB format under the entity "image/rgb". ```python @@ -37,7 +35,7 @@ rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) rr.log("image/rgb", rr.Image(rgb)) ``` -## Grayscale Image +### Grayscale image The input image is converted from BGR color space to grayscale, and the resulting grayscale image is logged under the entity "image/gray". ```python @@ -46,7 +44,7 @@ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) rr.log("image/gray", rr.Image(gray)) ``` -## Canny Edge Detection Image +### Canny edge detection image The Canny edge detector is applied to the grayscale image, and the resulting edge-detected image is logged under the entity "image/canny". 
```python @@ -56,7 +54,7 @@ rr.log("image/canny", rr.Image(canny)) ``` -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/live_depth_sensor/README.md b/examples/python/live_depth_sensor/README.md index f5d0acb76272..9d564d34c501 100644 --- a/examples/python/live_depth_sensor/README.md +++ b/examples/python/live_depth_sensor/README.md @@ -1,11 +1,11 @@ +Visualize the live-streaming frames from an Intel RealSense depth sensor. @@ -15,17 +15,15 @@ thumbnail_dimensions = [480, 360] Live Depth Sensor example screenshot -Visualize the live-streaming frames from an Intel RealSense depth sensor. - This example requires a connected realsense depth sensor. -# Used Rerun Types +## Used Rerun types [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image) -# Background +## Background The Intel RealSense depth sensor can stream live depth and color data. To visualize this data output, we utilized Rerun. -# Logging and Visualizing with Rerun +## Logging and visualizing with Rerun The RealSense sensor captures data in both RGB and depth formats, which are logged using the [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) and [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image) archetypes, respectively. Additionally, to provide a 3D view, the visualization includes a pinhole camera using the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) and [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetypes. @@ -38,7 +36,7 @@ rr.log("realsense", rr.ViewCoordinates.RDF, timeless=True) # Visualize the data -## Image +### Image First, the pinhole camera is set using the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) and [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetypes. Then, the images captured by the RealSense sensor are logged as an [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) object, and they're associated with the time they were taken. 
@@ -46,26 +44,26 @@ First, the pinhole camera is set using the [`Pinhole`](https://www.rerun.io/docs ```python rgb_from_depth = depth_profile.get_extrinsics_to(rgb_profile) - rr.log( - "realsense/rgb", - rr.Transform3D( - translation=rgb_from_depth.translation, - mat3x3=np.reshape(rgb_from_depth.rotation, (3, 3)), - from_parent=True, - ), - timeless=True, +rr.log( + "realsense/rgb", + rr.Transform3D( + translation=rgb_from_depth.translation, + mat3x3=np.reshape(rgb_from_depth.rotation, (3, 3)), + from_parent=True, + ), + timeless=True, ) ``` ```python rr.log( - "realsense/rgb/image", - rr.Pinhole( - resolution=[rgb_intr.width, rgb_intr.height], - focal_length=[rgb_intr.fx, rgb_intr.fy], - principal_point=[rgb_intr.ppx, rgb_intr.ppy], - ), - timeless=True, + "realsense/rgb/image", + rr.Pinhole( + resolution=[rgb_intr.width, rgb_intr.height], + focal_length=[rgb_intr.fx, rgb_intr.fy], + principal_point=[rgb_intr.ppx, rgb_intr.ppy], + ), + timeless=True, ) ``` ```python @@ -73,19 +71,19 @@ rr.set_time_sequence("frame_nr", frame_nr) rr.log("realsense/rgb/image", rr.Image(color_image)) ``` -## Depth Image +### Depth image Just like the RGB images, the RealSense sensor also captures depth data. The depth images are logged as [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image) objects and are linked with the time they were captured. ```python rr.log( - "realsense/depth/image", - rr.Pinhole( - resolution=[depth_intr.width, depth_intr.height], - focal_length=[depth_intr.fx, depth_intr.fy], - principal_point=[depth_intr.ppx, depth_intr.ppy], - ), - timeless=True, + "realsense/depth/image", + rr.Pinhole( + resolution=[depth_intr.width, depth_intr.height], + focal_length=[depth_intr.fx, depth_intr.fy], + principal_point=[depth_intr.ppx, depth_intr.ppy], + ), + timeless=True, ) ``` ```python @@ -93,11 +91,7 @@ rr.set_time_sequence("frame_nr", frame_nr) rr.log("realsense/depth/image", rr.DepthImage(depth_image, meter=1.0 / depth_units)) ``` - - - - -# Run the Code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/llm_embedding_ner/README.md b/examples/python/llm_embedding_ner/README.md index d36dcf8d7ab5..7451237c2969 100644 --- a/examples/python/llm_embedding_ner/README.md +++ b/examples/python/llm_embedding_ner/README.md @@ -1,10 +1,12 @@ + +Visualize the [BERT-based named entity recognition (NER)](https://huggingface.co/dslim/bert-base-NER) with UMAP Embeddings. + @@ -13,39 +15,36 @@ thumbnail_dimensions = [480, 480] -Visualize the [BERT-based named entity recognition (NER)](https://huggingface.co/dslim/bert-base-NER) with UMAP Embeddings. - -# Used Rerun types +## Used Rerun types [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) -# Background +## Background This example splits text into tokens, feeds the token sequence into a large language model (BERT), which outputs an embedding per token. The embeddings are then classified into four types of entities: location (LOC), organizations (ORG), person (PER) and Miscellaneous (MISC). The embeddings are projected to a 3D space using [UMAP](https://umap-learn.readthedocs.io/en/latest), and visualized together with all other data in Rerun. 
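The projection step itself could look roughly like this (a sketch assuming a `token_embeddings` array of per-token BERT embeddings, not the example's exact code):

```python
# Hypothetical sketch: reduce high-dimensional token embeddings to 3D with UMAP
# so they can later be logged as a 3D point cloud.
import umap

reducer = umap.UMAP(n_components=3)
umap_embeddings = reducer.fit_transform(token_embeddings)  # shape: (num_tokens, 3)
```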
-# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code: -## Text +### Text The logging begins with the original text. Following this, the tokenized version is logged for further analysis, and the named entities identified by the NER model are logged separately. All texts are logged using [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) as a Markdown document to preserves structure and formatting. -### Original text + +#### Original text ```python rr.log("text", rr.TextDocument(text, media_type=rr.MediaType.MARKDOWN)) ``` -### Tokenized text +#### Tokenized text ```python rr.log("tokenized_text", rr.TextDocument(markdown, media_type=rr.MediaType.MARKDOWN)) ``` -### Named entities +#### Named entities ```python rr.log("named_entities", rr.TextDocument(named_entities_str, media_type=rr.MediaType.MARKDOWN)) ``` -## UMAP embeddings - -[//]: # (The embeddings to UMAP facilitates the exploration, understanding, and evaluation of the NER model's output in a more interpretable and visually appealing manner.) +### UMAP embeddings UMAP is used in this example for dimensionality reduction and visualization of the embeddings generated by a Named Entity Recognition (NER) model. UMAP preserves the essential structure and relationships between data points, and helps in identifying clusters or patterns within the named entities. @@ -70,14 +69,14 @@ rr.log("/", rr.AnnotationContext(annotation_context)) ```python rr.log( - "umap_embeddings", - rr.Points3D(umap_embeddings, class_ids=class_ids), - rr.AnyValues(**{"Token": token_words, "Named Entity": entity_per_token(token_words, ner_results)}), + "umap_embeddings", + rr.Points3D(umap_embeddings, class_ids=class_ids), + rr.AnyValues(**{"Token": token_words, "Named Entity": entity_per_token(token_words, ner_results)}), ) ``` -# Run the code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/log_file/README.md b/examples/python/log_file/README.md index 95bdce41785f..4298249c522f 100644 --- a/examples/python/log_file/README.md +++ b/examples/python/log_file/README.md @@ -1,5 +1,8 @@ Demonstrates how to log any file from the SDK using the [`DataLoader`](https://www.rerun.io/docs/howto/open-any-file) machinery. diff --git a/examples/python/mcc/README.md b/examples/python/mcc/README.md index d7bb9e8c4a68..977be02d7d38 100644 --- a/examples/python/mcc/README.md +++ b/examples/python/mcc/README.md @@ -6,6 +6,9 @@ thumbnail = "https://static.rerun.io/single-image-3D-reconstruction/c54498053d53 thumbnail_dimensions = [480, 480] --> +This example project combines several popular computer vision methods and uses Rerun to visualize the results and how the pieces fit together. + +# Visual project walkthrough By combining MetaAI's [Segment Anything Model (SAM)](https://github.com/facebookresearch/segment-anything) and [Multiview Compressive Coding (MCC)](https://github.com/facebookresearch/MCC) we can get a 3D object from a single image. 
@@ -32,7 +35,7 @@ MCC encodes the colored points and then creates a reconstruction by sweeping thr https://vimeo.com/865973880?autoplay=1&loop=1&autopause=0&background=1&muted=1&ratio=1:1 -This is a really great example of how a lot of cool solutions are built these days; by stringing together more targeted pre-trained models.The details of the three building blocks can be found in the respective papers: +This is a really great example of how a lot of cool solutions are built these days; by stringing together more targeted pre-trained models. The details of the three building blocks can be found in the respective papers: - [Segment Anything](https://arxiv.org/abs/2304.02643) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr DollΓ‘r, and Ross Girshick - [Multiview Compressive Coding for 3D Reconstruction](https://arxiv.org/abs/2301.08247) by Chao-Yuan Wu, Justin Johnson, Jitendra Malik, Christoph Feichtenhofer, and Georgia Gkioxari - [ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth](https://arxiv.org/abs/2302.12288) by Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias MΓΌller diff --git a/examples/python/minimal/README.md b/examples/python/minimal/README.md index 388248e9d268..233f814742ce 100644 --- a/examples/python/minimal/README.md +++ b/examples/python/minimal/README.md @@ -2,8 +2,10 @@ title = "Minimal example" thumbnail = "https://static.rerun.io/minimal-example/9e694c0689f20323ed0053506a7a099f7391afca/480w.png" thumbnail_dimensions = [480, 480] +tags = ["3D", "API example"] --> +Generates a 3D colored cube and demonstrates how to log a point cloud. @@ -13,9 +15,55 @@ thumbnail_dimensions = [480, 480] Minimal example screenshot -The simplest example of how to use Rerun, showing how to log a point cloud. -This is part of the [Quick Start guide](https://www.rerun.io/docs/getting-started/python). +Straightforward example from the [Quick Start guide](https://www.rerun.io/docs/getting-started/python) to generate a 3D colored cube and demonstrate how to log a point cloud. +# Used Rerun types + +[`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) + +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +It logs 3D points, each associated with a specific color, forming a grid pattern using [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype. 
+```python +import rerun as rr +import numpy as np + +rr.init("rerun_example_my_data", spawn=True) + +SIZE = 10 + +pos_grid = np.meshgrid(*[np.linspace(-10, 10, SIZE)]*3) +positions = np.vstack([d.reshape(-1) for d in pos_grid]).T + +col_grid = np.meshgrid(*[np.linspace(0, 255, SIZE)]*3) +colors = np.vstack([c.reshape(-1) for c in col_grid]).astype(np.uint8).T + +rr.log( + "my_points", + rr.Points3D(positions, colors=colors, radii=0.5) +) +``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/minimal/requirements.txt +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/minimal/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it, use the CLI with the `--help` option for guidance: ```bash -python examples/python/minimal/main.py +python examples/python/minimal/main.py --help ``` diff --git a/examples/python/multiprocessing/README.md index 9448190c965c..306e041df973 100644 --- a/examples/python/multiprocessing/README.md +++ b/examples/python/multiprocessing/README.md @@ -2,8 +2,10 @@ title = "Multiprocessing" thumbnail = "https://static.rerun.io/multiprocessing/959e2c675f52a7ca83e11e5170903e8f0f53f5ed/480w.png" thumbnail_dimensions = [480, 480] +tags = ["API example"] --> +Demonstrates how Rerun can work with the Python `multiprocessing` library. @@ -13,8 +15,78 @@ thumbnail_dimensions = [480, 480] -Demonstrates how rerun can work with the python `multiprocessing` library. +# Used Rerun types +[`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d), [`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log) + +# Logging and visualizing with Rerun +This example demonstrates how to use Rerun with `multiprocessing` to log data from multiple processes to the same Rerun viewer. +It starts with the definition of the function for logging, the `task`, followed by typical usage of Python's `multiprocessing` library. + +The function `task` is decorated with `@rr.shutdown_at_exit`. This decorator ensures that data is flushed when the task completes, even if the normal `atexit`-handlers are not called at the termination of a multiprocessing process. + +```python +@rr.shutdown_at_exit +def task(child_index: int) -> None: + rr.init("rerun_example_multiprocessing") + + rr.connect() + + title = f"task_{child_index}" + rr.log( + "log", + rr.TextLog( + f"Logging from pid={os.getpid()}, thread={threading.get_ident()} using the Rerun recording id {rr.get_recording_id()}" + ) + ) + if child_index == 0: + rr.log(title, rr.Boxes2D(array=[5, 5, 80, 80], array_format=rr.Box2DFormat.XYWH, labels=title)) + else: + rr.log( + title, + rr.Boxes2D( + array=[10 + child_index * 10, 20 + child_index * 5, 30, 40], + array_format=rr.Box2DFormat.XYWH, + labels=title, + ), + ) +``` + +The main function initializes Rerun with a specific application ID and manages the multiprocessing processes for logging data to the Rerun viewer.
+ +> Caution: Ensure that the `recording id` specified in the main function matches the one used in the logging functions + ```python +def main() -> None: + # … existing code … + rr.init("rerun_example_multiprocessing") + rr.spawn(connect=False) # this is the viewer that each child process will connect to + + task(0) + + for i in [1, 2, 3]: + p = multiprocessing.Process(target=task, args=(i,)) + p.start() + p.join() + ``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/multiprocessing/requirements.txt +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/multiprocessing/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: ```bash -python examples/python/multiprocessing/main.py +python examples/python/multiprocessing/main.py --help ``` diff --git a/examples/python/multithreading/README.md b/examples/python/multithreading/README.md index 730090633cc6..2a4fc0ee76f2 100644 --- a/examples/python/multithreading/README.md +++ b/examples/python/multithreading/README.md @@ -2,8 +2,10 @@ title = "Multithreading" thumbnail = "https://static.rerun.io/multithreading/80a3e566d6d9f8f17b04c839cd0ae2380c2baf02/480w.png" thumbnail_dimensions = [480, 480] +tags = ["API example"] --> +Demonstration of logging to Rerun from multiple threads. @@ -13,8 +15,57 @@ thumbnail_dimensions = [480, 480] Multithreading example screenshot -Demonstration of logging to Rerun from multiple threads. +# Used Rerun types +[`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d) + +# Logging and visualizing with Rerun +This example showcases logging from multiple threads, starting with the definition of the function for logging, the `rect_logger`, followed by typical usage of Python's `threading` module in the main function. + + ```python +def rect_logger(path: str, color: npt.NDArray[np.float32]) -> None: + for _ in range(1000): + rects_xy = np.random.rand(5, 2) * 1024 + rects_wh = np.random.rand(5, 2) * (1024 - rects_xy + 1) + rects = np.hstack((rects_xy, rects_wh)) + rr.log(path, rr.Boxes2D(array=rects, array_format=rr.Box2DFormat.XYWH, colors=color)) # Log the rectangles using Rerun + ``` + +The main function manages the multiple threads for logging data to the Rerun viewer. + ```python +def main() -> None: + # … existing code … + + threads = [] + + for i in range(10): # Create 10 threads to run the rect_logger function with different paths and colors. + t = threading.Thread(target=rect_logger, args=(f"thread/{i}", [random.randrange(255) for _ in range(3)])) + t.start() + threads.append(t) + + for t in threads: # Wait for all threads to complete before proceeding. 
+ t.join() + # … existing code … +``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/multithreading/requirements.txt +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/multithreading/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: ```bash -python examples/python/multithreading/main.py +python examples/python/multithreading/main.py --help ``` diff --git a/examples/python/nuscenes/README.md b/examples/python/nuscenes/README.md index 8c5fc77b0ee5..5976e0303da4 100644 --- a/examples/python/nuscenes/README.md +++ b/examples/python/nuscenes/README.md @@ -1,13 +1,14 @@ +Visualize the [nuScenes dataset](https://www.nuscenes.org/) including lidar, radar, images, and bounding boxes data. + @@ -16,51 +17,49 @@ build_args = ["--seconds=5"] -Visualize the [nuScenes dataset](https://www.nuscenes.org/) including lidar, radar, images, and bounding boxes data. - -# Used Rerun types +## Used Rerun types [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`Image`](https://ref.rerun.io/docs/python/0.14.1/common/image_helpers/#rerun.ImageEncoded)* -# Background +## Background This example demonstrates the ability to read and visualize scenes from the nuScenes dataset, which is a public large-scale dataset specifically designed for autonomous driving. The scenes in this dataset encompass data collected from a comprehensive suite of sensors on autonomous vehicles. These include 6 cameras, 1 LIDAR, 5 RADAR, GPS and IMU sensors. Consequently, the dataset provides information about the vehicle's pose, the images captured, the recorded sensor data and the results of object detection at any given moment. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code: -## Sensor calibration +### Sensor calibration First, pinhole cameras and sensor poses are initialized to offer a 3D view and camera perspective. This is achieved using the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) and [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetypes. 
```python rr.log( - f"world/ego_vehicle/{sensor_name}", - rr.Transform3D( - translation=calibrated_sensor["translation"], - rotation=rr.Quaternion(xyzw=rotation_xyzw), - from_parent=False, - ), - timeless=True, - ) + f"world/ego_vehicle/{sensor_name}", + rr.Transform3D( + translation=calibrated_sensor["translation"], + rotation=rr.Quaternion(xyzw=rotation_xyzw), + from_parent=False, + ), + timeless=True, +) ``` ```python rr.log( - f"world/ego_vehicle/{sensor_name}", - rr.Pinhole( - image_from_camera=calibrated_sensor["camera_intrinsic"], - width=sample_data["width"], - height=sample_data["height"], - ), - timeless=True, - ) + f"world/ego_vehicle/{sensor_name}", + rr.Pinhole( + image_from_camera=calibrated_sensor["camera_intrinsic"], + width=sample_data["width"], + height=sample_data["height"], + ), + timeless=True, +) ``` -## Timelines +### Timelines All data logged using Rerun in the following sections is initially connected to a specific time. Rerun assigns a timestamp to each piece of logged data, and these timestamps are associated with [`timelines`](https://www.rerun.io/docs/concepts/timelines). @@ -70,7 +69,7 @@ rr.set_time_seconds("timestamp", sample_data["timestamp"] * 1e-6) ``` -## Vehicle pose +### Vehicle pose As the vehicle is moving, its pose needs to be updated. Consequently, the positions of pinhole cameras and sensors must also be adjusted using [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d). ```python @@ -84,33 +83,56 @@ rr.log( ) ``` -## LiDAR data +### LiDAR data LiDAR data is logged as [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype. ```python rr.log(f"world/ego_vehicle/{sensor_name}", rr.Points3D(points, colors=point_colors)) ``` -## Camera data +### Camera data Camera data is logged as encoded images using [`ImageEncoded`](https://ref.rerun.io/docs/python/0.14.1/common/image_helpers/#rerun.ImageEncoded). ```python rr.log(f"world/ego_vehicle/{sensor_name}", rr.ImageEncoded(path=data_file_path)) ``` -## Radar data +### Radar data Radar data is logged similar to LiDAR data, as [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d). ```python rr.log(f"world/ego_vehicle/{sensor_name}", rr.Points3D(points, colors=point_colors)) ``` -## Annotations +### Annotations Annotations are logged as [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), containing details such as object positions, sizes, and rotation. ```python rr.log("world/anns", rr.Boxes3D(sizes=sizes, centers=centers, rotations=rotations, class_ids=class_ids)) ``` +### Setting up the default blueprint + +The default blueprint for this example is created by the following code: + +```python +sensor_space_views = [ + rrb.Spatial2DView( + name=sensor_name, + origin=f"world/ego_vehicle/{sensor_name}", + ) + for sensor_name in nuscene_sensor_names(nusc, args.scene_name) +] +blueprint = rrb.Vertical( + rrb.Spatial3DView(name="3D", origin="world"), + rrb.Grid(*sensor_space_views), + row_shares=[3, 2], +) +``` + +We programmatically create one view per sensor and arrange them in a grid layout, which is convenient when the number of views can significantly vary from dataset to dataset. This code also showcases the `row_shares` argument for vertical containers: it can be used to assign a relative size to each of the container's children. A similar `column_shares` argument exists for horizontal containers, while grid containers accept both. 
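As a minimal illustration of those share arguments (the origins in this snippet are made up for the sketch and are not the example's entity paths):

```python
import rerun.blueprint as rrb

# Hypothetical layout: a wide 3D view next to a narrower text panel,
# stacked on top of a slightly shorter row of two equally sized 2D views.
blueprint = rrb.Vertical(
    rrb.Horizontal(
        rrb.Spatial3DView(origin="/world"),
        rrb.TextDocumentView(origin="/description"),
        column_shares=[3, 1],  # relative widths inside the horizontal container
    ),
    rrb.Horizontal(
        rrb.Spatial2DView(origin="/camera/left"),
        rrb.Spatial2DView(origin="/camera/right"),
    ),
    row_shares=[3, 2],  # relative heights inside the vertical container
)
```

The resulting blueprint is then passed to `rr.script_setup(args, ..., default_blueprint=blueprint)`, just as the example's `main.py` does.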
+ + + -# Run the code +## Run the code To run this example, make sure you have Python version at least 3.9, the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/nuscenes/main.py b/examples/python/nuscenes/main.py index 8072177d3682..3879cbeb6bf6 100755 --- a/examples/python/nuscenes/main.py +++ b/examples/python/nuscenes/main.py @@ -13,6 +13,15 @@ from download_dataset import MINISPLIT_SCENES, download_minisplit from nuscenes import nuscenes +DESCRIPTION = """ +# nuScenes + +Visualize the [nuScenes dataset](https://www.nuscenes.org/) including lidar, radar, images, and bounding boxes data. + +The full source code for this example is available +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/nuscenes). +""" + EXAMPLE_DIR: Final = pathlib.Path(os.path.dirname(__file__)) DATASET_DIR: Final = EXAMPLE_DIR / "dataset" @@ -264,13 +273,19 @@ def main() -> None: for sensor_name in nuscene_sensor_names(nusc, args.scene_name) ] blueprint = rrb.Vertical( - rrb.Spatial3DView(name="3D", origin="world"), + rrb.Horizontal( + rrb.Spatial3DView(name="3D", origin="world"), + rrb.TextDocumentView(origin="description", name="Description"), + column_shares=[3, 1], + ), rrb.Grid(*sensor_space_views), - row_shares=[3, 2], + row_shares=[4, 2], ) rr.script_setup(args, "rerun_example_nuscenes", default_blueprint=blueprint) + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) + log_nuscenes(nusc, args.scene_name, max_time_sec=args.seconds) rr.script_teardown(args) diff --git a/examples/python/nv12/README.md b/examples/python/nv12/README.md index 049409941dfd..aa615390c2d5 100644 --- a/examples/python/nv12/README.md +++ b/examples/python/nv12/README.md @@ -1,18 +1,16 @@ +This example displays an NV12 encoded video stream from a webcam in rerun. - + -This example displays an NV12 encoded video stream from a webcam in rerun. + +## Run the code ```bash pip install -r examples/python/nv12/requirements.txt diff --git a/examples/python/objectron/README.md b/examples/python/objectron/README.md index 36d3dcec6ed2..22fc8a0f56bf 100644 --- a/examples/python/objectron/README.md +++ b/examples/python/objectron/README.md @@ -1,13 +1,14 @@ +Visualize the [Google Research Objectron](https://github.com/google-research-datasets/Objectron) dataset including camera poses, sparse point-clouds and surfaces characterization. + @@ -16,24 +17,20 @@ build_args = ["--frames=150"] Objectron example screenshot -[//]: # (Visualize the [Google Research Objectron](https://github.com/google-research-datasets/Objectron) dataset, which contains camera poses, sparse point-clouds and characterization of the planar surfaces in the surrounding environment.) - -Visualize the [Google Research Objectron](https://github.com/google-research-datasets/Objectron) dataset including camera poses, sparse point-clouds and surfaces characterization. 
- -# Used Rerun types +## Used Rerun types [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), [`Image`](https://ref.rerun.io/docs/python/0.14.1/common/image_helpers/#rerun.ImageEncoded)*, [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) -# Background +## Background This example visualizes the Objectron database, a rich collection of object-centric video clips accompanied by AR session metadata. With high-resolution images, object pose, camera pose, point-cloud, and surface plane information available for each sample, the visualization offers a comprehensive view of the object from various angles. Additionally, the dataset provides manually annotated 3D bounding boxes, enabling precise object localization and orientation. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code: -## Timelines +### Timelines For each processed frame, all data sent to Rerun is associated with the two [`timelines`](https://www.rerun.io/docs/concepts/timelines) `time` and `frame_idx`. @@ -42,7 +39,7 @@ rr.set_time_sequence("frame", sample.index) rr.set_time_seconds("time", sample.timestamp) ``` -## Video +### Video Pinhole camera is utilized for achieving a 3D view and camera perspective through the use of the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) and [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetypes. @@ -68,7 +65,7 @@ The input video is logged as a sequence of [`ImageEncoded`](https://ref.rerun.io rr.log("world/camera", rr.ImageEncoded(path=sample.image_path)) ``` -## Sparse point clouds +### Sparse point clouds Sparse point clouds from `ARFrame` are logged as [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype to the `world/points` entity. @@ -76,7 +73,7 @@ Sparse point clouds from `ARFrame` are logged as [`Points3D`](https://www.rerun. rr.log("world/points", rr.Points3D(positions, colors=[255, 255, 255, 255])) ``` -## Annotated bounding boxes +### Annotated bounding boxes Bounding boxes annotated from `ARFrame` are logged as [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), containing details such as object position, sizes, center and rotation. @@ -94,7 +91,22 @@ rr.log( ) ``` -# Run the code +### Setting up the default blueprint + +The default blueprint is configured with the following code: + +```python +blueprint = rrb.Horizontal( + rrb.Spatial3DView(origin="/world", name="World"), + rrb.Spatial2DView(origin="/world/camera", name="Camera", contents=["/world/**"]), +) +``` + +In particular, we want to reproject the points and the 3D annotation box in the 2D camera view corresponding to the pinhole logged at `"/world/camera"`. This is achieved by setting the view's contents to the entire `"/world/**"` subtree, which include both the pinhole transform and the image data, as well as the point cloud and the 3D annotation box. 
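When a view needs more than a single subtree, `contents` can also be given a list of include/exclude expressions. The `+`-prefixed include form appears in the old blueprint in the diff below; the exclusion rule and the exact paths in this sketch are illustrative assumptions:

```python
import rerun.blueprint as rrb

# Hypothetical: show everything under /world in the camera view,
# but leave the sparse point cloud out of it.
camera_view = rrb.Spatial2DView(
    origin="/world/camera",
    contents=[
        "+ /world/**",      # include the whole /world subtree
        "- /world/points",  # assumed exclusion syntax for a single entity
    ],
)
```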
+ + + +## Run the code To run this example, make sure you have Python version at least 3.9, the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/objectron/main.py b/examples/python/objectron/main.py index f07549a59c51..41bcd3726f4b 100755 --- a/examples/python/objectron/main.py +++ b/examples/python/objectron/main.py @@ -215,13 +215,14 @@ def main() -> None: rr.script_add_args(parser) args = parser.parse_args() + blueprint = rrb.Horizontal( + rrb.Spatial3DView(origin="/world", name="World"), + rrb.Spatial2DView(origin="/world/camera", name="Camera", contents=["/world/**"]), + ) rr.script_setup( args, "rerun_example_objectron", - default_blueprint=rrb.Horizontal( - rrb.Spatial3DView(origin="/world", name="World"), - rrb.Spatial2DView(origin="/world/camera", name="Camera", contents=["+ $origin/**", "+ /world/**"]), - ), + default_blueprint=blueprint, ) dir = ensure_recording_available(args.recording, args.dataset_dir, args.force_reprocess_video) diff --git a/examples/python/open_photogrammetry_format/README.md b/examples/python/open_photogrammetry_format/README.md index 456b01c06b9a..0bc19932441f 100644 --- a/examples/python/open_photogrammetry_format/README.md +++ b/examples/python/open_photogrammetry_format/README.md @@ -1,13 +1,14 @@ +Uses [`pyopf`](https://github.com/Pix4D/pyopf) to load and display a photogrammetrically reconstructed 3D point cloud in the [Open Photogrammetry Format (OPF)](https://www.pix4d.com/open-photogrammetry-format/). + @@ -16,23 +17,20 @@ build_args = ["--jpeg-quality=50"] - -Uses [`pyopf`](https://github.com/Pix4D/pyopf) to load and display a photogrammetrically reconstructed 3D point cloud in the [Open Photogrammetry Format (OPF)](https://www.pix4d.com/open-photogrammetry-format/). - -# Used Rerun types +## Used Rerun types [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) -# Background +## Background This example loads an Open Photogrammetry Format (OPF) project and displays the cameras and point cloud data. OPF, which stands for 'open photogrammetry format,' is a file format used for photogrammetry data. It contains all the necessary information related to a reconstructed 3D model made with photogrammetry, including calibration, point clouds and dense reconstruction. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code: -## Timelines +### Timelines For each processed frame, all data sent to Rerun is associated with specific time using [`timelines`](https://www.rerun.io/docs/concepts/timelines). @@ -40,7 +38,7 @@ The visualizations in this example were created with the following Rerun code: rr.set_time_sequence("image", i) ``` -## Video +### Video Pinhole camera is utilized for achieving a 3D view and camera perspective through the use of the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) and [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) archetypes. 
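The corresponding calibration calls are elided from this diff. A generic sketch of the pattern (the pose and intrinsics values below are placeholders; the entity paths are inferred from the `world/cameras/image/rgb` path used further down) could look like this:

```python
import numpy as np
import rerun as rr

rr.init("rerun_example_opf_camera_sketch", spawn=True)

# Placeholder camera pose -- in the example this comes from the OPF calibration data.
rr.log("world/cameras", rr.Transform3D(translation=[0.0, 0.0, 2.0], mat3x3=np.eye(3)))

# Placeholder intrinsics for a 640x480 sensor.
rr.log(
    "world/cameras/image",
    rr.Pinhole(
        focal_length=[500.0, 500.0],
        principal_point=[320.0, 240.0],
        resolution=[640, 480],
    ),
)

# Any image logged under the pinhole entity is then re-projected into the 3D view.
rr.log("world/cameras/image/rgb", rr.Image(np.zeros((480, 640, 3), dtype=np.uint8)))
```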
@@ -67,7 +65,7 @@ The input video is logged as a sequence of [`Image`](https://www.rerun.io/docs/r rr.log("world/cameras/image/rgb", rr.Image(np.array(img)).compress(jpeg_quality=jpeg_quality)) ``` -## Point clouds +### Point clouds Point clouds from the project are logged as [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype to the `world/points` entity. @@ -76,7 +74,7 @@ rr.log("world/points", rr.Points3D(points.position, colors=points.color), timele ``` -# Run the code +## Run the code > This example requires Python 3.10 or higher because of [`pyopf`](https://pypi.org/project/pyopf/). diff --git a/examples/python/open_photogrammetry_format/main.py b/examples/python/open_photogrammetry_format/main.py index 70883c4b47a6..6c3e1f34fa2b 100755 --- a/examples/python/open_photogrammetry_format/main.py +++ b/examples/python/open_photogrammetry_format/main.py @@ -2,10 +2,6 @@ """ Load an Open Photogrammetry Format (OPF) project and display the cameras and point cloud. -OPF specification: https://pix4d.github.io/opf-spec/index.html -Dataset source: https://support.pix4d.com/hc/en-us/articles/360000235126-Example-projects-real-photogrammetry-data#OPF1 -pyopf: https://github.com/Pix4D/pyopf - Requires Python 3.10 or higher because of [pyopf](https://pypi.org/project/pyopf/). """ from __future__ import annotations @@ -25,6 +21,20 @@ from pyopf.io import load from pyopf.resolve import resolve +DESCRIPTION = """ +# Open Photogrammetry Format + +Visualizes an Open Photogrammetry Format (OPF) project, displaying the cameras and point cloud. + +The full source code for this example is available +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/open_photogrammetry_format). + +### Links +* [OPF specification](https://pix4d.github.io/opf-spec/index.html) +* [Dataset source](https://support.pix4d.com/hc/en-us/articles/360000235126-Example-projects-real-photogrammetry-data#OPF1) +* [pyopf](https://github.com/Pix4D/pyopf) +""" + @dataclass class DatasetSpec: @@ -227,6 +237,7 @@ def main() -> None: # display everything in Rerun rr.script_setup(args, "rerun_example_open_photogrammetry_format") + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) rr.log("world", rr.ViewCoordinates.RIGHT_HAND_Z_UP, timeless=True) project.log_point_cloud() project.log_calibrated_cameras(jpeg_quality=args.jpeg_quality) diff --git a/examples/python/plots/README.md b/examples/python/plots/README.md index 33e38699bc30..6c9a888bf49d 100644 --- a/examples/python/plots/README.md +++ b/examples/python/plots/README.md @@ -1,12 +1,12 @@ +This example demonstrates how to log simple plots with the Rerun SDK. Charts can be created from 1-dimensional tensors, or from time-varying scalars. @@ -16,8 +16,122 @@ channel = "main" Plots example screenshot -This example demonstrates how to log simple plots with the Rerun SDK. Charts can be created from 1-dimensional tensors, or from time-varying scalars. +# Used Rerun types + +[`BarChart`](https://www.rerun.io/docs/reference/types/archetypes/bar_chart), [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar), [`SeriesPoint`](https://www.rerun.io/docs/reference/types/archetypes/series_point), [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) + +# Logging and visualizing with Rerun + +This example shows various plot types that you can create using Rerun. 
Common use cases for such plots would be logging +losses or metrics over time, histograms, or general function plots. + +The bar chart is created by logging the [`BarChart`](https://www.rerun.io/docs/reference/types/archetypes/bar_chart) archetype. +All other plots are created using the [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) archetype. +Each plot is created by logging scalars at different time steps (i.e., the x-axis). +Additionally, the plots are styled using the [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line) and +[`SeriesPoint`](https://www.rerun.io/docs/reference/types/archetypes/series_point) archetypes respectively. + +The visualizations in this example were created with the following Rerun code: + +## Bar chart + +The `log_bar_chart` function logs a bar chart. +It generates data for a Gaussian bell curve and logs it using [`BarChart`](https://www.rerun.io/docs/reference/types/archetypes/bar_chart) archetype. +```python +def log_bar_chart() -> None: + # … existing code … + rr.log("bar_chart", rr.BarChart(y)) +``` + +## Curves +The `log_parabola` function logs a parabola curve as a time series. + +It first sets up a time sequence using [`timelines`](https://www.rerun.io/docs/concepts/timelines), then calculates the y-value of the parabola at each time step, and logs it using [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) archetype. +It also adjusts the width and color of the plotted line based on the calculated y value using [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line) archetype. + +```python +def log_parabola() -> None: + # Name never changes, log it only once. + rr.log("curves/parabola", rr.SeriesLine(name="f(t) = (0.01t - 3)³ + 1"), timeless=True) + + # Log a parabola as a time series + for t in range(0, 1000, 10): + rr.set_time_sequence("frame_nr", t) + + # … existing code … + + rr.log( + "curves/parabola", + rr.Scalar(f_of_t), + rr.SeriesLine(width=width, color=color), + ) +``` + +## Trig + +The `log_trig` function logs sin and cos functions as time series. Sin and cos are logged with the same parent entity (i.e., `trig/{cos,sin}`) which will put them in the same view by default. +It first logs the styling properties of the sin and cos plots using [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line) archetype. +Then, it iterates over a range of time steps, calculates the sin and cos values at each time step, and logs them using [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) archetype. + + ```python +def log_trig() -> None: + # Styling doesn't change over time, log it once with timeless=True. + rr.log("trig/sin", rr.SeriesLine(color=[255, 0, 0], name="sin(0.01t)"), timeless=True) + rr.log("trig/cos", rr.SeriesLine(color=[0, 255, 0], name="cos(0.01t)"), timeless=True) + + for t in range(0, int(tau * 2 * 100.0)): + rr.set_time_sequence("frame_nr", t) + + sin_of_t = sin(float(t) / 100.0) + rr.log("trig/sin", rr.Scalar(sin_of_t)) + + cos_of_t = cos(float(t) / 100.0) + rr.log("trig/cos", rr.Scalar(cos_of_t)) + ``` + +## Classification + +The `log_classification` function simulates a classification problem by logging a line function and randomly generated samples around that line. + +It first logs the styling properties of the line plot using [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line) archetype.
+Then, it iterates over a range of time steps, calculates the y value of the line function at each time step, and logs it as a scalar using [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) archetype. +Additionally, it generates random samples around the line function and logs them using [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) and [`SeriesPoint`](https://www.rerun.io/docs/reference/types/archetypes/series_point) archetypes. + + ```python +def log_classification() -> None: + # Log components that don't change only once: + rr.log("classification/line", rr.SeriesLine(color=[255, 255, 0], width=3.0), timeless=True) + + for t in range(0, 1000, 2): + rr.set_time_sequence("frame_nr", t) + + # … existing code … + rr.log("classification/line", rr.Scalar(f_of_t)) + + # … existing code … + rr.log("classification/samples", rr.Scalar(g_of_t), rr.SeriesPoint(color=color, marker_size=marker_size)) + ``` + + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/plots/requirements.txt +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/plots/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: ```bash -python examples/python/plots/main.py +python examples/python/plots/main.py --help ``` diff --git a/examples/python/plots/main.py b/examples/python/plots/main.py index 725ea8c2dda3..368f4f53eec6 100755 --- a/examples/python/plots/main.py +++ b/examples/python/plots/main.py @@ -22,27 +22,7 @@ This example shows various plot types that you can create using Rerun. Common usecases for such plots would be logging losses or metrics over time, histograms, or general function plots. -## How it was made -The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/plots/main.py). - -### Bar charts -The [bar chart](recording://bar_chart) is created by logging the [rr.BarChart archetype](https://www.rerun.io/docs/reference/types/archetypes/bar_chart). - -### Time series -All other plots are created using the -[rr.Scalar archetype](https://www.rerun.io/docs/reference/types/archetypes/scalar) -archetype. -Each plot is created by logging scalars at different time steps (i.e., the x-axis). -Additionally, the plots are styled using the -[rr.SeriesLine](https://www.rerun.io/docs/reference/types/archetypes/series_line) and -[rr.SeriesPoint](https://www.rerun.io/docs/reference/types/archetypes/series_point) -archetypes respectively. - -For the [parabola](recording://curves/parabola) the radius and color is changed over time, -the other plots use timeless for their styling properties where possible. - -[sin](recording://trig/sin) and [cos](recording://trig/cos) are logged with the same parent entity (i.e., -`trig/{cos,sin}`) which will put them in the same view by default. +The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/plots). 
""".strip() @@ -136,7 +116,7 @@ def main() -> None: rrb.TimeSeriesView(name="Classification", origin="/classification"), ), rrb.TextDocumentView(name="Description", origin="/description"), - column_shares=[2, 1], + column_shares=[3, 1], ), rrb.SelectionPanel(expanded=False), rrb.TimePanel(expanded=False), diff --git a/examples/python/raw_mesh/README.md b/examples/python/raw_mesh/README.md index c30762f765fa..f4a2071147da 100644 --- a/examples/python/raw_mesh/README.md +++ b/examples/python/raw_mesh/README.md @@ -1,12 +1,13 @@ +Demonstrates logging of raw 3D mesh data (so-called "triangle soups") with simple material properties and their transform hierarchy. + @@ -15,22 +16,20 @@ channel = "release" -Demonstrates logging of raw 3D mesh data (so-called "triangle soups") with simple material properties and their transform hierarchy. - -# Used Rerun types +## Used Rerun types [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Mesh3D`](https://www.rerun.io/docs/reference/types/archetypes/mesh3d) -# Background +## Background Raw 3D mesh data refers to the basic geometric representation of a three-dimensional object, typically composed of interconnected triangles. These triangles collectively form the surface of the object, defining its shape and structure in a digital environment. Rerun was employed to visualize and manage this raw mesh data, along with its associated simple material properties and transform hierarchy. -# Logging and visualizing with Rerun +## Logging and visualizing with Rerun The visualizations in this example were created with the following Rerun code: -## 3D mesh data +### 3D mesh data The raw 3D mesh data are logged as [`Mesh3D`](https://www.rerun.io/docs/reference/types/archetypes/mesh3d) objects, and includes details about vertex positions, colors, normals, texture coordinates, material properties, and face indices for an accurate reconstruction and visualization. ```python @@ -59,7 +58,7 @@ rr.log( ``` -# Run the code +## Run the code To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: ```bash # Setup diff --git a/examples/python/raw_mesh/main.py b/examples/python/raw_mesh/main.py index 7b92e7097fd5..41452016aa47 100755 --- a/examples/python/raw_mesh/main.py +++ b/examples/python/raw_mesh/main.py @@ -16,10 +16,18 @@ import numpy as np import rerun as rr # pip install rerun-sdk +import rerun.blueprint as rrb import trimesh from download_dataset import AVAILABLE_MESHES, ensure_mesh_downloaded from rerun.components import Material +DESCRIPTION = """ +# Raw meshes +This example shows how you can log a hierarchial 3D mesh, including its transform hierarchy. + +The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/raw_mesh). 
+""" + def load_scene(path: Path) -> trimesh.Scene: print(f"loading scene {path}…") @@ -119,8 +127,6 @@ def main() -> None: rr.script_add_args(parser) args = parser.parse_args() - rr.script_setup(args, "rerun_example_raw_mesh") - scene_path = args.scene_path if scene_path is None: scene_path = ensure_mesh_downloaded(args.scene) @@ -128,6 +134,15 @@ def main() -> None: root = next(iter(scene.graph.nodes)) + blueprint = rrb.Horizontal( + rrb.Spatial3DView(name="Mesh", origin="/world"), + rrb.TextDocumentView(name="Description", origin="/description"), + column_shares=[3, 1], + ) + + rr.script_setup(args, "rerun_example_raw_mesh", default_blueprint=blueprint) + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) + # glTF always uses a right-handed coordinate system when +Y is up and meshes face +Z. rr.log(root, rr.ViewCoordinates.RUB, timeless=True) log_scene(scene, root) diff --git a/examples/python/rgbd/README.md b/examples/python/rgbd/README.md index 08d5b018178b..7e5429cfc591 100644 --- a/examples/python/rgbd/README.md +++ b/examples/python/rgbd/README.md @@ -1,13 +1,14 @@ +Visualizes an example recording from [the NYUD dataset](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html) with RGB and Depth channels. + @@ -16,9 +17,74 @@ build_args = ["--frames=300"] RGBD example screenshot -Example using an [example dataset](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html) from New York University with RGB and Depth channels. +# Used Rerun types +[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image) + +# Background +The dataset, known as the NYU Depth V2 dataset, consists of synchronized pairs of RGB and depth frames recorded by the Microsoft Kinect in various indoor scenes. +This example visualizes one scene of this dataset, and offers a rich source of data for object recognition, scene understanding, depth estimation, and more. + +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +## Timelines + +All data logged using Rerun in the following sections is connected to a specific time. +Rerun assigns a timestamp to each piece of logged data, and these timestamps are associated with a [`timeline`](https://www.rerun.io/docs/concepts/timelines). + + ```python +rr.set_time_seconds("time", time.timestamp()) + ``` + +## Image +The example image is logged as [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) to the `world/camera/image/rgb` entity. +```python +rr.log("world/camera/image/rgb", rr.Image(img_rgb).compress(jpeg_quality=95)) +``` + +## Depth image + +Pinhole camera is utilized for achieving a 3D view and camera perspective through the use of the [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole). + +```python +rr.log( + "world/camera/image", + rr.Pinhole( + resolution=[img_depth.shape[1], img_depth.shape[0]], + focal_length=0.7 * img_depth.shape[1], + ), +) +``` + +Then, the depth image is logged as an [`DepthImage`](https://www.rerun.io/docs/reference/types/archetypes/depth_image) to the `world/camera/image/depth` entity. 
+ +```python +rr.log("world/camera/image/depth", rr.DepthImage(img_depth, meter=DEPTH_IMAGE_SCALING)) +``` +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: ```bash pip install -r examples/python/rgbd/requirements.txt -python examples/python/rgbd/main.py +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/rgbd/main.py # run the example +``` +You can specify the recording: +```bash +python examples/python/rgbd/main.py --recording {cafe,basements,studies,office_kitchens,playroooms} +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: +```bash +python examples/python/rgbd/main.py --help ``` diff --git a/examples/python/rgbd/main.py b/examples/python/rgbd/main.py index 33bf11f9f959..e43bc1b59da4 100755 --- a/examples/python/rgbd/main.py +++ b/examples/python/rgbd/main.py @@ -22,6 +22,13 @@ import rerun.blueprint as rrb from tqdm import tqdm +DESCRIPTION = """ +# RGBD +Visualizes an example recording from [the NYUD dataset](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html) with RGB and Depth channels. + +The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/rgbd). +""" + DEPTH_IMAGE_SCALING: Final = 1e4 DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset" DATASET_URL_BASE: Final = "https://static.rerun.io/rgbd_dataset" @@ -169,15 +176,22 @@ def main() -> None: rrb.Vertical( # Put the origin for both 2D spaces where the pinhole is logged. Doing so allows them to understand how they're connected to the 3D space. # This enables interactions like clicking on a point in the 3D space to show the corresponding point in the 2D spaces and vice versa. - rrb.Spatial2DView(name="Depth & RGB", origin="world/camera/image"), - rrb.Spatial2DView(name="RGB", origin="world/camera/image", contents="world/camera/image/rgb"), + rrb.Spatial2DView(name="RGB & Depth", origin="world/camera/image"), + rrb.Tabs( + rrb.Spatial2DView(name="RGB", origin="world/camera/image", contents="world/camera/image/rgb"), + rrb.Spatial2DView(name="Depth", origin="world/camera/image", contents="world/camera/image/depth"), + ), + rrb.TextDocumentView(name="Description", origin="/description"), name="2D", + row_shares=[3, 3, 2], ), column_shares=[2, 1], ), ) recording_path = ensure_recording_downloaded(args.recording) + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) + log_nyud_data( recording_path=recording_path, subset_idx=args.subset_idx, diff --git a/examples/python/ros_node/README.md b/examples/python/ros_node/README.md index db4cb217f185..8174a5d3250c 100644 --- a/examples/python/ros_node/README.md +++ b/examples/python/ros_node/README.md @@ -1,10 +1,13 @@ +A minimal example of creating a ROS node that subscribes to topics and converts the messages to rerun log calls. + +The solution here is mostly a toy example to show how ROS concepts can be mapped to Rerun. 
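Reduced to a single topic, that mapping can be sketched roughly as follows (an illustrative toy rather than the example's `main.py`: the topic name, the `rgb8` encoding assumption, and the entity paths are made up, and TF and QoS handling are skipped):

```python
# Minimal sketch: bridge one ROS 2 image topic into Rerun.
import numpy as np
import rclpy
import rerun as rr
from rclpy.node import Node
from sensor_msgs.msg import Image


class RerunBridge(Node):
    def __init__(self) -> None:
        super().__init__("rerun_bridge")
        # Hypothetical topic name; the real example subscribes to several topics.
        self.create_subscription(Image, "/camera/image_raw", self.on_image, 10)

    def on_image(self, msg: Image) -> None:
        # Use the message's own timestamp so Rerun's timeline follows ROS time.
        rr.set_time_seconds("ros_time", msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9)
        # Assumes an `rgb8` encoding; other encodings need a conversion step first.
        img = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, 3)
        rr.log("camera/image", rr.Image(img))


def main() -> None:
    rr.init("rerun_example_ros_sketch", spawn=True)
    rclpy.init()
    rclpy.spin(RerunBridge())


if __name__ == "__main__":
    main()
```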
@@ -14,17 +17,26 @@ thumbnail_dimensions = [480, 480] -# Overview +# Used Rerun types +[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`Boxes3D`](https://www.rerun.io/docs/reference/types/archetypes/boxes3d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`LineStrips3D`](https://www.rerun.io/docs/reference/types/archetypes/line_strips3d), [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) -A minimal example of creating a ROS node that subscribes to topics and converts the messages to rerun log calls. +# Background +The [Robot Operating System (ROS)](https://www.ros.org) helps build robot applications through software libraries and tools. +Although Rerun doesn't have native ROS support, you can easily create a basic ROS 2 Python node to subscribe to common ROS topics and log them to Rerun. +In this example, Rerun visualizes simulation data, including robot pose, images, camera position, laser scans, point clouds, and velocities, as the [Turtlebot](http://wiki.ros.org/turtlebot3) navigates the environment. -The solution here is mostly a toy example to show how ROS concepts can be mapped to Rerun. Fore more information on -future improved ROS support, see the tracking issue: [#1527](https://github.com/rerun-io/rerun/issues/1537) +# Logging and visualizing with Rerun -NOTE: Unlike many of the other examples, this example requires a system installation of ROS -in addition to the packages from requirements.txt. +Find the detailed code walkthrough and explanation for visualizing this example here: [Using Rerun with ROS 2](https://www.rerun.io/docs/howto/ros2-nav-turtlebot). -# Dependencies +For more information on future improved ROS support, see tracking issue: [#1527](https://github.com/rerun-io/rerun/issues/1537) + +# Run the code + +## Dependencies + +> NOTE: Unlike many of the other examples, this example requires a system installation of ROS +in addition to the packages from requirements.txt. This example was developed and tested on top of [ROS2 Humble Hawksbill](https://docs.ros.org/en/humble/index.html) and the [turtlebot3 navigation example](https://navigation.ros.org/getting_started/index.html). @@ -34,6 +46,19 @@ Installing ROS is outside the scope of this example, but you will need the equiv sudo apt install ros-humble-desktop gazebo ros-humble-navigation2 ros-humble-turtlebot3 ros-humble-turtlebot3-gazebo ``` +Make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: +```bash +pip install -r examples/python/ros_node/requirements.txt +``` + In addition to installing the dependencies from `requirements.txt` into a venv you will also need to source the ROS setup script: ``` @@ -41,8 +66,7 @@ source venv/bin/active source /opt/ros/humble/setup.bash ``` - -# Running +## Run the code First, in one terminal launch the nav2 turtlebot demo: ``` @@ -56,6 +80,11 @@ ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False As described in the nav demo, use the rviz window to initialize the pose estimate and set a navigation goal. 
You can now connect to the running ROS system by running: +```bash +python examples/python/ros_node/main.py # run the example ``` -python3 examples/python/ros_node/main.py + +If you wish to customize it, or explore additional features, use the CLI with the `--help` option for guidance: +```bash +python examples/python/ros_node/main.py --help ``` diff --git a/examples/python/rrt-star/README.md index 005caff1aa86..f57f256ff030 100644 --- a/examples/python/rrt-star/README.md +++ b/examples/python/rrt-star/README.md @@ -1,12 +1,13 @@ +This example visualizes the path finding algorithm RRT\* in a simple environment. + RRT* example screenshot @@ -15,13 +16,104 @@ channel = "main" -This example visualizes the path finding algorithm RRT\* in a simple environment. +# Used Rerun types +[`LineStrips2D`](https://www.rerun.io/docs/reference/types/archetypes/line_strips2d), [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) + +# Background +The algorithm finds a path between two points by randomly expanding a tree from the start point. +After it has added a random edge to the tree, it looks at nearby nodes to check if it's faster to reach them through this new edge instead, +and if so it changes the parent of these nodes. This ensures that the algorithm will converge to the optimal path given enough time. A detailed explanation can be found in the original paper, "Sampling-based algorithms for optimal motion planning" (Karaman and Frazzoli, 2011), or in [this medium article](https://theclassytim.medium.com/robotic-path-planning-rrt-and-rrt-212319121378). + +# Logging and visualizing with Rerun + +All points are logged using the [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d) archetype, while the lines are logged using the [`LineStrips2D`](https://www.rerun.io/docs/reference/types/archetypes/line_strips2d) archetype.
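The rewiring step described in the background above can be sketched in a few lines (a simplified illustration with made-up `Node`/`rewire` structures, not the example's actual classes; a full implementation would also check that each new edge is collision-free):

```python
# Simplified sketch of the RRT* "rewire" step.
from __future__ import annotations

from dataclasses import dataclass

import numpy as np


@dataclass
class Node:
    pos: np.ndarray
    parent: Node | None
    cost: float  # cost of reaching this node from the start point


def rewire(new_node: Node, close_nodes: list[Node]) -> None:
    """Re-parent nearby nodes if going through `new_node` is cheaper."""
    for node in close_nodes:
        cost_via_new = new_node.cost + float(np.linalg.norm(node.pos - new_node.pos))
        if cost_via_new < node.cost:
            node.parent = new_node
            node.cost = cost_via_new
```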
+ +The visualizations in this example were created with the following Rerun code: + +## Map + +### Starting point +```python +rr.log("map/start", rr.Points2D([start_point], radii=0.02, colors=[[255, 255, 255, 255]])) +``` + +### Destination point +```python +rr.log("map/destination", rr.Points2D([end_point], radii=0.02, colors=[[255, 255, 0, 255]])) +``` + +### Obstacles +```python +rr.log("map/obstacles", rr.LineStrips2D(self.obstacles)) +``` + + +## RRT tree + +### Edges +```python +rr.log("map/tree/edges", rr.LineStrips2D(tree.segments(), radii=0.0005, colors=[0, 0, 255, 128])) +``` + +### New edges +```python +rr.log("map/new/new_edge", rr.LineStrips2D([(closest_node.pos, new_point)], colors=[color], radii=0.001)) +``` + +### Vertices +```python +rr.log("map/tree/vertices", rr.Points2D([node.pos for node in tree], radii=0.002), rr.AnyValues(cost=[float(node.cost) for node in tree])) +``` + +### Close nodes +```python +rr.log("map/new/close_nodes", rr.Points2D([node.pos for node in close_nodes])) +``` + +### Closest node +```python +rr.log("map/new/closest_node", rr.Points2D([closest_node.pos], radii=0.008)) +``` + +### Random points +```python +rr.log("map/new/random_point", rr.Points2D([random_point], radii=0.008)) +``` + +### New points +```python +rr.log("map/new/new_point", rr.Points2D([new_point], radii=0.008)) +``` + +### Path +```python +rr.log("map/path", rr.LineStrips2D(segments, radii=0.002, colors=[0, 255, 255, 255])) +``` + + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: ```bash pip install -r examples/python/rrt-star/requirements.txt -python examples/python/rrt-star/main.py +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/rrt-star/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: +```bash +python examples/python/rrt-star/main.py --help ``` diff --git a/examples/python/rrt-star/main.py b/examples/python/rrt-star/main.py index 1438640d1392..36a90e6158b2 100755 --- a/examples/python/rrt-star/main.py +++ b/examples/python/rrt-star/main.py @@ -26,6 +26,21 @@ import numpy as np import numpy.typing as npt import rerun as rr +import rerun.blueprint as rrb + +DESCRIPTION = """ +Visualizes the path finding algorithm RRT* in a simple environment. + +The algorithm finds a [path](recording://map/path) between two points by randomly expanding a [tree](recording://map/tree/edges) from the [start point](recording://map/start). +After it has added a [random edge](recording://map/new/new_edge) to the tree it looks at [nearby nodes](recording://map/new/close_nodes) to check if it's faster to reach them through this [new edge](recording://map/new/new_edge) instead, and if so it changes the parent of these nodes. +This ensures that the algorithm will converge to the optimal path given enough time. + +A more detailed explanation can be found in the original paper +Karaman, S. Frazzoli, S. 2011. "Sampling-based algorithms for optimal motion planning". 
+or in [this medium article](https://theclassytim.medium.com/robotic-path-planning-rrt-and-rrt-212319121378). + +The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/rrt-star). +""".strip() Point2D = Annotated[npt.NDArray[np.float64], Literal[2]] @@ -259,7 +274,13 @@ def main() -> None: parser.add_argument("--max-step-size", type=float, default=0.1) parser.add_argument("--iterations", type=int, help="How many iterations it should do") args = parser.parse_args() - rr.script_setup(args, "rerun_example_rrt_star") + + blueprint = rrb.Horizontal( + rrb.Spatial2DView(name="Map", origin="/map"), + rrb.TextDocumentView(name="Description", origin="/description"), + column_shares=[3, 1], + ) + rr.script_setup(args, "rerun_example_rrt_star", default_blueprint=blueprint) max_step_size = args.max_step_size neighborhood_size = max_step_size * 1.5 @@ -268,23 +289,7 @@ def main() -> None: end_point = np.array([1.8, 0.5]) rr.set_time_sequence("step", 0) - rr.log( - "description", - rr.TextDocument( - """ -Visualizes the path finding algorithm RRT* in a simple environment. - -The algorithm finds a [path](recording://map/path) between two points by randomly expanding a [tree](recording://map/tree/edges) from the [start point](recording://map/start). -After it has added a [random edge](recording://map/new/new_edge) to the tree it looks at [nearby nodes](recording://map/new/close_nodes) to check if it's faster to reach them through this [new edge](recording://map/new/new_edge) instead, and if so it changes the parent of these nodes. -This ensures that the algorithm will converge to the optimal path given enough time. - -A more detailed explanation can be found in the original paper -Karaman, S. Frazzoli, S. 2011. "Sampling-based algorithms for optimal motion planning". -or in [this medium article](https://theclassytim.medium.com/robotic-path-planning-rrt-and-rrt-212319121378) - """.strip(), - media_type=rr.MediaType.MARKDOWN, - ), - ) + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) rr.log( "map/start", rr.Points2D([start_point], radii=0.02, colors=[[255, 255, 255, 255]]), diff --git a/examples/python/segment_anything_model/README.md b/examples/python/segment_anything_model/README.md index f4d9a24d0445..ba2b5b7974d7 100644 --- a/examples/python/segment_anything_model/README.md +++ b/examples/python/segment_anything_model/README.md @@ -1,12 +1,12 @@ +Example of using Rerun to log and visualize the output of [Meta AI's Segment Anything model](https://segment-anything.com/). @@ -16,11 +16,69 @@ channel = "release" Segment Anything Model example screenshot -Example of using Rerun to log and visualize the output of Meta AI's Segment Anything model. +# Used Rerun types +[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor), [`SegmentationImage`](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image), [`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d) -For more info see [here](https://segment-anything.com/). +# Background +This example showcases the visualization capabilities of [Meta AI's Segment Anything model](https://segment-anything.com/). +The visualization provided in this example demonstrates the precise and accurate segmentation capabilities of the model, effectively distinguishing each object from the background and creating a transparent mask around them. 
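As a complement to the logging calls shown in the next section, the following is a minimal sketch of how a set of boolean masks could be layered into a single id image suitable for logging as a segmentation image. This is an assumed illustration of the general approach, not the example's exact code, and `masks_to_segmentation` is a hypothetical helper name.

```python
# Sketch: combine boolean masks of shape (H, W) into one uint8 id image, where 0 = background.
# Hypothetical helper for illustration; not the example's exact implementation.
import numpy as np


def masks_to_segmentation(masks: list[np.ndarray]) -> np.ndarray:
    assert masks, "expected at least one mask"
    segmentation = np.zeros(masks[0].shape, dtype=np.uint8)
    for i, mask in enumerate(masks):
        # Later masks overwrite earlier ones where they overlap.
        segmentation[mask] = i + 1
    return segmentation
```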
+# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +## Timelines + +All data logged using Rerun in the following sections is connected to a specific frame. +Rerun assigns a frame to each piece of logged data, and these timestamps are associated with a [`timeline`](https://www.rerun.io/docs/concepts/timelines). + + ```python +for n, image_uri in enumerate(args.images): + rr.set_time_sequence("image", n) + image = load_image(image_uri) + run_segmentation(mask_generator, image) + ``` + +## Image +The input image is logged as [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) to the `image` entity. +```python +rr.log("image", rr.Image(image)) +``` +## Segmentation +All masks are stacked together and logged using the [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor) archetype. +```python +rr.log("mask_tensor", rr.Tensor(mask_tensor)) +``` +Then, all the masks are layered together and the result is logged as a [`SegmentationImage`](https://www.rerun.io/docs/reference/types/archetypes/segmentation_image) to the `image/masks` entity. +```python +rr.log("image/masks", rr.SegmentationImage(segmentation_img.astype(np.uint8))) +``` +For object localization, bounding boxes of segmentations are logged as [`Boxes2D`](https://www.rerun.io/docs/reference/types/archetypes/boxes2d). +```python +rr.log( + "image/boxes", + rr.Boxes2D(array=mask_bbox, array_format=rr.Box2DFormat.XYWH, class_ids=[id for id, _ in masks_with_ids]), +) +``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: ```bash pip install -r examples/python/segment_anything_model/requirements.txt -python examples/python/segment_anything_model/main.py +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/segment_anything_model/main.py # run the example +``` +If you wish to customize it or explore additional features, use the CLI with the `--help` option for guidance: +```bash +python examples/python/segment_anything_model/main.py --help ``` diff --git a/examples/python/segment_anything_model/main.py b/examples/python/segment_anything_model/main.py index 5124182d4658..cbd8f7597543 100755 --- a/examples/python/segment_anything_model/main.py +++ b/examples/python/segment_anything_model/main.py @@ -1,8 +1,6 @@ #!/usr/bin/env python3 """ -Example of using Rerun to log and visualize the output of segment-anything. - -See: [segment_anything](https://segment-anything.com/). +Example of using Rerun to log and visualize the output of [Segment Anything](https://segment-anything.com/). Can be used to test mask-generation on one or more images. Images can be local file-paths or remote urls. @@ -29,6 +27,7 @@ import numpy as np import requests import rerun as rr # pip install rerun-sdk +import rerun.blueprint as rrb import torch import torchvision from cv2 import Mat @@ -36,6 +35,12 @@ from segment_anything.modeling import Sam from tqdm import tqdm +DESCRIPTION = """ +Example of using Rerun to log and visualize the output of [Segment Anything](https://segment-anything.com/). 
+ +The full source code for this example is available [on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/segment_anything_model). +""".strip() + MODEL_DIR: Final = Path(os.path.dirname(__file__)) / "model" MODEL_URLS: Final = { "vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", @@ -177,10 +182,22 @@ def main() -> None: rr.script_add_args(parser) args = parser.parse_args() - rr.script_setup(args, "rerun_example_segment_anything_model") + blueprint = rrb.Vertical( + rrb.Spatial2DView(name="Image and segmentation mask", origin="/image"), + rrb.Horizontal( + rrb.TextLogView(name="Log", origin="/logs"), + rrb.TextDocumentView(name="Description", origin="/description"), + column_shares=[2, 1], + ), + row_shares=[3, 1], + ) + + rr.script_setup(args, "rerun_example_segment_anything_model", default_blueprint=blueprint) logging.getLogger().addHandler(rr.LoggingHandler("logs")) logging.getLogger().setLevel(logging.INFO) + rr.log("description", rr.TextDocument(DESCRIPTION, media_type=rr.MediaType.MARKDOWN), timeless=True) + sam = create_sam(args.model, args.device) mask_config = {"points_per_batch": args.points_per_batch} diff --git a/examples/python/shape_pointe/README.md b/examples/python/shape_pointe/README.md index dedf923e73bd..d4df5865718c 100644 --- a/examples/python/shape_pointe/README.md +++ b/examples/python/shape_pointe/README.md @@ -6,6 +6,9 @@ thumbnail = "https://static.rerun.io/point-e/5b5beb36dce77d2dac7123b197b825421af thumbnail_dimensions = [480, 480] --> +This example is a visual comparison of two popular text-to-3D methods that uses Rerun to compare the generation process and results. + +## Visual paper comparison OpenAI has released two models for text-to-3D generation: Point-E and Shape-E. Both of these methods are fast and interesting but still low fidelity for now. diff --git a/examples/python/signed_distance_fields/README.md b/examples/python/signed_distance_fields/README.md index b58e942f7529..6100fa9fadbe 100644 --- a/examples/python/signed_distance_fields/README.md +++ b/examples/python/signed_distance_fields/README.md @@ -1,23 +1,96 @@ +Visualize the results of the Generate Signed Distance Fields for arbitrary meshes using both traditional methods and the one described in the [DeepSDF paper](https://arxiv.org/abs/1901.05103) + - Signed Distance Fields example screenshot - - - - + + + + + Signed Distance Fields example screenshot -Generate Signed Distance Fields for arbitrary meshes using both traditional methods and the one described in the [DeepSDF paper](https://arxiv.org/abs/1901.05103), and visualize the results using the Rerun SDK. +# Used Rerun types +[`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor), [`Asset3D`](https://www.rerun.io/docs/reference/types/archetypes/asset3d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`AnnotationContext`](https://www.rerun.io/docs/reference/types/archetypes/annotation_context), [`TextLog`](https://www.rerun.io/docs/reference/types/archetypes/text_log) + +# Background + +This example illustrates the visualization of the results obtained from generating Signed Distance Fields (SDFs) for arbitrary meshes using both traditional methods and the approach described in the [DeepSDF paper](https://arxiv.org/abs/1901.05103). +DeepSDF introduces a learned continuous representation of shapes using SDFs, enabling high-quality shape representation, interpolation, and completion from partial and noisy 3D input data. 
+This novel approach offers improved performance and reduced model size compared to previous methods. +The generated SDFs help with accurate 3D reconstruction and visualization. + +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +## 3D asset + +```python +# Internally, `mesh_to_sdf` will normalize everything to a unit sphere centered around the center of mass. +bs1 = mesh.bounding_sphere +bs2 = mesh_to_sdf.scale_to_unit_sphere(mesh).bounding_sphere +scale = bs2.scale / bs1.scale +center = bs2.center - bs1.center * scale +``` + +```python +# Logging the 3D asset with the unit sphere +mesh3d = rr.Asset3D(path=path) +mesh3d.transform = rr.OutOfTreeTransform3DBatch(rr.TranslationRotationScale3D(translation=center, scale=scale)) +rr.log("world/mesh", mesh3d) +``` + +## Sample SDF + +The sampled points and their corresponding signed distances are visualized using the [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype within the `world/sdf/points` entity. + +```python +# Points inside the object are highlighted in red, while those outside are marked in green. +rr.log("world/sdf", rr.AnnotationContext([(0, "inside", (255, 0, 0)), (1, "outside", (0, 255, 0))]), timeless=False) +``` + +```python +rr.log("world/sdf/points", rr.Points3D(points, class_ids=np.array(sdf > 0, dtype=np.uint8))) # Visualizing Sample SDF +``` + +## Volumetric SDF + +The computed distances for each voxel are visualized using the [`Tensor`](https://www.rerun.io/docs/reference/types/archetypes/tensor) archetype to the `tensor` entity, which represents a 3D grid with dimensions for width, height, and depth. + +```python +rr.log("tensor", rr.Tensor(voxvol, dim_names=["width", "height", "depth"])) # Visualizing Volumetric SDF +``` + +# Run the code +> _Known issue_: On macOS, this example may present artefacts in the SDF and/or fail. +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: ```bash pip install -r examples/python/signed_distance_fields/requirements.txt -python examples/python/signed_distance_fields/main.py ``` - -_Known issue_: On macOS, this example may present artefacts in the SDF and/or fail. +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/signed_distance_fields/main.py # run the example +``` +You can specify the mesh: +```bash +python examples/python/signed_distance_fields/main.py --mesh {lantern,avocado,buggy,brain_stem} +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: +```bash +python examples/python/signed_distance_fields/main.py --help +``` diff --git a/examples/python/simplerecon/README.md b/examples/python/simplerecon/README.md index 599cb0f15cf3..18fd078584db 100644 --- a/examples/python/simplerecon/README.md +++ b/examples/python/simplerecon/README.md @@ -6,6 +6,11 @@ thumbnail = "https://static.rerun.io/simplecon/e0f234159cc0f934e6d4a26886b751579 thumbnail_dimensions = [480, 480] --> +This example is a visual walkthrough of the paper "SimpleRecon: 3D reconstruction without 3D convolutions". 
+All the visualizations were created by editing the original source code to log data with the Rerun SDK. + +## Visual paper walkthrough + SimpleRecon is a back-to-basics approach for 3D scene reconstruction from posed monocular images by Niantic Labs. It offers state-of-the-art depth accuracy and competitive 3D scene reconstruction which makes it perfect for resource-constrained environments. diff --git a/examples/python/slahmr/README.md b/examples/python/slahmr/README.md index d566f2f311b2..a0ed3aced25f 100644 --- a/examples/python/slahmr/README.md +++ b/examples/python/slahmr/README.md @@ -7,6 +7,11 @@ thumbnail_dimensions = [480, 480] --> +This example is a visual walkthrough of the paper β€œDecoupling Human and Camera Motion from Videos in the Wild”. +All the visualizations were created by editing the original source code to log data with the Rerun SDK. + +## Visual paper walkthrough + SLAHMR robustly tracks the motion of multiple moving people filmed with a moving camera and works well on β€œin-the-wild” videos. It’s a great showcase of how to build working computer vision systems by intelligently combining several single purpose models. https://vimeo.com/865974657?autoplay=1&loop=1&autopause=0&background=1&muted=1&ratio=10000:6835 diff --git a/examples/python/structure_from_motion/README.md b/examples/python/structure_from_motion/README.md index 09c3ca223b70..7c3ce6a61263 100644 --- a/examples/python/structure_from_motion/README.md +++ b/examples/python/structure_from_motion/README.md @@ -1,13 +1,13 @@ +Visualize a sparse reconstruction by [COLMAP](https://colmap.github.io/index.html), a general-purpose Structure-from-Motion (SfM) and Multi-View Stereo (MVS) pipeline with a graphical and command-line interface @@ -17,14 +17,102 @@ build_args = ["--dataset=colmap_fiat", "--resize=800x600"] Structure From Motion example screenshot -An example using Rerun to log and visualize the output of COLMAP's sparse reconstruction. +# Background -[COLMAP](https://colmap.github.io/index.html) is a general-purpose Structure-from-Motion (SfM) and Multi-View Stereo (MVS) pipeline with a graphical and command-line interface. +COLMAP is a general-purpose Structure-from-Motion (SfM) and Multi-View Stereo (MVS) pipeline. +In this example, a short video clip has been processed offline using the COLMAP pipeline. +The processed data was then visualized using Rerun, which allowed for the visualization of individual camera frames, estimation of camera poses, and creation of point clouds over time. +By using COLMAP in combination with Rerun, a highly-detailed reconstruction of the scene depicted in the video was generated. -In this example a short video clip has been processed offline by the COLMAP pipeline, and we use Rerun to visualize the individual camera frames, estimated camera poses, and resulting point clouds over time. 
+# Used Rerun types +[`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d), [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d), [`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d), [`SeriesLine`](https://www.rerun.io/docs/reference/types/archetypes/series_line), [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar), [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole), [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), [`TextDocument`](https://www.rerun.io/docs/reference/types/archetypes/text_document) +# Logging and visualizing with Rerun + +The visualizations in this example were created with the following Rerun code: + +## Timelines + +All data logged using Rerun in the following sections is connected to a specific frame. +Rerun assigns a frame id to each piece of logged data, and these frame ids are associated with a [`timeline`](https://www.rerun.io/docs/concepts/timelines). + + ```python +rr.set_time_sequence("frame", frame_idx) + ``` + +## Images +The images are logged through the [`Image`](https://www.rerun.io/docs/reference/types/archetypes/image) to the `camera/image` entity. + +```python +rr.log("camera/image", rr.Image(rgb).compress(jpeg_quality=75)) +``` + +## Cameras +The images stem from pinhole cameras located in the 3D world. To visualize the images in 3D, the pinhole projection has +to be logged and the camera pose (this is often referred to as the intrinsics and extrinsics of the camera, +respectively). + +The [`Pinhole`](https://www.rerun.io/docs/reference/types/archetypes/pinhole) is logged to the `camera/image` entity and defines the intrinsics of the camera. +This defines how to go from the 3D camera frame to the 2D image plane. The extrinsics are logged as an +[`Transform3D`](https://www.rerun.io/docs/reference/types/archetypes/transform3d) to the `camera` entity. + +```python +rr.log("camera", rr.Transform3D(translation=image.tvec, rotation=rr.Quaternion(xyzw=quat_xyzw), from_parent=True)) +``` + +```python +rr.log( + "camera/image", + rr.Pinhole( + resolution=[camera.width, camera.height], + focal_length=camera.params[:2], + principal_point=camera.params[2:], + ), +) +``` + +## Reprojection error +For each image a [`Scalar`](https://www.rerun.io/docs/reference/types/archetypes/scalar) archetype containing the average reprojection error of the keypoints is logged to the +`plot/avg_reproj_err` entity. + +```python +rr.log("plot/avg_reproj_err", rr.Scalar(np.mean(point_errors))) +``` + +## 2D points +The 2D image points that are used to triangulate the 3D points are visualized by logging as [`Points2D`](https://www.rerun.io/docs/reference/types/archetypes/points2d) +to the `camera/image/keypoints` entity. Note that these keypoints are a child of the +`camera/image` entity, since the points should show in the image plane. + +```python +rr.log("camera/image/keypoints", rr.Points2D(visible_xys, colors=[34, 138, 167])) +``` + +## 3D points +The colored 3D points were added to the visualization by logging the [`Points3D`](https://www.rerun.io/docs/reference/types/archetypes/points3d) archetype to the `points` entity. 
+```python +rr.log("points", rr.Points3D(points, colors=point_colors), rr.AnyValues(error=point_errors)) +``` + +# Run the code +To run this example, make sure you have the Rerun repository checked out and the latest SDK installed: +```bash +# Setup +pip install --upgrade rerun-sdk # install the latest Rerun SDK +git clone git@github.com:rerun-io/rerun.git # Clone the repository +cd rerun +git checkout latest # Check out the commit matching the latest SDK release +``` +Install the necessary libraries specified in the requirements file: ```bash pip install -r examples/python/structure_from_motion/requirements.txt -python examples/python/structure_from_motion/main.py +``` +To experiment with the provided example, simply execute the main Python script: +```bash +python examples/python/structure_from_motion/main.py # run the example +``` +If you wish to customize it, explore additional features, or save it use the CLI with the `--help` option for guidance: +```bash +python examples/python/structure_from_motion/main.py --help ``` diff --git a/examples/python/structure_from_motion/main.py b/examples/python/structure_from_motion/main.py index a8f01bbd0ab7..4861a3998157 100755 --- a/examples/python/structure_from_motion/main.py +++ b/examples/python/structure_from_motion/main.py @@ -19,13 +19,8 @@ from read_write_model import Camera, read_model from tqdm import tqdm -DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset" -DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/colmap" -# When dataset filtering is turned on, drop views with less than this many valid points. -FILTER_MIN_VISIBLE: Final = 500 - DESCRIPTION = """ -# Sparse Reconstruction by COLMAP +# Sparse reconstruction by COLMAP This example was generated from the output of a sparse reconstruction done with COLMAP. [COLMAP](https://colmap.github.io/index.html) is a general-purpose Structure-from-Motion (SfM) and Multi-View Stereo @@ -34,47 +29,15 @@ In this example a short video clip has been processed offline by the COLMAP pipeline, and we use Rerun to visualize the individual camera frames, estimated camera poses, and resulting point clouds over time. -## How it was made The full source code for this example is available -[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/structure_from_motion/main.py). - -### Images -The images are logged through the [rr.Image archetype](https://www.rerun.io/docs/reference/types/archetypes/image) -to the [camera/image entity](recording://camera/image). - -### Cameras -The images stem from pinhole cameras located in the 3D world. To visualize the images in 3D, the pinhole projection has -to be logged and the camera pose (this is often referred to as the intrinsics and extrinsics of the camera, -respectively). - -The [rr.Pinhole archetype](https://www.rerun.io/docs/reference/types/archetypes/pinhole) is logged to -the [camera/image entity](recording://camera/image) and defines the intrinsics of the camera. This defines how to go -from the 3D camera frame to the 2D image plane. The extrinsics are logged as an -[rr.Transform3D archetype](https://www.rerun.io/docs/reference/types/archetypes/transform3d) to the -[camera entity](recording://camera). - -### Reprojection error -For each image a [rr.Scalar archetype](https://www.rerun.io/docs/reference/types/archetypes/scalar) -containing the average reprojection error of the keypoints is logged to the -[plot/avg_reproj_err entity](recording://plot/avg_reproj_err). 
- -### 2D points -The 2D image points that are used to triangulate the 3D points are visualized by logging -[rr.Points3D archetype](https://www.rerun.io/docs/reference/types/archetypes/points2d) -to the [camera/image/keypoints entity](recording://camera/image/keypoints). Note that these keypoints are a child of the -[camera/image entity](recording://camera/image), since the points should show in the image plane. - -### Colored 3D points -The colored 3D points were added to the scene by logging the -[rr.Points3D archetype](https://www.rerun.io/docs/reference/types/archetypes/points3d) -to the [points entity](recording://points): -```python -rr.log("points", rr.Points3D(points, colors=point_colors), rr.AnyValues(error=point_errors)) -``` -**Note:** we added some [custom per-point errors](recording://points) that you can see when you -hover over the points in the 3D view. +[on GitHub](https://github.com/rerun-io/rerun/blob/latest/examples/python/structure_from_motion). """.strip() +DATASET_DIR: Final = Path(os.path.dirname(__file__)) / "dataset" +DATASET_URL_BASE: Final = "https://storage.googleapis.com/rerun-example-datasets/colmap" +# When dataset filtering is turned on, drop views with less than this many valid points. +FILTER_MIN_VISIBLE: Final = 500 + def scale_camera(camera: Camera, resize: tuple[int, int]) -> tuple[Camera, npt.NDArray[np.float_]]: """Scale the camera intrinsics to match the resized image.""" diff --git a/examples/python/template/README.md b/examples/python/template/README.md index 391f11a6e133..ad0a63f0978e 100644 --- a/examples/python/template/README.md +++ b/examples/python/template/README.md @@ -1,16 +1,27 @@ +This is an example template. It is not a real example. You can duplicate the directory and use it as a starting point for writing a real example. +Put a short description at the top, like this. -This is an example template. It is not a real example. You can duplicate the directory and use it as a starting point for writing a real example. +## Used Rerun types +[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), … + +## Optional details here +… + +## Run the code ```bash pip install -r examples/python/template/requirements.txt diff --git a/examples/python/tfrecord_loader/README.md b/examples/python/tfrecord_loader/README.md index 169c781afd25..06782e3f9f79 100644 --- a/examples/python/tfrecord_loader/README.md +++ b/examples/python/tfrecord_loader/README.md @@ -1,5 +1,5 @@ +This example is a visual walkthrough of the paper "Learning to render novel views from wide-baseline stereo pairs". +All the visualizations were created by editing the original source code to log data with the Rerun SDK. + +## Visual paper walkthrough Novel view synthesis has made remarkable progress in recent years, but most methods require per-scene optimization on many images. In their [CVPR 2023 paper](https://openaccess.thecvf.com/content/CVPR2023/html/Du_Learning_To_Render_Novel_Views_From_Wide-Baseline_Stereo_Pairs_CVPR_2023_paper.html) Yilun Du et al. propose a method that works with just 2 views. I created a visual walkthrough of the work using the Rerun SDK. 
diff --git a/examples/rust/clock/Cargo.toml b/examples/rust/clock/Cargo.toml index d8d010536180..7c6272ad1888 100644 --- a/examples/rust/clock/Cargo.toml +++ b/examples/rust/clock/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "clock" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/custom_data_loader/Cargo.toml b/examples/rust/custom_data_loader/Cargo.toml index ac5f22c5234d..dbfe1fa9071d 100644 --- a/examples/rust/custom_data_loader/Cargo.toml +++ b/examples/rust/custom_data_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "custom_data_loader" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/custom_space_view/Cargo.toml b/examples/rust/custom_space_view/Cargo.toml index acdca2beaa81..09b4cd75f06e 100644 --- a/examples/rust/custom_space_view/Cargo.toml +++ b/examples/rust/custom_space_view/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "custom_space_view" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/custom_store_subscriber/Cargo.toml b/examples/rust/custom_store_subscriber/Cargo.toml index 541745d2e86f..17c540f3406e 100644 --- a/examples/rust/custom_store_subscriber/Cargo.toml +++ b/examples/rust/custom_store_subscriber/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "custom_store_subscriber" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/dna/Cargo.toml b/examples/rust/dna/Cargo.toml index 66f92f5b5a2e..31b674131ca1 100644 --- a/examples/rust/dna/Cargo.toml +++ b/examples/rust/dna/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dna" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/dna/README.md b/examples/rust/dna/README.md index 3d25f72f56a0..3b74b8c97db8 100644 --- a/examples/rust/dna/README.md +++ b/examples/rust/dna/README.md @@ -1,12 +1,12 @@ +Simple example of logging point and line primitives to draw a 3D helix. @@ -16,8 +16,6 @@ channel = "main" -Simple example of logging point and line primitives to draw a 3D helix. 
- ```bash cargo run --release ``` diff --git a/examples/rust/extend_viewer_ui/Cargo.toml b/examples/rust/extend_viewer_ui/Cargo.toml index e8b37eae7596..f600ec6e9e91 100644 --- a/examples/rust/extend_viewer_ui/Cargo.toml +++ b/examples/rust/extend_viewer_ui/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "extend_viewer_ui" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/external_data_loader/Cargo.toml b/examples/rust/external_data_loader/Cargo.toml index 7ee3d4e7565a..fab05e0197c2 100644 --- a/examples/rust/external_data_loader/Cargo.toml +++ b/examples/rust/external_data_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rerun-loader-rust-file" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/incremental_logging/Cargo.toml b/examples/rust/incremental_logging/Cargo.toml index 3dea27f2945e..22c0ab1860c9 100644 --- a/examples/rust/incremental_logging/Cargo.toml +++ b/examples/rust/incremental_logging/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "incremental_logging" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/incremental_logging/README.md b/examples/rust/incremental_logging/README.md index 143ab0b9d1f7..aed1e97ee619 100644 --- a/examples/rust/incremental_logging/README.md +++ b/examples/rust/incremental_logging/README.md @@ -1,11 +1,11 @@ +Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames. @@ -15,8 +15,6 @@ thumbnail_dimensions = [480, 301] -Showcases how to incrementally log data belonging to the same archetype, and re-use some or all of it across frames. 
- ```bash cargo run --release diff --git a/examples/rust/log_file/Cargo.toml b/examples/rust/log_file/Cargo.toml index 0b81f4632c78..1a9af3d6912c 100644 --- a/examples/rust/log_file/Cargo.toml +++ b/examples/rust/log_file/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "log_file" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/minimal/Cargo.toml b/examples/rust/minimal/Cargo.toml index 08ccde4af01a..11e0781f5d71 100644 --- a/examples/rust/minimal/Cargo.toml +++ b/examples/rust/minimal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minimal" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/minimal_options/Cargo.toml b/examples/rust/minimal_options/Cargo.toml index f91c036bb215..f4723c68952e 100644 --- a/examples/rust/minimal_options/Cargo.toml +++ b/examples/rust/minimal_options/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minimal_options" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/minimal_serve/Cargo.toml b/examples/rust/minimal_serve/Cargo.toml index 7f1b6d795c99..7b21c2b8f0bf 100644 --- a/examples/rust/minimal_serve/Cargo.toml +++ b/examples/rust/minimal_serve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minimal_serve" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/objectron/Cargo.toml b/examples/rust/objectron/Cargo.toml index 331b7d51f225..31716acf5f93 100644 --- a/examples/rust/objectron/Cargo.toml +++ b/examples/rust/objectron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "objectron" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/objectron/README.md b/examples/rust/objectron/README.md index ee7bd1b31f66..bcb8b327a422 100644 --- a/examples/rust/objectron/README.md +++ b/examples/rust/objectron/README.md @@ -1,12 +1,13 @@ +Example of using the Rerun SDK to log the [Objectron](https://github.com/google-research-datasets/Objectron) dataset. + @@ -15,8 +16,6 @@ build_args = ["--frames=100"] Objectron example screenshot -Example of using the Rerun SDK to log the [Objectron](https://github.com/google-research-datasets/Objectron) dataset. - > The Objectron dataset is a collection of short, object-centric video clips, which are accompanied by AR session metadata that includes camera poses, sparse point-clouds and characterization of the planar surfaces in the surrounding environment. ```bash diff --git a/examples/rust/raw_mesh/Cargo.toml b/examples/rust/raw_mesh/Cargo.toml index c9be9caa5ddb..dedb994a0a00 100644 --- a/examples/rust/raw_mesh/Cargo.toml +++ b/examples/rust/raw_mesh/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "raw_mesh" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/raw_mesh/README.md b/examples/rust/raw_mesh/README.md index b59fbdbba380..8c7463598b74 100644 --- a/examples/rust/raw_mesh/README.md +++ b/examples/rust/raw_mesh/README.md @@ -1,10 +1,11 @@ +This example demonstrates how to use the Rerun SDK to log raw 3D meshes (so-called "triangle soups") and their transform hierarchy. Simple material properties are supported. 
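The raw mesh example above is written in Rust, but as a rough, hedged sketch of the same idea in Python (not part of this example's code, and the application id is made up for illustration), logging a small triangle soup could look like the following, relying on consecutive vertex triplets being interpreted as triangles when no indices are given.

```python
# Rough Python sketch (the example itself is written in Rust): log a tiny
# "triangle soup" where each consecutive triplet of vertices forms one triangle.
import numpy as np
import rerun as rr

rr.init("rerun_example_raw_mesh_sketch", spawn=True)  # hypothetical app id

vertex_positions = np.array(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
    dtype=np.float32,
)
vertex_colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint8)

rr.log("triangle_soup", rr.Mesh3D(vertex_positions=vertex_positions, vertex_colors=vertex_colors))
```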
+ @@ -13,8 +14,6 @@ thumbnail_dimensions = [480, 480] -This example demonstrates how to use the Rerun SDK to log raw 3D meshes (so-called "triangle soups") and their transform hierarchy. Simple material properties are supported. - ```bash cargo run --release ``` diff --git a/examples/rust/shared_recording/Cargo.toml b/examples/rust/shared_recording/Cargo.toml index 1110239a5e0a..fde3595a2119 100644 --- a/examples/rust/shared_recording/Cargo.toml +++ b/examples/rust/shared_recording/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "shared_recording" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/spawn_viewer/Cargo.toml b/examples/rust/spawn_viewer/Cargo.toml index 140a8c77f043..ab710623c7d9 100644 --- a/examples/rust/spawn_viewer/Cargo.toml +++ b/examples/rust/spawn_viewer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "spawn_viewer" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/stdio/Cargo.toml b/examples/rust/stdio/Cargo.toml index 5379a7585c7c..fd00690aedfe 100644 --- a/examples/rust/stdio/Cargo.toml +++ b/examples/rust/stdio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "stdio" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/template/Cargo.toml b/examples/rust/template/Cargo.toml index a83e38fbca9a..a9b56bfa651b 100644 --- a/examples/rust/template/Cargo.toml +++ b/examples/rust/template/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "template" -version = "0.15.0-alpha.5" +version = "0.15.0-rc.3" edition = "2021" rust-version = "1.74" license = "MIT OR Apache-2.0" diff --git a/examples/rust/template/README.md b/examples/rust/template/README.md index 790ee89e7342..76f87bda681b 100644 --- a/examples/rust/template/README.md +++ b/examples/rust/template/README.md @@ -1,15 +1,27 @@ +This is an example template. It is not a real example. You can duplicate the directory and use it as a starting point for writing a real example. +Put a short description at the top, like this. -This is an example template. It is not a real example. You can duplicate the directory and use it as a starting point for writing a real example. +## Used Rerun types +[`Image`](https://www.rerun.io/docs/reference/types/archetypes/image), … + +## More details here +… + +## Run the code ```bash cargo run --release -p template diff --git a/rerun_cpp/src/rerun/c/rerun.h b/rerun_cpp/src/rerun/c/rerun.h index 6d8235c31fa4..72b811207d27 100644 --- a/rerun_cpp/src/rerun/c/rerun.h +++ b/rerun_cpp/src/rerun/c/rerun.h @@ -268,7 +268,7 @@ typedef struct rr_error { /// /// This should match the string returned by `rr_version_string`. /// If not, the SDK's binary and the C header are out of sync. -#define RERUN_SDK_HEADER_VERSION "0.15.0-alpha.5" +#define RERUN_SDK_HEADER_VERSION "0.15.0-rc.3" /// Returns a human-readable version string of the Rerun C SDK. /// diff --git a/rerun_cpp/src/rerun/components/instance_key.hpp b/rerun_cpp/src/rerun/components/instance_key.hpp index c3d92a4bcf82..c7593674b9c1 100644 --- a/rerun_cpp/src/rerun/components/instance_key.hpp +++ b/rerun_cpp/src/rerun/components/instance_key.hpp @@ -21,6 +21,13 @@ namespace arrow { namespace rerun::components { /// **Component**: A unique numeric identifier for each individual instance within a batch. 
+ /// + /// Instance keys are automatically assigned by the `rerun` library and should not be set manually. + /// + /// The instance key is just the index of the instance within the batch, + /// i.e. the first point in a point cloud has `InstanceKey = 0`, the second `InstanceKey = 1`, and so on. + /// + /// We plan to remove the `InstanceKey` component in the near future. struct InstanceKey { uint64_t value; diff --git a/rerun_js/web-viewer-react/README.md b/rerun_js/web-viewer-react/README.md index 48269d84a909..eb06521f33de 100644 --- a/rerun_js/web-viewer-react/README.md +++ b/rerun_js/web-viewer-react/README.md @@ -35,7 +35,7 @@ export default function App() { ``` The `rrd` in the snippet above should be a URL pointing to either: -- A hosted `.rrd` file, such as +- A hosted `.rrd` file, such as - A WebSocket connection to the SDK opened via the [`serve`](https://www.rerun.io/docs/reference/sdk-operating-modes#serve) API If `rrd` is not set, the viewer will display the same welcome screen as . diff --git a/rerun_js/web-viewer-react/package.json b/rerun_js/web-viewer-react/package.json index 06ce069a1df8..b890f53a3416 100644 --- a/rerun_js/web-viewer-react/package.json +++ b/rerun_js/web-viewer-react/package.json @@ -1,6 +1,6 @@ { "name": "@rerun-io/web-viewer-react", - "version": "0.15.0-alpha.5", + "version": "0.15.0-rc.3", "description": "Embed the Rerun web viewer in your React app", "licenses": [ { @@ -39,7 +39,7 @@ "tsconfig.json" ], "dependencies": { - "@rerun-io/web-viewer": "0.15.0-alpha.5", + "@rerun-io/web-viewer": "0.15.0-rc.3", "@types/react": "^18.2.33", "react": "^18.2.0" }, diff --git a/rerun_js/web-viewer/README.md b/rerun_js/web-viewer/README.md index 65d058fa1091..d1a742984212 100644 --- a/rerun_js/web-viewer/README.md +++ b/rerun_js/web-viewer/README.md @@ -41,7 +41,7 @@ viewer.stop(); ``` The `rrd` in the snippet above should be a URL pointing to either: -- A hosted `.rrd` file, such as +- A hosted `.rrd` file, such as - A WebSocket connection to the SDK opened via the [`serve`](https://www.rerun.io/docs/reference/sdk-operating-modes#serve) API If `rrd` is not set, the viewer will display the same welcome screen as . diff --git a/rerun_js/web-viewer/package.json b/rerun_js/web-viewer/package.json index c3841b2223c2..be73a2d76397 100644 --- a/rerun_js/web-viewer/package.json +++ b/rerun_js/web-viewer/package.json @@ -1,6 +1,6 @@ { "name": "@rerun-io/web-viewer", - "version": "0.15.0-alpha.5", + "version": "0.15.0-rc.3", "description": "Embed the Rerun web viewer in your app", "licenses": [ { diff --git a/rerun_py/rerun_sdk/rerun/_spawn.py b/rerun_py/rerun_sdk/rerun/_spawn.py new file mode 100644 index 000000000000..1d3b9b2979d0 --- /dev/null +++ b/rerun_py/rerun_sdk/rerun/_spawn.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import socket + + +# TODO(#4019): application-level handshake +def _check_for_existing_viewer(port: int) -> bool: + try: + # Try opening a connection to the port to see if something is there + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(1) + s.connect(("127.0.0.1", port)) + return True + except Exception: + # If the connection times out or is refused, the port is not open + return False + finally: + # Always close the socket to release resources + s.close() + + +def _spawn_viewer( + *, + port: int = 9876, + memory_limit: str = "75%", +) -> None: + """ + Internal helper to spawn a Rerun Viewer, listening on the given port. + + Blocks until the viewer is ready to accept connections. 
+ + Used by [rerun.spawn][] + + Parameters + ---------- + port: + The port to listen on. + memory_limit: + An upper limit on how much memory the Rerun Viewer should use. + When this limit is reached, Rerun will drop the oldest data. + Example: `16GB` or `50%` (of system total). + + """ + + import os + import subprocess + import sys + from time import sleep + + # Let the spawned rerun process know it's just an app + new_env = os.environ.copy() + # NOTE: If `_RERUN_TEST_FORCE_SAVE` is set, all recording streams will write to disk no matter + # what, thus spawning a viewer is pointless (and probably not intended). + if os.environ.get("_RERUN_TEST_FORCE_SAVE") is not None: + return + new_env["RERUN_APP_ONLY"] = "true" + + # sys.executable: the absolute path of the executable binary for the Python interpreter + python_executable = sys.executable + if python_executable is None: + python_executable = "python3" + + # TODO(jleibs): More options to opt out of this behavior. + if _check_for_existing_viewer(port): + # Using print here for now rather than `logging.info` because logging.info isn't + # visible by default. + # + # If we spawn a process it's going to send a bunch of stuff to stdout anyways. + print(f"Found existing process on port {port}. Trying to connect.") + else: + # start_new_session=True ensures the spawned process does NOT die when + # we hit ctrl-c in the terminal running the parent Python process. + subprocess.Popen( + [ + python_executable, + "-c", + "import rerun_bindings; rerun_bindings.main()", + f"--port={port}", + f"--memory-limit={memory_limit}", + "--expect-data-soon", + ], + env=new_env, + start_new_session=True, + ) + + # Give the newly spawned Rerun Viewer some time to bind. + # + # NOTE: The timeout only covers the TCP handshake: if no process is bound to that address + # at all, the connection will fail immediately, irrelevant of the timeout configuration. + # For that reason we use an extra loop. + for _ in range(0, 5): + _check_for_existing_viewer(port) + sleep(0.1) diff --git a/rerun_py/rerun_sdk/rerun/any_value.py b/rerun_py/rerun_sdk/rerun/any_value.py index 8bd445081dd9..75396f74eed7 100644 --- a/rerun_py/rerun_sdk/rerun/any_value.py +++ b/rerun_py/rerun_sdk/rerun/any_value.py @@ -102,7 +102,8 @@ class AnyValues(AsComponents): ```python rr.log( "any_values", rr.AnyValues( - foo=[1.2, 3.4, 5.6], bar="hello world", + confidence=[1.2, 3.4, 5.6], + description="Bla bla bla…", ), ) ``` diff --git a/rerun_py/rerun_sdk/rerun/blueprint/api.py b/rerun_py/rerun_sdk/rerun/blueprint/api.py index fe1c693a571d..7f79e5381cb2 100644 --- a/rerun_py/rerun_sdk/rerun/blueprint/api.py +++ b/rerun_py/rerun_sdk/rerun/blueprint/api.py @@ -5,6 +5,7 @@ import rerun_bindings as bindings +from .._spawn import _spawn_viewer from ..datatypes import EntityPathLike, Utf8ArrayLike, Utf8Like from ..memory import MemoryRecording from ..notebook import as_html @@ -456,6 +457,49 @@ def _repr_html_(self) -> Any: """IPython interface to conversion to html.""" return as_html(blueprint=self) + def connect( + self, + application_id: str, + *, + addr: str | None = None, + make_active: bool = True, + make_default: bool = True, + ) -> None: + """ + Connect to a remote Rerun Viewer on the given ip:port and send this blueprint. + + Parameters + ---------- + application_id: + The application ID to use for this blueprint. This must match the application ID used + when initiating rerun for any data logging you wish to associate with this blueprint. 
+ addr: + The ip:port to connect to + make_active: + Immediately make this the active blueprint for the associated `app_id`. + Note that setting this to `false` does not mean the blueprint may not still end + up becoming active. In particular, if `make_default` is true and there is no other + currently active blueprint. + make_default: + Make this the default blueprint for the `app_id`. + The default blueprint will be used as the template when the user resets the + blueprint for the app. It will also become the active blueprint if no other + blueprint is currently active. + + """ + blueprint_stream = RecordingStream( + bindings.new_blueprint( + application_id=application_id, + make_default=False, + make_thread_default=False, + default_enabled=True, + ) + ) + blueprint_stream.set_time_sequence("blueprint", 0) # type: ignore[attr-defined] + self._log_to_stream(blueprint_stream) + + bindings.connect_blueprint(addr, make_active, make_default, blueprint_stream.to_native()) + def save(self, application_id: str, path: str | None = None) -> None: """ Save this blueprint to a file. Rerun recommends the `.rbl` suffix. @@ -473,7 +517,7 @@ def save(self, application_id: str, path: str | None = None) -> None: if path is None: path = f"{application_id}.rbl" - blueprint_file = RecordingStream( + blueprint_stream = RecordingStream( bindings.new_blueprint( application_id=application_id, make_default=False, @@ -481,10 +525,30 @@ def save(self, application_id: str, path: str | None = None) -> None: default_enabled=True, ) ) - blueprint_file.set_time_sequence("blueprint", 0) # type: ignore[attr-defined] - self._log_to_stream(blueprint_file) + blueprint_stream.set_time_sequence("blueprint", 0) # type: ignore[attr-defined] + self._log_to_stream(blueprint_stream) + + bindings.save_blueprint(path, blueprint_stream.to_native()) + + def spawn(self, application_id: str, port: int = 9876, memory_limit: str = "75%") -> None: + """ + Spawn a Rerun viewer with this blueprint. - bindings.save_blueprint(path, blueprint_file.to_native()) + Parameters + ---------- + application_id: + The application ID to use for this blueprint. This must match the application ID used + when initiating rerun for any data logging you wish to associate with this blueprint. + port: + The port to listen on. + memory_limit: + An upper limit on how much memory the Rerun Viewer should use. + When this limit is reached, Rerun will drop the oldest data. + Example: `16GB` or `50%` (of system total). + + """ + _spawn_viewer(port=port, memory_limit=memory_limit) + self.connect(application_id=application_id, addr=f"127.0.0.1:{port}") BlueprintLike = Union[Blueprint, SpaceView, Container] diff --git a/rerun_py/rerun_sdk/rerun/components/instance_key.py b/rerun_py/rerun_sdk/rerun/components/instance_key.py index 7fc6df0e43fc..edb361d45a58 100644 --- a/rerun_py/rerun_sdk/rerun/components/instance_key.py +++ b/rerun_py/rerun_sdk/rerun/components/instance_key.py @@ -20,7 +20,16 @@ @define(init=False) class InstanceKey(InstanceKeyExt): - """**Component**: A unique numeric identifier for each individual instance within a batch.""" + """ + **Component**: A unique numeric identifier for each individual instance within a batch. + + Instance keys are automatically assigned by the `rerun` library and should not be set manually. + + The instance key is just the index of the instance within the batch, + i.e. the first point in a point cloud has `InstanceKey = 0`, the second `InstanceKey = 1`, and so on. 
+ + We plan to remove the `InstanceKey` component in the near future. + """ def __init__(self: Any, value: InstanceKeyLike): """Create a new instance of the InstanceKey component.""" diff --git a/rerun_py/rerun_sdk/rerun/sinks.py b/rerun_py/rerun_sdk/rerun/sinks.py index 55edd03f51e9..af227c1d8d99 100644 --- a/rerun_py/rerun_sdk/rerun/sinks.py +++ b/rerun_py/rerun_sdk/rerun/sinks.py @@ -2,13 +2,14 @@ import logging import pathlib -import socket import rerun_bindings as bindings # type: ignore[attr-defined] from rerun.blueprint.api import BlueprintLike, create_in_memory_blueprint from rerun.recording_stream import RecordingStream, get_application_id +from ._spawn import _spawn_viewer + # --- Sinks --- @@ -299,22 +300,6 @@ def send_blueprint( bindings.send_blueprint(blueprint_storage, make_active, make_default, recording=recording) -# TODO(#4019): application-level handshake -def _check_for_existing_viewer(port: int) -> bool: - try: - # Try opening a connection to the port to see if something is there - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(1) - s.connect(("127.0.0.1", port)) - return True - except Exception: - # If the connection times out or is refused, the port is not open - return False - finally: - # Always close the socket to release resources - s.close() - - def spawn( *, port: int = 9876, @@ -357,55 +342,7 @@ def spawn( logging.warning("Rerun is disabled - spawn() call ignored.") return - import os - import subprocess - import sys - from time import sleep - - # Let the spawned rerun process know it's just an app - new_env = os.environ.copy() - # NOTE: If `_RERUN_TEST_FORCE_SAVE` is set, all recording streams will write to disk no matter - # what, thus spawning a viewer is pointless (and probably not intended). - if os.environ.get("_RERUN_TEST_FORCE_SAVE") is not None: - return - new_env["RERUN_APP_ONLY"] = "true" - - # sys.executable: the absolute path of the executable binary for the Python interpreter - python_executable = sys.executable - if python_executable is None: - python_executable = "python3" - - # TODO(jleibs): More options to opt out of this behavior. - if _check_for_existing_viewer(port): - # Using print here for now rather than `logging.info` because logging.info isn't - # visible by default. - # - # If we spawn a process it's going to send a bunch of stuff to stdout anyways. - print(f"Found existing process on port {port}. Trying to connect.") - else: - # start_new_session=True ensures the spawned process does NOT die when - # we hit ctrl-c in the terminal running the parent Python process. - subprocess.Popen( - [ - python_executable, - "-c", - "import rerun_bindings; rerun_bindings.main()", - f"--port={port}", - f"--memory-limit={memory_limit}", - "--expect-data-soon", - ], - env=new_env, - start_new_session=True, - ) - - # Give the newly spawned Rerun Viewer some time to bind. - # - # NOTE: The timeout only covers the TCP handshake: if no process is bound to that address - # at all, the connection will fail immediately, irrelevant of the timeout configuration. - # For that reason we use an extra loop. 
- for _ in range(0, 5): - _check_for_existing_viewer(port) - sleep(0.1) + _spawn_viewer(port=port, memory_limit=memory_limit) if connect: _connect(f"127.0.0.1:{port}", recording=recording, default_blueprint=default_blueprint) diff --git a/rerun_py/src/python_bridge.rs b/rerun_py/src/python_bridge.rs index 66adc08165b7..c2ce5f8967a6 100644 --- a/rerun_py/src/python_bridge.rs +++ b/rerun_py/src/python_bridge.rs @@ -163,6 +163,7 @@ fn rerun_bindings(_py: Python<'_>, m: &PyModule) -> PyResult<()> { // sinks m.add_function(wrap_pyfunction!(is_enabled, m)?)?; m.add_function(wrap_pyfunction!(connect, m)?)?; + m.add_function(wrap_pyfunction!(connect_blueprint, m)?)?; m.add_function(wrap_pyfunction!(save, m)?)?; m.add_function(wrap_pyfunction!(save_blueprint, m)?)?; m.add_function(wrap_pyfunction!(stdout, m)?)?; @@ -588,6 +589,48 @@ fn connect( Ok(()) } +#[pyfunction] +#[pyo3(signature = (addr, make_active, make_default, blueprint_stream))] +/// Special binding for directly sending a blueprint stream to a connection. +fn connect_blueprint( + addr: Option, + make_active: bool, + make_default: bool, + blueprint_stream: &PyRecordingStream, + py: Python<'_>, +) -> PyResult<()> { + let addr = if let Some(addr) = addr { + addr.parse()? + } else { + rerun::default_server_addr() + }; + + if let Some(blueprint_id) = (*blueprint_stream).store_info().map(|info| info.store_id) { + // The call to save, needs to flush. + // Release the GIL in case any flushing behavior needs to cleanup a python object. + py.allow_threads(|| { + // Flush all the pending blueprint messages before we include the Ready message + blueprint_stream.flush_blocking(); + + let activation_cmd = BlueprintActivationCommand { + blueprint_id, + make_active, + make_default, + }; + + blueprint_stream.record_msg(activation_cmd.into()); + + blueprint_stream.connect_opts(addr, None); + flush_garbage_queue(); + }); + Ok(()) + } else { + Err(PyRuntimeError::new_err( + "Blueprint stream has no store info".to_owned(), + )) + } +} + #[pyfunction] #[pyo3(signature = (path, default_blueprint = None, recording = None))] fn save( @@ -633,14 +676,14 @@ fn save_blueprint( blueprint_stream: &PyRecordingStream, py: Python<'_>, ) -> PyResult<()> { - if let Some(recording_id) = (*blueprint_stream).store_info().map(|info| info.store_id) { + if let Some(blueprint_id) = (*blueprint_stream).store_info().map(|info| info.store_id) { // The call to save, needs to flush. // Release the GIL in case any flushing behavior needs to cleanup a python object. py.allow_threads(|| { // Flush all the pending blueprint messages before we include the Ready message blueprint_stream.flush_blocking(); - let activation_cmd = BlueprintActivationCommand::make_active(recording_id.clone()); + let activation_cmd = BlueprintActivationCommand::make_active(blueprint_id.clone()); blueprint_stream.record_msg(activation_cmd.into()); diff --git a/scripts/ci/update_pr_body.py b/scripts/ci/update_pr_body.py index 7dccdd88fbd9..397d571cbb2a 100755 --- a/scripts/ci/update_pr_body.py +++ b/scripts/ci/update_pr_body.py @@ -18,13 +18,6 @@ from jinja2 import DebugUndefined, select_autoescape from jinja2.sandbox import SandboxedEnvironment -DOCS_PREVIEW_MARKER = "" -DOCS_PREVIEW_BARE_LINK = "- [Docs preview](https://rerun.io/preview/{{ pr.commit }}/docs) " -EXAMPLES_PREVIEW_MARKER = "" -EXAMPLES_PREVIEW_BARE_LINK = ( - "- [Examples preview](https://rerun.io/preview/{{ pr.commit }}/examples) " -) - # Need to protect code-blocks in the PR template. 
# See https://github.com/rerun-io/rerun/issues/3972 # @@ -119,20 +112,6 @@ def main() -> None: new_body = pr.body - docs_preview_link_end = new_body.find(DOCS_PREVIEW_MARKER) - if docs_preview_link_end != -1: - docs_preview_link_end += len(DOCS_PREVIEW_MARKER) - docs_preview_link_start = new_body.rfind("\n", 0, docs_preview_link_end) + 1 - new_body = new_body[:docs_preview_link_start] + DOCS_PREVIEW_BARE_LINK + new_body[docs_preview_link_end:] - - examples_preview_link_end = new_body.find(EXAMPLES_PREVIEW_MARKER) - if examples_preview_link_end != -1: - examples_preview_link_end += len(EXAMPLES_PREVIEW_MARKER) - examples_preview_link_start = new_body.rfind("\n", 0, examples_preview_link_end) + 1 - new_body = ( - new_body[:examples_preview_link_start] + EXAMPLES_PREVIEW_BARE_LINK + new_body[examples_preview_link_end:] - ) - lines = new_body.splitlines() codeblocks = extract_code_blocks(lines) text = "\n".join(lines) diff --git a/scripts/generate_changelog.py b/scripts/generate_changelog.py index 6fcb31488e8a..5dd558f7c48e 100755 --- a/scripts/generate_changelog.py +++ b/scripts/generate_changelog.py @@ -279,7 +279,7 @@ def main() -> None: print_section("πŸ§‘β€πŸ’» Dev-experience", dev_experience) print_section("πŸ—£ Refactors", refactor) print_section("πŸ“¦ Dependencies", dependencies) - print_section("πŸ€·β€β™‚οΈ Other", misc) + print_section("πŸ€·β€ Other", misc) print() print_section("Chronological changes (don't include these)", chronological) diff --git a/scripts/lint.py b/scripts/lint.py index 377be1982f17..0f51b9d6e798 100755 --- a/scripts/lint.py +++ b/scripts/lint.py @@ -38,7 +38,6 @@ double_the = re.compile(r"\bthe the\b") double_word = re.compile(r" ([a-z]+) \1[ \.]") - Frontmatter = Dict[str, Any] @@ -90,6 +89,9 @@ def lint_line( if line == "": return None + if "\t" in line: + return "Found tab character: use spaces for indentation instead!" + if prev_line is None: prev_line_stripped = "" else: @@ -655,15 +657,27 @@ def is_acronym_or_pascal_case(s: str) -> bool: return " ".join(new_words) -def lint_markdown(lines_in: list[str]) -> tuple[list[str], list[str]]: +def lint_markdown(filepath: str, lines_in: list[str]) -> tuple[list[str], list[str]]: """Only for .md files.""" errors = [] lines_out = [] + in_example_readme = "/examples/python/" in filepath and filepath.endswith("README.md") + + in_code_block = False + in_frontmatter = False for line_nr, line in enumerate(lines_in): line_nr = line_nr + 1 + if line.startswith("```"): + in_code_block = not in_code_block + + if line.startswith(""): + in_frontmatter = False + # Check the casing on markdown headers if m := re.match(r"(\#+ )(.*)", line): new_header = fix_header_casing(m.group(2)) @@ -678,6 +692,13 @@ def lint_markdown(lines_in: list[str]) -> tuple[list[str], list[str]]: errors.append(f"{line_nr}: Titles should NOT be title cased. This should be '{new_title}'.") line = f'title = "{new_title}"\n' + if in_example_readme and not in_code_block and not in_frontmatter: + # Check that
+            if line.startswith("#") and not line.startswith("##"):
+                errors.append(
+                    f"{line_nr}: Do not use top-level headers in example READMEs, they are reserved for page title."
+                )
+
         lines_out.append(line)

     return errors, lines_out
@@ -749,14 +770,17 @@ def lint_frontmatter(filepath: str, content: str) -> list[str]:
     if not filepath.endswith(".md"):
         return errors

-    fm = load_frontmatter(content)
+    try:
+        fm = load_frontmatter(content)
+    except Exception as e:
+        errors.append(f"Error parsing frontmatter: {e}")
+        return errors
+
     if fm is None:
         return []

     errors += lint_example_description(filepath, fm)

-    # TODO(ab): check for missing fields (when descriptions are populated everywhere)
-
     return errors
@@ -819,7 +843,7 @@ def error(self, message: str, *, line_nr: int | None = None, index: int | None =
         if line_nr is None:
             return f"{self.path}:{message}"
         else:
-            return f"{self.path}:{line_nr+1}: {message}"
+            return f"{self.path}:{line_nr + 1}: {message}"


 def lint_file(filepath: str, args: Any) -> int:
@@ -865,7 +889,7 @@ def lint_file(filepath: str, args: Any) -> int:
         source.rewrite(lines_out)

     if filepath.endswith(".md") and args.extra:
-        errors, lines_out = lint_markdown(source.lines)
+        errors, lines_out = lint_markdown(filepath, source.lines)

         for error in errors:
             print(source.error(error))
diff --git a/scripts/upload_image.py b/scripts/upload_image.py
index 7236a25744c4..ec357db78138 100755
--- a/scripts/upload_image.py
+++ b/scripts/upload_image.py
@@ -72,8 +72,6 @@
     1200,
 ]

-ASPECT_RATIO_RANGE = (1.6, 1.8)
-

 def build_image_stack(image: Image) -> list[tuple[int | None, Image]]:
     image_stack: list[tuple[int | None, Image]] = [(None, image)]
@@ -120,32 +118,14 @@ def image_from_clipboard() -> Image:


 class Uploader:
-    def __init__(self, auto_accept: bool):
+    def __init__(self):
         gcs = storage.Client("rerun-open")
         self.bucket = gcs.bucket("rerun-static-img")
-        self.auto_accept = auto_accept

     def _check_aspect_ratio(self, image: Path | Image) -> None:
         if isinstance(image, Path):
             image = PIL.Image.open(image)

-        aspect_ratio = image.width / image.height
-        aspect_ok = ASPECT_RATIO_RANGE[0] < aspect_ratio < ASPECT_RATIO_RANGE[1]
-
-        if not aspect_ok and not self.auto_accept:
-            logging.warning(
-                f"Aspect ratio is {aspect_ratio:.2f} but should be between {ASPECT_RATIO_RANGE[0]} and "
-                f"{ASPECT_RATIO_RANGE[1]}."
-            )
-            # do not pass prompt to input as this goes to stdout
-            print(
-                "The image aspect ratio is outside the range recommended for example screenshots. Continue? [y/N] ",
-                end="",
-                file=sys.stderr,
-            )
-            if input().lower() != "y":
-                sys.exit(1)
-
     def upload_file(self, path: Path) -> str:
         """
         Upload a single file to Google Cloud.
@@ -355,7 +335,7 @@ def download_file(url: str, path: Path) -> None:
 def run(args: argparse.Namespace) -> None:
     """Run the script based on the provided args."""
     try:
-        uploader = Uploader(args.auto_accept)
+        uploader = Uploader()

         if args.single:
             if args.path is None:
@@ -415,7 +395,6 @@ def main() -> None:
         "--single", action="store_true", help="Upload a single image instead of creating a multi-resolution stack."
     )
     parser.add_argument("--name", type=str, help="Image name (required when uploading from clipboard).")
-    parser.add_argument("--auto-accept", action="store_true", help="Auto-accept the aspect ratio confirmation prompt")
     parser.add_argument("--debug", action="store_true", help="Enable debug logging.")

     args = parser.parse_args()
diff --git a/tests/python/blueprint/save_blueprint.py b/tests/python/blueprint/save_blueprint.py
index bb64a3df5614..495ee982305c 100644
--- a/tests/python/blueprint/save_blueprint.py
+++ b/tests/python/blueprint/save_blueprint.py
@@ -9,4 +9,4 @@
     rrb.BlueprintPanel(expanded=False),
 )

-blueprint.save("rerun_example_blueprint_test")
+blueprint.save("rerun_example_blueprint_test.rbl")
diff --git a/tests/python/blueprint/spawn_blueprint.py b/tests/python/blueprint/spawn_blueprint.py
new file mode 100644
index 000000000000..8c08a60d1910
--- /dev/null
+++ b/tests/python/blueprint/spawn_blueprint.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+import rerun.blueprint as rrb
+
+blueprint = rrb.Blueprint(
+    rrb.Spatial3DView(origin="/test1"),
+    rrb.TimePanel(expanded=False),
+    rrb.SelectionPanel(expanded=False),
+    rrb.BlueprintPanel(expanded=False),
+)
+
+blueprint.spawn("rerun_example_blueprint_test")
diff --git a/tests/python/release_checklist/check_1d_tensor_data.py b/tests/python/release_checklist/check_1d_tensor_data.py
new file mode 100644
index 000000000000..ed334dede2b9
--- /dev/null
+++ b/tests/python/release_checklist/check_1d_tensor_data.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+import os
+from argparse import Namespace
+from uuid import uuid4
+
+import numpy as np
+import rerun as rr
+
+README = """
+# 1D Image/Tensor/BarChart
+
+This checks the different ways 1D arrays can be visualized.
+
+### Actions
+
+You should see:
+* a tensor view with 1D data
+* an image view with a 1D image
+* a bar chart
+
+Bonus actions:
+* use the ui to create a tensor/bar-chart with each of the entities no matter how it was logged
+    * TODO(#5847): Right now tensors & bar charts can not be reinterpreted as 2D images.
+      In this example, image is correctly not suggested for the `tensor` and `image` entities,
+      since they are of 1D shape, but this would be relevant if they were 1xN or Nx1.
+
+"""
+
+
+def log_readme() -> None:
+    rr.log("readme", rr.TextDocument(README, media_type=rr.MediaType.MARKDOWN), timeless=True)
+
+
+def log_1d_data() -> None:
+    x = np.linspace(0.0, 100.0, 100)
+    rr.log("tensor", rr.Tensor(x))
+    rr.log("barchart", rr.BarChart(x))
+    # We're not allowing "real" 1D here and force users to be explicit about width/height
+    rr.log("image", rr.Image(np.reshape(x, (1, 100))))
+
+
+def run(args: Namespace) -> None:
+    rr.script_setup(args, f"{os.path.basename(__file__)}", recording_id=uuid4())
+
+    log_readme()
+    log_1d_data()
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="Interactive release checklist")
+    rr.script_add_args(parser)
+    args = parser.parse_args()
+    run(args)
diff --git a/tests/rust/plot_dashboard_stress/Cargo.toml b/tests/rust/plot_dashboard_stress/Cargo.toml
index c939b36f9c51..18891b172840 100644
--- a/tests/rust/plot_dashboard_stress/Cargo.toml
+++ b/tests/rust/plot_dashboard_stress/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "plot_dashboard_stress"
-version = "0.15.0-alpha.5"
+version = "0.15.0-rc.3"
 edition = "2021"
 rust-version = "1.74"
 license = "MIT OR Apache-2.0"
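
For reviewers who want to poke at the new `scripts/lint.py` rules in isolation, here is a minimal standalone sketch of the checks added in this diff: the tab-character rule from `lint_line()` and the top-level-header rule from `lint_markdown()`, including the code-fence and frontmatter tracking. The helper name `check_new_lint_rules` and the sample input are hypothetical and not part of `lint.py`; the frontmatter delimiters follow the `<!--[metadata] ... -->` convention used by the example READMEs.

from __future__ import annotations


def check_new_lint_rules(filepath: str, lines: list[str]) -> list[str]:
    """Re-implements only the checks added in this diff, for illustration."""
    errors: list[str] = []
    in_example_readme = "/examples/python/" in filepath and filepath.endswith("README.md")
    in_code_block = False
    in_frontmatter = False

    for line_nr, line in enumerate(lines, start=1):
        # New lint_line() rule: tabs are rejected everywhere.
        if "\t" in line:
            errors.append(f"{line_nr}: Found tab character: use spaces for indentation instead!")

        # Track fenced code blocks and the README frontmatter so headers inside them are ignored.
        if line.startswith("```"):
            in_code_block = not in_code_block
        if line.startswith("<!--[metadata]"):
            in_frontmatter = True
        if line.startswith("-->"):
            in_frontmatter = False

        # New lint_markdown() rule: example READMEs must not use top-level headers.
        if in_example_readme and not in_code_block and not in_frontmatter:
            if line.startswith("#") and not line.startswith("##"):
                errors.append(f"{line_nr}: Do not use top-level headers in example READMEs.")

    return errors


# The "# Title" line outside the frontmatter gets flagged; the frontmatter itself does not.
print(check_new_lint_rules("/examples/python/foo/README.md", ["<!--[metadata]", 'title = "Foo"', "-->", "", "# Title"]))

Note that in the real script the markdown checks only run with `--extra` (see the `lint_file` hunk above), while the tab check runs on every line via `lint_line`, so their errors are reported through different paths.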