import path from "node:path";
import * as fs from "fs/promises";
import assert from "node:assert";
import {getViewSection, getViewSubSection, waitForTreeItems} from "./utils";
import {sleep, TreeItem} from "wdio-vscode-service";

describe("Run python on cluster", async function () {
    let projectDir: string;
    this.timeout(2 * 60 * 1000);

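    // One-time setup: point the extension at the test cluster and repo via
    // .databricks/project.json, create the two scripts under test, and wait
    // for the configuration view to populate.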
    before(async () => {
        assert(process.env.TEST_DEFAULT_CLUSTER_ID);
        assert(process.env.TEST_REPO_PATH);
        assert(process.env.WORKSPACE_PATH);
        projectDir = process.env.WORKSPACE_PATH;

        await fs.mkdir(path.join(projectDir, ".databricks"));

        await fs.writeFile(
            path.join(projectDir, ".databricks", "project.json"),
            JSON.stringify({
                clusterId: process.env["TEST_DEFAULT_CLUSTER_ID"],
                profile: "DEFAULT",
                workspacePath: process.env["TEST_REPO_PATH"],
            })
        );
        await fs.writeFile(
            path.join(projectDir, "file.py"),
            `spark.sql('SELECT "hello world"').show()`
        );

        await fs.writeFile(
            path.join(projectDir, "notebook.py"),
            [
                "# Databricks notebook source",
                `spark.sql('SELECT "hello world"').show()`,
            ].join("\n")
        );

        const section = await getViewSection("CONFIGURATION");
        assert(section);
        await waitForTreeItems(section);
    });

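    // Before each test, make sure repo sync is running: trigger it via the
    // tree item's inline action if the state is STOPPED, then wait until it
    // reaches WATCHING_FOR_CHANGES.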
    beforeEach(async () => {
        const section = await getViewSection("CLUSTERS");
        await section?.collapse();

        const repoConfigItem = await getViewSubSection("CONFIGURATION", "Repo");
        assert(repoConfigItem);

        let status: TreeItem | undefined = undefined;
        for (const i of await repoConfigItem.getChildren()) {
            if ((await i.getLabel()).includes("State:")) {
                status = i;
                break;
            }
        }
        assert(status);
        if ((await status.getDescription())?.includes("STOPPED")) {
            // The first inline action on the "Repo" item is assumed to be the
            // one that (re)starts synchronization.
            const buttons = await repoConfigItem.getActionButtons();
            await buttons[0].elem.click();
        }

        await browser.waitUntil(
            async () => {
                const repoConfigItem = await getViewSubSection(
                    "CONFIGURATION",
                    "Repo"
                );
                assert(repoConfigItem);

                status = undefined;
                for (const i of await repoConfigItem.getChildren()) {
                    if ((await i.getLabel()).includes("State:")) {
                        status = i;
                        break;
                    }
                }
                assert(status);
                const description = await status.getDescription();
                return (
                    description !== undefined &&
                    description.includes("WATCHING_FOR_CHANGES")
                );
            },
            {
                timeout: 20000,
                timeoutMsg: "Couldn't finish sync in 20s",
            }
        );
    });

    it("should run a python notebook as a job on a cluster", async () => {
        const workbench = await driver.getWorkbench();
        const editorView = workbench.getEditorView();
        await editorView.closeAllEditors();

        // open file
        const input = await workbench.openCommandPrompt();
        await sleep(200);
        await input.setText("notebook.py");
        await input.confirm();
        await sleep(500);

        // run file
        await workbench.executeQuickPick(
            "Databricks: Run File as Workflow on Databricks"
        );

        const webView = await workbench.getWebviewByTitle(/Databricks Job Run/);
        await webView.open();

        /* eslint-disable @typescript-eslint/naming-convention */
        const labelToDefaults = {
            taskRunId: {label: "task-run-id", default: /N\\A/},
            clusterId: {label: "cluster", default: /N\\A/},
            startTime: {label: "run-start-time", default: /-/},
            endTime: {label: "run-end-time", default: /-/},
            duration: {label: "run-duration", default: /-/},
            status: {label: "run-status", default: /Synchronizing/},
        };
        /* eslint-enable @typescript-eslint/naming-convention */
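
        // Note: getTextByLabel is not a built-in WebdriverIO command; it is
        // presumably registered as a custom command in the test setup,
        // roughly: browser.addCommand("getTextByLabel", (label) =>
        //     browser.$(`[aria-label="${label}"]`).getText());
        // and the labels above are assumed to be the aria-labels of the
        // job-run webview fields.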

        // wait for job to get a task id
        await browser.waitUntil(
            async () =>
                (
                    await browser.getTextByLabel(
                        labelToDefaults.taskRunId.label
                    )
                ).match(labelToDefaults.taskRunId.default) === null,
            {
                timeoutMsg: "Job did not start",
            }
        );

        expect(
            await browser.getTextByLabel(labelToDefaults.startTime.label)
        ).not.toHaveText(labelToDefaults.startTime.default);

        await browser.waitUntil(
            async () =>
                (
                    await browser.getTextByLabel(labelToDefaults.status.label)
                ).match(/Succeeded/) !== null,
            {
                timeout: 20000,
                interval: 50,
                timeoutMsg: "Job did not reach succeeded status after 20s.",
            }
        );

        await webView.close();
    });

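    // Same flow as the notebook test above, but for a plain python file
    // without the "# Databricks notebook source" header.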
    it("should run a python file as a job on a cluster", async () => {
        const workbench = await driver.getWorkbench();
        const editorView = workbench.getEditorView();
        await editorView.closeAllEditors();

        // open file
        const input = await workbench.openCommandPrompt();
        await sleep(200);
        await input.setText("file.py");
        await input.confirm();
        await sleep(500);

        // run file
        await workbench.executeQuickPick(
            "Databricks: Run File as Workflow on Databricks"
        );

        const webView = await workbench.getWebviewByTitle(/Databricks Job Run/);
        await webView.open();

        /* eslint-disable @typescript-eslint/naming-convention */
        const labelToDefaults = {
            taskRunId: {label: "task-run-id", default: /N\\A/},
            clusterId: {label: "cluster", default: /N\\A/},
            startTime: {label: "run-start-time", default: /-/},
            endTime: {label: "run-end-time", default: /-/},
            duration: {label: "run-duration", default: /-/},
            status: {label: "run-status", default: /Synchronizing/},
        };
        /* eslint-enable @typescript-eslint/naming-convention */

        // wait for job to get a task id
        await browser.waitUntil(
            async () =>
                (
                    await browser.getTextByLabel(
                        labelToDefaults.taskRunId.label
                    )
                ).match(labelToDefaults.taskRunId.default) === null,
            {
                timeoutMsg: "Job did not start",
            }
        );

        expect(
            await browser.getTextByLabel(labelToDefaults.startTime.label)
        ).not.toHaveText(labelToDefaults.startTime.default);

        await browser.waitUntil(
            async () =>
                (
                    await browser.getTextByLabel(labelToDefaults.status.label)
                ).includes("Succeeded"),
            {
                timeout: 20000,
                interval: 100,
                timeoutMsg: "Job did not reach succeeded status after 20s.",
            }
        );

        await webView.close();
    });
});
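
The getViewSection, getViewSubSection, and waitForTreeItems helpers are imported from "./utils", which is not part of this file. The sketch below is a rough, hypothetical guess at their shape, assuming wdio-vscode-service's standard sidebar page objects (getWorkbench, getSideBar, getContent, getSections, getVisibleItems); getViewSubSection would additionally expand and return the named top-level tree item inside a section. The real helpers may differ.

import {ViewSection} from "wdio-vscode-service";

// Hypothetical: find a sidebar view section by its (upper-cased) title.
export async function getViewSection(
    title: string
): Promise<ViewSection | undefined> {
    const workbench = await browser.getWorkbench();
    const sections = await workbench.getSideBar().getContent().getSections();
    for (const section of sections) {
        if ((await section.getTitle())?.toUpperCase() === title.toUpperCase()) {
            return section;
        }
    }
    return undefined;
}

// Hypothetical: wait until a section has rendered at least one tree item.
export async function waitForTreeItems(section: ViewSection) {
    return browser.waitUntil(async () => {
        const items = await section.getVisibleItems();
        return items.length > 0;
    });
}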