Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions e2e/tests/demo.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,19 @@ import { test, expect } from '@playwright/test';
// Assertions here verify that the WASM binary compiles to something that
// actually runs in a browser, not just that the build directory exists.

test.beforeEach(async ({ page }) => {
// Clear OPFS stelekit directory before each test to prevent inter-test bleed
await page.goto('/');
await page.evaluate(async () => {
try {
const root = await navigator.storage.getDirectory();
await root.removeEntry('stelekit', { recursive: true });
} catch {
// Directory may not exist on first run
}
Comment on lines +8 to +16
Copy link
Copy Markdown
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The beforeEach goto is only to gain same-origin access for the OPFS cleanup evaluate call — the app doesn't fully initialize there. The actual test's goto('/') starts the app under the listener. The ordering is intentional; leaving as-is.

});
});

test('SteleKit WASM demo: canvas initializes and Compose paints', async ({ page }) => {
const errors: string[] = [];
page.on('pageerror', err => {
Expand Down Expand Up @@ -62,3 +75,35 @@ test('SteleKit WASM demo: canvas initializes and Compose paints', async ({ page
// Step 4: no uncaught JS exceptions during startup.
expect(errors, `Uncaught JS errors: ${errors.join(' | ')}`).toHaveLength(0);
});

test('SteleKit OPFS: data persists across page reload', async ({ page }) => {
  // Collect uncaught browser-side exceptions so the final assertion can
  // prove startup ran cleanly on both loads.
  const pageErrors: string[] = [];
  page.on('pageerror', err => pageErrors.push(err.message));

  // The app flips this flag when initialization completes; waiting on it
  // after each navigation gives OPFS writes a chance to land.
  const waitUntilAppReady = () =>
    page.waitForFunction(
      () => (window as any).__stelekit_ready === true,
      { timeout: 30_000 },
    );

  await page.goto('/');
  await waitUntilAppReady();

  // Second boot of the app: persisted OPFS state must survive the reload.
  await page.reload();
  await waitUntilAppReady();

  // Probe OPFS from inside the page. Opening the handle with
  // { create: false } throws when the directory was never persisted,
  // which the catch turns into a plain boolean.
  const hasOpfsData = await page.evaluate(async () => {
    try {
      const root = await navigator.storage.getDirectory();
      await root.getDirectoryHandle('stelekit', { create: false });
      return true;
    } catch {
      return false;
    }
  });
  expect(hasOpfsData, 'OPFS stelekit directory must exist after app init').toBe(true);

  expect(pageErrors, `Uncaught JS errors: ${pageErrors.join(' | ')}`).toHaveLength(0);
});
4 changes: 3 additions & 1 deletion kmp/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ kotlin {
// SQLDelight
implementation("app.cash.sqldelight:runtime:2.3.2")
implementation("app.cash.sqldelight:coroutines-extensions:2.3.2")
implementation("app.cash.sqldelight:async-extensions:2.3.2")

// Compose Multiplatform
implementation("org.jetbrains.compose.runtime:runtime:1.7.3")
Expand Down Expand Up @@ -151,7 +152,7 @@ kotlin {
if (project.findProperty("enableJs") == "true") {
val wasmJsMain by getting {
dependencies {
// Phase B: add @sqlite.org/sqlite-wasm driver here
implementation(npm("@sqlite.org/sqlite-wasm", "3.46.1"))
}
}

Expand Down Expand Up @@ -807,6 +808,7 @@ sqldelight {
databases {
create("SteleDatabase") {
packageName.set("dev.stapler.stelekit.db")
generateAsync.set(true)
}
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
package dev.stapler.stelekit.db

import android.content.Context
import app.cash.sqldelight.async.coroutines.synchronous
import app.cash.sqldelight.db.SqlDriver
import app.cash.sqldelight.driver.android.AndroidSqliteDriver
import io.requery.android.database.sqlite.RequerySQLiteOpenHelperFactory
import kotlinx.coroutines.runBlocking


actual class DriverFactory actual constructor() {
Expand Down Expand Up @@ -37,7 +39,7 @@ actual class DriverFactory actual constructor() {
// AndroidSqliteDriver handles schema creation (fresh installs) and numbered .sqm
// migrations (via SQLiteOpenHelper.onUpgrade) automatically.
val driver = AndroidSqliteDriver(
schema = SteleDatabase.Schema,
schema = SteleDatabase.Schema.synchronous(),
context = context,
name = dbName,
factory = RequerySQLiteOpenHelperFactory()
Expand All @@ -57,7 +59,7 @@ actual class DriverFactory actual constructor() {
try { driver.execute(null, "PRAGMA cache_size=-8000;", 0) } catch (_: Exception) { }

// Apply incremental DDL migrations (idempotent, hash-tracked).
MigrationRunner.applyAll(driver)
runBlocking { MigrationRunner.applyAll(driver) }

return driver
}
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ class DatabaseWriteActor(
* Log INSERT or UPDATE operations for each block in the batch.
* Blocks not present in [existingByUuid] are treated as inserts.
*/
private fun logSaveBlocks(blocks: List<Block>, existingByUuid: Map<String, Block>) {
private suspend fun logSaveBlocks(blocks: List<Block>, existingByUuid: Map<String, Block>) {
val logger = opLogger ?: return
for (block in blocks) {
val existing = existingByUuid[block.uuid]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ object MigrationRunner {
* (which would leave the schema in an indeterminate state). The correct fix is always
* to add a new migration entry rather than edit an existing one.
*/
fun applyAll(driver: SqlDriver) {
suspend fun applyAll(driver: SqlDriver) {
// Bootstrap the tracking table — must succeed before anything else.
driver.execute(
identifier = null,
Expand All @@ -198,9 +198,13 @@ object MigrationRunner {
)
""".trimIndent(),
parameters = 0
)
).await()

// Load both name and hash so we can detect tampering.
// Use QueryResult.Value (not AsyncValue) so synchronous drivers (AndroidSqliteDriver)
// can call .getValue() on the mapper result without throwing.
// Both AndroidCursor.next() and JsRowCursor.next() return QueryResult.Value, so
// cursor.next().value is safe across all platforms.
val appliedByName: Map<String, String> = driver.executeQuery(
identifier = null,
sql = "SELECT name, hash FROM schema_migrations",
Expand All @@ -214,7 +218,7 @@ object MigrationRunner {
QueryResult.Value(map as Map<String, String>)
},
parameters = 0
).value
).await()

for (migration in all) {
val recordedHash = appliedByName[migration.name]
Expand All @@ -231,7 +235,7 @@ object MigrationRunner {

for (sql in migration.statements) {
try {
driver.execute(null, sql.trimIndent(), 0)
driver.execute(null, sql.trimIndent(), 0).await()
} catch (e: CancellationException) {
throw e
} catch (_: Exception) {
Expand All @@ -248,7 +252,7 @@ object MigrationRunner {
) {
bindString(0, migration.hash)
bindString(1, migration.name)
}
}.await()
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class OperationLogger(
private var seq: Long = -1L

@OptIn(DirectSqlWrite::class)
private fun nextSeq(): Long {
private suspend fun nextSeq(): Long {
if (seq < 0) {
seq = db.steleDatabaseQueries.selectLogicalClock(sessionId)
.executeAsOneOrNull() ?: 0L
Expand All @@ -65,50 +65,50 @@ class OperationLogger(
return seq
}

fun logInsert(block: Block) = log(
suspend fun logInsert(block: Block) = log(
opType = OpType.INSERT_BLOCK,
entityUuid = block.uuid,
pageUuid = block.pageUuid,
payload = OpPayload(after = block.toSnapshot()),
)

fun logUpdate(before: Block, after: Block) = log(
suspend fun logUpdate(before: Block, after: Block) = log(
opType = OpType.UPDATE_BLOCK,
entityUuid = after.uuid,
pageUuid = after.pageUuid,
payload = OpPayload(before = before.toSnapshot(), after = after.toSnapshot()),
)

fun logDelete(block: Block) = log(
suspend fun logDelete(block: Block) = log(
opType = OpType.DELETE_BLOCK,
entityUuid = block.uuid,
pageUuid = block.pageUuid,
payload = OpPayload(before = block.toSnapshot()),
)

fun logSyncBarrier() = log(
suspend fun logSyncBarrier() = log(
opType = OpType.SYNC_BARRIER,
entityUuid = null,
pageUuid = null,
payload = OpPayload(),
)

fun logBatchStart(batchId: String) = log(
suspend fun logBatchStart(batchId: String) = log(
opType = OpType.BATCH_START,
entityUuid = null,
pageUuid = null,
payload = OpPayload(batchId = batchId),
)

fun logBatchEnd(batchId: String) = log(
suspend fun logBatchEnd(batchId: String) = log(
opType = OpType.BATCH_END,
entityUuid = null,
pageUuid = null,
payload = OpPayload(batchId = batchId),
)

@OptIn(DirectSqlWrite::class)
private fun log(opType: OpType, entityUuid: String?, pageUuid: String?, payload: OpPayload) {
private suspend fun log(opType: OpType, entityUuid: String?, pageUuid: String?, payload: OpPayload) {
try {
val opId = UuidGenerator.generateV7()
val payloadJson = json.encodeToString(payload)
Expand Down
Loading
Loading