diff --git a/expo/android/src/main/java/land/gno/gnonative/GnonativeModule.kt b/expo/android/src/main/java/land/gno/gnonative/GnonativeModule.kt index 196324cf..8d7bd31e 100644 --- a/expo/android/src/main/java/land/gno/gnonative/GnonativeModule.kt +++ b/expo/android/src/main/java/land/gno/gnonative/GnonativeModule.kt @@ -17,6 +17,7 @@ class GnonativeModule : Module() { private var rootDir: File? = null private var socketPort = 0 private var bridgeGnoNative: Bridge? = null + private var nativeDBManager: NativeDBManager? = null // Each module class must implement the definition function. The definition consists of components // that describes the module's functionality and behavior. @@ -33,6 +34,7 @@ class GnonativeModule : Module() { OnCreate { context = appContext.reactContext rootDir = context!!.filesDir + nativeDBManager = NativeDBManager(context!!) } OnDestroy { @@ -53,8 +55,8 @@ class GnonativeModule : Module() { try { val config: BridgeConfig = Gnonative.newBridgeConfig() ?: throw Exception("") config.rootDir = rootDir!!.absolutePath + config.nativeDB = nativeDBManager bridgeGnoNative = Gnonative.newBridge(config) - promise.resolve(true) } catch (err: CodedException) { promise.reject(err) diff --git a/expo/android/src/main/java/land/gno/gnonative/NativeDBManager.kt b/expo/android/src/main/java/land/gno/gnonative/NativeDBManager.kt new file mode 100644 index 00000000..f1ded00d --- /dev/null +++ b/expo/android/src/main/java/land/gno/gnonative/NativeDBManager.kt @@ -0,0 +1,324 @@ +package land.gno.gnonative + +import android.content.Context +import android.content.SharedPreferences +import android.os.Build +import android.security.keystore.KeyGenParameterSpec +import android.security.keystore.KeyProperties +import android.util.Base64 +import gnolang.gno.gnonative.NativeDB +import java.io.ByteArrayOutputStream +import java.nio.ByteBuffer +import java.security.KeyStore +import javax.crypto.Cipher +import javax.crypto.KeyGenerator +import javax.crypto.SecretKey 
+import javax.crypto.spec.GCMParameterSpec +import kotlin.math.min +import androidx.core.content.edit + +class NativeDBManager( + context: Context, + private val prefsName: String = "gnonative_secure_db", + private val keyAlias: String = "gnonative_aes_key" +) : NativeDB { + + // -------- storage / index -------- + private val prefs: SharedPreferences = + context.getSharedPreferences(prefsName, Context.MODE_PRIVATE) + private val entryPrefix = "kv:" // entryPrefix + hexKey -> Base64(encrypted blob) + private val idxKey = "__idx__" // CSV of hex keys in ascending order + + // -------- crypto -------- + private val ks: KeyStore = KeyStore.getInstance(ANDROID_KEYSTORE).apply { load(null) } + + private val lock = Any() + + init { + ensureAesKey() + if (!prefs.contains(idxKey)) prefs.edit { putString(idxKey, "") } + } + + // ========== NativeDB implementation ========== + + override fun delete(p0: ByteArray?) { + val key = requireKey(p0) + val hex = hex(key) + synchronized(lock) { + val idx = loadIndexAsc().toMutableList() + val pos = lowerBound(idx, hex) + if (pos < idx.size && idx[pos] == hex) { + idx.removeAt(pos) + saveIndexAsc(idx) + } + prefs.edit { remove("$entryPrefix$hex") } + } + } + + override fun deleteSync(p0: ByteArray?) 
{ + delete(p0) + } + + override fun get(p0: ByteArray?): ByteArray { + val key = requireKey(p0) + val hex = hex(key) + val b64 = synchronized(lock) { prefs.getString("$entryPrefix$hex", null) } + ?: return ByteArray(0) // gomobile generated non-null return -> use empty on miss + val blob = Base64.decode(b64, Base64.NO_WRAP) + return decrypt(blob) ?: ByteArray(0) + } + + override fun has(p0: ByteArray?): Boolean { + val key = requireKey(p0) + val hex = hex(key) + return synchronized(lock) { prefs.contains("$entryPrefix$hex") } + } + + override fun scanChunk( + p0: ByteArray?, // start + p1: ByteArray?, // end + p2: ByteArray?, // seekKey + p3: Long, // limit + p4: Boolean // reverse + ): ByteArray { + val limit = if (p3 < 0) 0 else min(p3, Int.MAX_VALUE.toLong()).toInt() + return synchronized(lock) { + val asc = loadIndexAsc() // ascending hex keys + val startHex = p0?.let { hex(it) } + val endHex = p1?.let { hex(it) } + val seekHex = p2?.let { hex(it) } + + val loBase = startHex?.let { lowerBound(asc, it) } ?: 0 + val hiBase = endHex?.let { lowerBound(asc, it) } ?: asc.size + var slice: List<String> = if (hiBase <= loBase) emptyList() else asc.subList(loBase, hiBase) + + // seek positioning & direction + slice = if (!p4) { + val from = seekHex?.let { upperBound(slice, it) } ?: 0 + if (from >= slice.size) emptyList() else slice.subList(from, slice.size) + } else { + val positioned = if (seekHex != null) { + val idx = upperBound(slice, seekHex) - 1 + if (idx < 0) emptyList() else slice.subList(0, idx + 1) + } else slice + positioned.asReversed() + } + + val page = if (limit == 0) emptyList() else slice.take(limit) + val hasMore = page.isNotEmpty() && page.size < slice.size + val nextSeekHex = if (hasMore) page.last() else null + + // materialize kv pairs in traversal order + val pairs = ArrayList<Pair<ByteArray, ByteArray>>(page.size) + for (h in page) { + val b64 = prefs.getString("$entryPrefix$h", null) ?: continue + val v = decrypt(Base64.decode(b64, Base64.NO_WRAP)) ?: continue + pairs += 
(unhex(h) to v) + } + + // flags(1) | count(u32 BE) | [kLen k vLen v]* | nextSeekLen(u32 BE) | nextSeek + encodeChunkBlobBE(pairs, nextSeekHex?.let { unhex(it) }, hasMore) + } + } + + override fun set(p0: ByteArray?, p1: ByteArray?) { + val key = requireKey(p0) + val value = requireValue(p1) + val hex = hex(key) + val enc = encrypt(value) + val b64 = Base64.encodeToString(enc, Base64.NO_WRAP) + synchronized(lock) { + val idx = loadIndexAsc().toMutableList() + val pos = lowerBound(idx, hex) + if (pos == idx.size || idx[pos] != hex) { + idx.add(pos, hex) + saveIndexAsc(idx) + } + prefs.edit { putString("$entryPrefix$hex", b64) } + } + } + + override fun setSync(p0: ByteArray?, p1: ByteArray?) { + set(p0, p1) + } + + // ========== helpers ========== + + private fun requireKey(b: ByteArray?): ByteArray { + require(!(b == null || b.isEmpty())) { "key must not be null/empty" } + return b + } + private fun requireValue(b: ByteArray?): ByteArray { + require(b != null) { "value must not be null" } + return b + } + + // ----- index (csv of hex keys, ascending) ----- + private fun loadIndexAsc(): List<String> { + val csv = prefs.getString(idxKey, "") ?: "" + return if (csv.isEmpty()) emptyList() else csv.split(',').filter { it.isNotEmpty() } + } + private fun saveIndexAsc(keys: List<String>) { + prefs.edit { putString(idxKey, if (keys.isEmpty()) "" else keys.joinToString(",")) } + } + + private fun lowerBound(list: List<String>, key: String): Int { + var lo = 0; var hi = list.size + while (lo < hi) { + val mid = (lo + hi) ushr 1 + if (list[mid] < key) lo = mid + 1 else hi = mid + } + return lo + } + private fun upperBound(list: List<String>, key: String): Int { + var lo = 0; var hi = list.size + while (lo < hi) { + val mid = (lo + hi) ushr 1 + if (list[mid] <= key) lo = mid + 1 else hi = mid + } + return lo + } + + // crypto AES/GCM, StrongBox preferred + private fun ensureAesKey() { + if (getAesKey() != null) return + + val kg = KeyGenerator.getInstance(KeyProperties.KEY_ALGORITHM_AES, ANDROID_KEYSTORE) + 
val base = KeyGenParameterSpec.Builder( + keyAlias, KeyProperties.PURPOSE_ENCRYPT or KeyProperties.PURPOSE_DECRYPT + ) + .setBlockModes(KeyProperties.BLOCK_MODE_GCM) + .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_NONE) + .setKeySize(256) + .setRandomizedEncryptionRequired(true) + + try { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) { + base.setIsStrongBoxBacked(true) + } + kg.init(base.build()) + kg.generateKey() + return + } catch (_: Throwable) { + // fall back below without StrongBox + } + + kg.init( + KeyGenParameterSpec.Builder( + keyAlias, KeyProperties.PURPOSE_ENCRYPT or KeyProperties.PURPOSE_DECRYPT + ) + .setBlockModes(KeyProperties.BLOCK_MODE_GCM) + .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_NONE) + .setKeySize(256) + .setRandomizedEncryptionRequired(true) + .build() + ) + kg.generateKey() + } + + private fun getAesKey(): SecretKey? { + val e = ks.getEntry(keyAlias, null) as? KeyStore.SecretKeyEntry + return e?.secretKey + } + + private fun encrypt(plain: ByteArray): ByteArray { + val key = getAesKey() ?: error("AES key missing") + + val c = Cipher.getInstance(AES_GCM) + c.init(Cipher.ENCRYPT_MODE, key) + + val iv = c.iv + val ct = c.doFinal(plain) + + // payload: [version=1][ivLen][iv][ct] + val out = ByteArray(1 + 1 + iv.size + ct.size) + var i = 0 + out[i++] = 1 + out[i++] = iv.size.toByte() + System.arraycopy(iv, 0, out, i, iv.size); i += iv.size + System.arraycopy(ct, 0, out, i, ct.size) + return out + } + + private fun decrypt(blob: ByteArray?): ByteArray? 
{ + if (blob == null || blob.size < 1 + 1 + 12) return null // iv is usually 12 bytes + var i = 0 + val ver = blob[i++] + require(ver.toInt() == 1) { "bad payload version=$ver" } + val ivLen = blob[i++].toInt() and 0xFF + require(ivLen in 12..32) { "bad iv length" } + require(blob.size >= 1 + 1 + ivLen + 1) { "short blob" } + val iv = ByteArray(ivLen) + System.arraycopy(blob, i, iv, 0, ivLen); i += ivLen + val ct = ByteArray(blob.size - i) + System.arraycopy(blob, i, ct, 0, ct.size) + + val key = getAesKey() ?: error("AES key missing") + val c = Cipher.getInstance(AES_GCM) + c.init(Cipher.DECRYPT_MODE, key, GCMParameterSpec(128, iv)) + return c.doFinal(ct) + } + + // chunk framing (match Go format) + private fun encodeChunkBlobBE( + entries: List<Pair<ByteArray, ByteArray>>, + nextSeek: ByteArray?, + hasMore: Boolean + ): ByteArray { + val bos = ByteArrayOutputStream() + + // flags (bit0 = hasMore) + bos.write(if (hasMore) 0x01 else 0x00) + + // count (u32 BE) + bos.write(u32be(entries.size)) + + // entries + for ((k, v) in entries) { + bos.write(u32be(k.size)); bos.write(k) + bos.write(u32be(v.size)); bos.write(v) + } + + // nextSeek + val ns = nextSeek ?: ByteArray(0) + bos.write(u32be(ns.size)) + if (ns.isNotEmpty()) bos.write(ns) + + return bos.toByteArray() + } + + // ----- utils ----- + private fun u32be(n: Int): ByteArray { + val bb = ByteBuffer.allocate(4) + bb.putInt(n) // big-endian by default + return bb.array() + } + + private fun hex(b: ByteArray): String { + val out = CharArray(b.size * 2) + val h = "0123456789abcdef".toCharArray() + var i = 0 + for (v in b) { + val x = v.toInt() and 0xFF + out[i++] = h[x ushr 4]; out[i++] = h[x and 0x0F] + } + return String(out) + } + + private fun unhex(s: String): ByteArray { + require(s.length % 2 == 0) { "odd hex length" } + val out = ByteArray(s.length / 2) + var i = 0; var j = 0 + while (i < s.length) { + val hi = Character.digit(s[i++], 16) + val lo = Character.digit(s[i++], 16) + out[j++] = ((hi shl 4) or lo).toByte() + } + return out 
+ } + + companion object { + private const val ANDROID_KEYSTORE = "AndroidKeyStore" + private const val AES_GCM = "AES/GCM/NoPadding" + } +} diff --git a/expo/ios/GnonativeModule.swift b/expo/ios/GnonativeModule.swift index 3d44db64..0d18388a 100644 --- a/expo/ios/GnonativeModule.swift +++ b/expo/ios/GnonativeModule.swift @@ -60,6 +60,7 @@ public class GnonativeModule: Module { } config.rootDir = self.appRootDir! config.tmpDir = self.tmpDir! + config.nativeDB = NativeDBManager.shared // On simulator we can't create an UDS, see comment below #if targetEnvironment(simulator) diff --git a/expo/ios/NativeDBManager.swift b/expo/ios/NativeDBManager.swift new file mode 100644 index 00000000..d79fd116 --- /dev/null +++ b/expo/ios/NativeDBManager.swift @@ -0,0 +1,262 @@ +// +// NativeDBManager.swift +// Pods +// +// Created by Rémi BARBERO on 25/09/2025. +// + +import Foundation +import Security +import GnoCore + +public class NativeDBManager: NSObject, GnoGnonativeNativeDBProtocol { + public static var shared: NativeDBManager = NativeDBManager() + + // MARK: - Private Properties + private let service: String + private let accessGroup: String? + + // MARK: - Initialization + init(service: String = Bundle.main.bundleIdentifier ?? "GnoNativeService", accessGroup: String? = nil) { + self.service = service + self.accessGroup = accessGroup + } + + // MARK: - Public Interface Implementation + + public func get(_ key: Data?) -> Data? { + guard let key = key else { return nil } + + let account = keyToAccount(key) + + var query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecAttrAccount as String: account, + kSecReturnData as String: true, + kSecMatchLimit as String: kSecMatchLimitOne + ] + + if let accessGroup = accessGroup { + query[kSecAttrAccessGroup as String] = accessGroup + } + + var result: AnyObject? 
+ let status = SecItemCopyMatching(query as CFDictionary, &result) + + guard status == errSecSuccess else { + return nil + } + + return result as? Data + } + + public func delete(_ key: Data?) { + DispatchQueue.global(qos: .utility).async { + self.deleteSync(key) + } + } + + public func deleteSync(_ key: Data?) { + guard let key = key else { return } + + let account = keyToAccount(key) + + var query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecAttrAccount as String: account + ] + + if let accessGroup = accessGroup { + query[kSecAttrAccessGroup as String] = accessGroup + } + + SecItemDelete(query as CFDictionary) + } + + public func has(_ key: Data?) -> Bool { + guard let key = key else { return false } + + let account = keyToAccount(key) + + var query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecAttrAccount as String: account, + kSecReturnData as String: false, + kSecMatchLimit as String: kSecMatchLimitOne + ] + + if let accessGroup = accessGroup { + query[kSecAttrAccessGroup as String] = accessGroup + } + + let status = SecItemCopyMatching(query as CFDictionary, nil) + return status == errSecSuccess + } + + public func set(_ key: Data?, p1 value: Data?) { + DispatchQueue.global(qos: .utility).async { + self.setSync(key, p1: value) + } + } + + public func setSync(_ key: Data?, p1 value: Data?) 
{ + guard let key = key, let value = value else { return } + + let account = keyToAccount(key) + + // First, try to update existing item + var query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecAttrAccount as String: account + ] + + if let accessGroup = accessGroup { + query[kSecAttrAccessGroup as String] = accessGroup + } + + let attributes: [String: Any] = [ + kSecValueData as String: value + ] + + let updateStatus = SecItemUpdate(query as CFDictionary, attributes as CFDictionary) + + if updateStatus == errSecItemNotFound { + // Item doesn't exist, create new one + var newItem = query + newItem[kSecValueData as String] = value + newItem[kSecAttrAccessible as String] = kSecAttrAccessibleWhenUnlockedThisDeviceOnly + + SecItemAdd(newItem as CFDictionary, nil) + } + } + + public func scanChunk(_ start: Data?, end: Data?, seekKey: Data?, limit: Int, reverse: Bool) throws -> Data { + // 1) fetch all items for this service + var query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecMatchLimit as String: kSecMatchLimitAll, + kSecReturnAttributes as String: true, + kSecReturnData as String: true, + ] + + if let accessGroup = accessGroup { + query[kSecAttrAccessGroup as String] = accessGroup + } + var result: CFTypeRef? + let status = SecItemCopyMatching(query as CFDictionary, &result) + + var pairs: [(key: Data, val: Data)] = [] + if status == errSecSuccess, let items = result as? [[String: Any]] { + for item in items { + guard let account = item[kSecAttrAccount as String] as? String, + let keyBytes = accountToKey(account), + let val = item[kSecValueData as String] as? 
Data else { continue } + if inRange(keyBytes, start: start, end: end) { + pairs.append((keyBytes, val)) + } + } + } + + // 2) sort + if reverse { + pairs.sort { a, b in + // descending: a > b + return lt(b.key, a.key) + } + } else { + pairs.sort { a, b in + // ascending: a < b + return lt(a.key, b.key) + } + } + + // 3) apply seekKey (exclusive) + if let sk = seekKey, !sk.isEmpty { + if reverse { + // keep items with key < seekKey + let idx = pairs.firstIndex(where: { lt($0.key, sk) }) ?? pairs.count + // pairs are descending, so drop while key >= seekKey + pairs = Array(pairs[idx...]) + } else { + // keep items with key > seekKey + let idx = pairs.lastIndex(where: { lte($0.key, sk) }) ?? -1 + let startIdx = idx + 1 + pairs = (startIdx < pairs.count) ? Array(pairs[startIdx...]) : [] + } + } + + // 4) limit + let lim = max(0, Int(limit)) + let chunk = (lim > 0 && lim < pairs.count) ? Array(pairs.prefix(lim)) : pairs + let hasMore = chunk.count < pairs.count + let nextSeek = chunk.last?.key ?? Data() + + // 6) Frame the blob + var blob = Data(capacity: 1 + 4) // will grow as needed + var flags: UInt8 = 0 + if hasMore { flags |= 0x01 } + blob.append(&flags, count: 1) + + var countBE = UInt32(chunk.count).bigEndian + withUnsafeBytes(of: &countBE) { blob.append($0.bindMemory(to: UInt8.self)) } + + for (k, v) in chunk { + var klen = UInt32(k.count).bigEndian + var vlen = UInt32(v.count).bigEndian + withUnsafeBytes(of: &klen) { blob.append($0.bindMemory(to: UInt8.self)) } + blob.append(k) + withUnsafeBytes(of: &vlen) { blob.append($0.bindMemory(to: UInt8.self)) } + blob.append(v) + } + + var nlen = UInt32(nextSeek.count).bigEndian + withUnsafeBytes(of: &nlen) { blob.append($0.bindMemory(to: UInt8.self)) } + blob.append(nextSeek) + + return blob + } + + // MARK: - Utility Methods + + private func keyToAccount(_ key: Data) -> String { + return String(data: key, encoding: .utf8) ?? key.base64EncodedString() + } + + private func accountToKey(_ account: String) -> Data? 
{ + // Try UTF-8 first + if let utf8Data = account.data(using: .utf8) { + // Check if this was originally a base64 string by trying to decode it + if let base64Data = Data(base64Encoded: account), base64Data != utf8Data { + // This account was base64 encoded, return the decoded data + return base64Data + } else { + // This was a UTF-8 string, return the UTF-8 data + return utf8Data + } + } + + // Fallback: try base64 decoding + return Data(base64Encoded: account) + } + + // --- byte-wise comparisons on decoded keys --- + @inline(__always) + private func lt(_ a: Data, _ b: Data) -> Bool { + a.lexicographicallyPrecedes(b) + } + @inline(__always) + private func gte(_ a: Data, _ b: Data) -> Bool { !lt(a, b) } + @inline(__always) + private func lte(_ a: Data, _ b: Data) -> Bool { !lt(b, a) } + @inline(__always) + private func inRange(_ k: Data, start: Data?, end: Data?) -> Bool { + if let s = start, lt(k, s) { return false } // k >= s + if let e = end, !lt(k, e) { return false } // k < e + return true + } +} diff --git a/framework/service/bridge.go b/framework/service/bridge.go index e1d7aa3c..47c15d51 100644 --- a/framework/service/bridge.go +++ b/framework/service/bridge.go @@ -19,6 +19,7 @@ import ( ) type BridgeConfig struct { + NativeDB NativeDB RootDir string TmpDir string UseTcpListener bool @@ -63,6 +64,13 @@ func NewBridge(config *BridgeConfig) (*Bridge, error) { // start gRPC service { + if config.NativeDB != nil { + // use provided NativeDB + svcOpts = append(svcOpts, + service.WithNativeDB(&db{NativeDB: config.NativeDB}), + ) + } + svcOpts = append(svcOpts, service.WithRootDir(config.RootDir), service.WithTmpDir(config.TmpDir), diff --git a/framework/service/db.go b/framework/service/db.go new file mode 100644 index 00000000..d2126189 --- /dev/null +++ b/framework/service/db.go @@ -0,0 +1,306 @@ +package gnonative + +import ( + "encoding/binary" + "fmt" + "sync" + "sync/atomic" + + mdb "github.com/gnolang/gno/tm2/pkg/db" +) + +var errShort = 
fmt.Errorf("chunk blob: short buffer") + +// NativeDB is implemented in the native (Kotlin/Swift) layer. +type NativeDB interface { + Get([]byte) []byte + Has(key []byte) bool + Set([]byte, []byte) + SetSync([]byte, []byte) + Delete([]byte) + DeleteSync([]byte) + ScanChunk(start, end, seekKey []byte, limit int, reverse bool) ([]byte, error) +} + +type db struct { + NativeDB + + closed atomic.Bool + mu sync.RWMutex // used for optional safety around Stats/Print +} + +func (d *db) Close() error { + d.closed.Store(true) + return nil +} + +func (d *db) ensureOpen() { + if d.closed.Load() { + panic("db: use after Close") + } +} + +func (db *db) Iterator(start, end []byte) mdb.Iterator { + db.ensureOpen() + it := &iterator{ + db: db, + start: append([]byte(nil), start...), + end: append([]byte(nil), end...), + reverse: false, + chunkLimit: 256, + } + it.fill() + return it +} + +func (db *db) ReverseIterator(start, end []byte) mdb.Iterator { + db.ensureOpen() + it := &iterator{ + db: db, + start: append([]byte(nil), start...), + end: append([]byte(nil), end...), + reverse: true, + chunkLimit: 256, + } + it.fill() + return it +} + +func (d *db) Print() { + d.mu.RLock() + defer d.mu.RUnlock() + // With only point ops exposed, we can't enumerate keys here. + // Keep as a stub or log something useful for debugging: + fmt.Println("db.Print(): NativeDB has no range API; nothing to print") +} + +func (d *db) Stats() map[string]string { + d.mu.RLock() + defer d.mu.RUnlock() + // Return whatever you track on the Go side. Native side has no stats here. 
+ return map[string]string{ + "closed": fmt.Sprintf("%v", d.closed.Load()), + } +} + +func (d *db) NewBatch() mdb.Batch { + d.ensureOpen() + return &batch{db: d} +} + +// --- Batch implementation (pure Go) --- + +type batch struct { + db *db + ops []op +} + +type op struct { + del bool + key []byte + value []byte // nil for delete +} + +func (b *batch) Set(key, value []byte) { + b.db.ensureOpen() + // Defensive copies: gomobile & callers must not mutate later. + k := append([]byte(nil), key...) + v := append([]byte(nil), value...) + b.ops = append(b.ops, op{del: false, key: k, value: v}) +} + +func (b *batch) Delete(key []byte) { + b.db.ensureOpen() + k := append([]byte(nil), key...) + b.ops = append(b.ops, op{del: true, key: k}) +} + +// Write applies ops using async variants. +func (b *batch) Write() { + b.db.ensureOpen() + for _, o := range b.ops { + if o.del { + b.db.Delete(o.key) + } else { + b.db.Set(o.key, o.value) + } + } + // Clear buffer to allow reuse if desired. + b.ops = b.ops[:0] +} + +// WriteSync applies ops using sync variants. +func (b *batch) WriteSync() { + b.db.ensureOpen() + for _, o := range b.ops { + if o.del { + b.db.DeleteSync(o.key) + } else { + b.db.SetSync(o.key, o.value) + } + } + b.ops = b.ops[:0] +} + +func (b *batch) Close() { + // Drop buffered ops; allow GC. + b.ops = nil + b.db = nil +} + +type kv struct { + k []byte + v []byte +} + +type iterator struct { + db *db + start []byte + end []byte + reverse bool + seekKey []byte + chunk []kv + i int + hasMore bool + closed bool + chunkLimit int +} + +func (it *iterator) Domain() (start, end []byte) { return it.start, it.end } + +func (it *iterator) Valid() bool { + if it.closed { + return false + } + for it.i >= len(it.chunk) && it.hasMore { + it.fill() + } + return it.i < len(it.chunk) +} + +func (it *iterator) Next() { + if !it.Valid() { + return + } + cur := it.chunk[it.i] + it.i++ + // keep seekKey strictly at the last returned key + it.seekKey = append(it.seekKey[:0], cur.k...) 
+} + +func (it *iterator) Key() []byte { + if !it.Valid() { + return nil + } + return it.chunk[it.i].k +} + +func (it *iterator) Value() []byte { + if !it.Valid() { + return nil + } + return it.chunk[it.i].v +} +func (it *iterator) Close() { it.closed = true; it.chunk = nil } + +func (it *iterator) fill() { + if it.closed { + return + } + blob, err := it.db.ScanChunk(it.start, it.end, it.seekKey, it.chunkLimit, it.reverse) + if err != nil { + it.chunk, it.i, it.hasMore = nil, 0, false + return + } + pairs, nextSeek, hasMore, err := decodeChunkBlob(blob) + if err != nil { + it.chunk, it.i, it.hasMore = nil, 0, false + return + } + it.chunk = pairs + it.i = 0 + it.hasMore = hasMore + if len(nextSeek) > 0 { + it.seekKey = append(it.seekKey[:0], nextSeek...) + } +} + +// --- framing decode --- + +// decodeChunkBlob parses a single binary blob produced by NativeDB.ScanChunk. +// +// Blob layout (all integers are big-endian): +// +// +---------+-------------------+---------------------------------------+--------------------------+------------------------+ +// | Offset | Field | Description | Type/Size | Notes | +// +---------+-------------------+---------------------------------------+--------------------------+------------------------+ +// | 0 | flags | bit0 = hasMore (1 => more pages) | uint8 (1 byte) | other bits reserved | +// | 1 | count | number of K/V pairs that follow | uint32 (4 bytes, BE) | N | +// | 5 | pairs[0..N-1] | repeated K/V frames: | | | +// | | - klen | key length | uint32 (4 bytes, BE) | | +// | | - key | key bytes | klen bytes | | +// | | - vlen | value length | uint32 (4 bytes, BE) | | +// | | - value | value bytes | vlen bytes | | +// | ... | nextSeekLen | length of the nextSeek key | uint32 (4 bytes, BE) | 0 if empty | +// | ... 
| nextSeek | nextSeek key bytes | nextSeekLen bytes | | +// +---------+-------------------+---------------------------------------+--------------------------+------------------------+ +// +// Semantics: +// - The iterator uses 'hasMore' to know if additional pages exist. +// - 'nextSeek' is typically the last key of this page; pass it back as 'seekKey' (exclusive) +// on the next ScanChunk call to continue from the next item. +// - Keys/values are raw bytes; ordering and range checks are done on the raw key bytes. +// +// On decode errors (short buffer / lengths out of range), the function returns errShort. +func decodeChunkBlob(b []byte) (pairs []kv, nextSeek []byte, hasMore bool, err error) { + if len(b) < 1+4 { + return nil, nil, false, errShort + } + + flags := b[0] + hasMore = (flags & 0x01) != 0 + b = b[1:] + + count := int(binary.BigEndian.Uint32(b[:4])) + b = b[4:] + + pairs = make([]kv, 0, count) + for i := 0; i < count; i++ { + if len(b) < 4 { + return nil, nil, false, errShort + } + klen := int(binary.BigEndian.Uint32(b[:4])) + b = b[4:] + if klen < 0 || len(b) < klen { + return nil, nil, false, errShort + } + k := append([]byte(nil), b[:klen]...) + b = b[klen:] + + if len(b) < 4 { + return nil, nil, false, errShort + } + vlen := int(binary.BigEndian.Uint32(b[:4])) + b = b[4:] + if vlen < 0 || len(b) < vlen { + return nil, nil, false, errShort + } + v := append([]byte(nil), b[:vlen]...) + b = b[vlen:] + + pairs = append(pairs, kv{k: k, v: v}) + } + + if len(b) < 4 { + return nil, nil, false, errShort + } + nlen := int(binary.BigEndian.Uint32(b[:4])) + b = b[4:] + if nlen < 0 || len(b) < nlen { + return nil, nil, false, errShort + } + if nlen > 0 { + nextSeek = append([]byte(nil), b[:nlen]...) 
+ } + return pairs, nextSeek, hasMore, nil +} diff --git a/service/config.go b/service/config.go index 031f401f..3edd5b46 100644 --- a/service/config.go +++ b/service/config.go @@ -4,20 +4,24 @@ import ( "os" "path/filepath" + "github.com/gnolang/gno/tm2/pkg/db" api_gen "github.com/gnolang/gnonative/v4/api/gen/go" "github.com/pkg/errors" "go.uber.org/zap" ) -const DEFAULT_TCP_ADDR = ":26658" -const DEFAULT_SOCKET_SUBDIR = "s" -const DEFAULT_SOCKET_FILE = "gno" +const ( + DEFAULT_TCP_ADDR = ":26658" + DEFAULT_SOCKET_SUBDIR = "s" + DEFAULT_SOCKET_FILE = "gno" +) // Config describes a set of settings for a GnoNativeService type Config struct { Logger *zap.Logger Remote string ChainID string + NativeDB db.DB RootDir string TmpDir string TcpAddr string @@ -160,6 +164,16 @@ var WithFallbacChainID GnoNativeOption = func(cfg *Config) error { return nil } +// --- NativeDB options --- + +// WithNativeDB sets the given native DB. +var WithNativeDB = func(db db.DB) GnoNativeOption { + return func(cfg *Config) error { + cfg.NativeDB = db + return nil + } +} + // --- RootDir options --- // WithRootDir sets the given root directory path. diff --git a/service/service.go b/service/service.go index 7a48c02a..34d94966 100644 --- a/service/service.go +++ b/service/service.go @@ -107,7 +107,17 @@ func initService(cfg *Config) (*gnoNativeService, error) { return nil, err } - svc.keybase, _ = keys.NewKeyBaseFromDir(cfg.RootDir) + if cfg.NativeDB != nil { + cfg.Logger.Debug("using nativeDB for keybase") + svc.keybase = keys.NewDBKeybase(cfg.NativeDB) + } else { + var err error + cfg.Logger.Debug("using filesystem for keybase", zap.String("rootdir", cfg.RootDir)) + svc.keybase, err = keys.NewKeyBaseFromDir(cfg.RootDir) + if err != nil { + return nil, err + } + } var err error svc.rpcClient, err = rpcclient.NewHTTPClient(cfg.Remote)