diff --git a/docs/StardustDocs/topics/dataSources/ApacheArrow.md b/docs/StardustDocs/topics/dataSources/ApacheArrow.md
index c303f02ca9..c63ea11abb 100644
--- a/docs/StardustDocs/topics/dataSources/ApacheArrow.md
+++ b/docs/StardustDocs/topics/dataSources/ApacheArrow.md
@@ -25,7 +25,7 @@ and in [`%use dataframe`](SetupKotlinNotebook.md#integrate-kotlin-dataframe) for
 > {style="warning"}
 
 > Structured (nested) Arrow types such as Struct are not supported yet in Kotlin DataFrame.
-> See an issue: [Add inner / Struct type support in Arrow](https://github.com/Kotlin/dataframe/issues/536)
+> See the issue: [Add inner / Struct type support in Arrow](https://github.com/Kotlin/dataframe/issues/536)
 > {style="warning"}
 
 ## Read
diff --git a/docs/StardustDocs/topics/dataSources/Parquet.md b/docs/StardustDocs/topics/dataSources/Parquet.md
index 7cc2728ad0..18d2aa1f7f 100644
--- a/docs/StardustDocs/topics/dataSources/Parquet.md
+++ b/docs/StardustDocs/topics/dataSources/Parquet.md
@@ -23,7 +23,7 @@ Requires the [`dataframe-arrow` module](Modules.md#dataframe-arrow), which is in
 > {style="warning"}
 
 > Structured (nested) Arrow types such as Struct are not supported yet in Kotlin DataFrame.
-> See an issue: [Add inner / Struct type support in Arrow](https://github.com/Kotlin/dataframe/issues/536)
+> See the issue: [Add inner / Struct type support in Arrow](https://github.com/Kotlin/dataframe/issues/536)
 > {style="warning"}
 
 ## Reading Parquet Files
@@ -68,7 +68,7 @@ Dataset API to scan the data and materialize it as a Kotlin `DataFrame`.
 
 ```kotlin
 // Read from file paths (as strings)
-val df1 = DataFrame.readParquet("data/sales.parquet")
+val df = DataFrame.readParquet("data/sales.parquet")
 ```
 
diff --git a/samples/src/test/kotlin/org/jetbrains/kotlinx/dataframe/samples/io/Parquet.kt b/samples/src/test/kotlin/org/jetbrains/kotlinx/dataframe/samples/io/Parquet.kt
index c423cbdfc0..3d235e296e 100644
--- a/samples/src/test/kotlin/org/jetbrains/kotlinx/dataframe/samples/io/Parquet.kt
+++ b/samples/src/test/kotlin/org/jetbrains/kotlinx/dataframe/samples/io/Parquet.kt
@@ -1,14 +1,13 @@
 package org.jetbrains.kotlinx.dataframe.samples.io
 
 import io.kotest.matchers.shouldBe
-import java.io.File
-import java.nio.file.Path
-import java.nio.file.Paths
 import org.jetbrains.kotlinx.dataframe.DataFrame
 import org.jetbrains.kotlinx.dataframe.api.NullabilityOptions
-import org.junit.Test
 import org.jetbrains.kotlinx.dataframe.io.readParquet
 import org.jetbrains.kotlinx.dataframe.testParquet
+import org.junit.Test
+import java.io.File
+import java.nio.file.Paths
 
 class Parquet {
     @Test
@@ -56,7 +55,7 @@ class Parquet {
         val df = DataFrame.readParquet(
             file,
             nullability = NullabilityOptions.Infer,
-            batchSize = 64L * 1024
+            batchSize = 64L * 1024,
        )
         // SampleEnd
         df.rowsCount() shouldBe 300
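
For readers skimming this diff, here is a minimal standalone sketch combining the two `readParquet` call forms touched above. It is illustrative only: it assumes the `String` and `File` overloads suggested by the docs snippet and the sample test's imports, and `data/sales.parquet` is a hypothetical placeholder path, not a file included in this change.

```kotlin
import org.jetbrains.kotlinx.dataframe.DataFrame
import org.jetbrains.kotlinx.dataframe.api.NullabilityOptions
import org.jetbrains.kotlinx.dataframe.io.readParquet
import java.io.File

fun main() {
    // Simple form from the Parquet.md snippet: read a Parquet file by path string.
    // "data/sales.parquet" is a hypothetical path, not part of this PR.
    val df = DataFrame.readParquet("data/sales.parquet")

    // Fuller form from the updated sample test: pass a File plus explicit
    // nullability inference and Arrow batch size (note the trailing comma,
    // matching the formatting change in the diff).
    val file = File("data/sales.parquet") // hypothetical path
    val df2 = DataFrame.readParquet(
        file,
        nullability = NullabilityOptions.Infer,
        batchSize = 64L * 1024,
    )

    // Quick sanity check on what was read.
    println("rows: ${df.rowsCount()} / ${df2.rowsCount()}")
}
```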