revert default reader to iceberg reader
huaxingao committed Feb 1, 2025
Parent: 2f28af7 · Commit: d36fea0
Showing 7 changed files with 11 additions and 15 deletions.
SmokeTest.java

@@ -28,7 +28,6 @@
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;

 public class SmokeTest extends SparkExtensionsTestBase {
@@ -45,7 +44,7 @@ public void dropTable() {
   // Run through our Doc's Getting Started Example
   // TODO Update doc example so that it can actually be run, modifications were required for this
   // test suite to run
-  @Ignore
+  @Test
   public void testGettingStarted() throws IOException {
     // Creating a table
     sql("CREATE TABLE %s (id bigint, data string) USING iceberg", tableName);
SparkSQLProperties.java

@@ -29,7 +29,7 @@ private SparkSQLProperties() {}

   // Controls which Parquet reader implementation to use
   public static final String PARQUET_READER_TYPE = "spark.sql.iceberg.parquet.reader-type";
-  public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.COMET;
+  public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.ICEBERG;

   // Controls whether reading/writing timestamps without timezones is allowed
   @Deprecated
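
Since the revert only changes the compiled-in default, a session can still opt back into the Comet reader through the property shown above. A minimal sketch, assuming a local SparkSession with the Iceberg Spark runtime on the classpath; the session setup here is illustrative and not part of this commit:

import org.apache.spark.sql.SparkSession;

public class CometOptIn {
  public static void main(String[] args) {
    // Illustrative session; any existing SparkSession works the same way.
    SparkSession spark =
        SparkSession.builder().master("local[*]").appName("comet-opt-in").getOrCreate();

    // Override the reverted default (ICEBERG) for this session only.
    // The key matches SparkSQLProperties.PARQUET_READER_TYPE above.
    spark.conf().set("spark.sql.iceberg.parquet.reader-type", "COMET");
  }
}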
TestDataFrameWriterV2.java

@@ -41,7 +41,6 @@
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;

 public class TestDataFrameWriterV2 extends SparkTestBaseWithCatalog {
@@ -215,7 +214,7 @@ public void testWriteWithCaseSensitiveOption() throws NoSuchTableException, Pars
     Assert.assertEquals(4, fields.size());
   }

-  @Ignore
+  @Test
   public void testMergeSchemaIgnoreCastingLongToInt() throws Exception {
     sql(
         "ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
@@ -255,7 +254,7 @@ public void testMergeSchemaIgnoreCastingLongToInt() throws Exception {
     assertThat(idField.type().typeId()).isEqualTo(Type.TypeID.LONG);
   }

-  @Ignore
+  @Test
   public void testMergeSchemaIgnoreCastingDoubleToFloat() throws Exception {
     removeTables();
     sql("CREATE TABLE %s (id double, data string) USING iceberg", tableName);
@@ -297,7 +296,7 @@ public void testMergeSchemaIgnoreCastingDoubleToFloat() throws Exception {
     assertThat(idField.type().typeId()).isEqualTo(Type.TypeID.DOUBLE);
  }

-  @Ignore
+  @Test
   public void testMergeSchemaIgnoreCastingDecimalToDecimalWithNarrowerPrecision() throws Exception {
     removeTables();
     sql("CREATE TABLE %s (id decimal(6,2), data string) USING iceberg", tableName);
SmokeTest.java

@@ -26,7 +26,6 @@
 import org.apache.iceberg.Table;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.spark.extensions.ExtensionsTestBase;
-import org.junit.Ignore;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.TestTemplate;
 import org.junit.jupiter.api.extension.ExtendWith;
@@ -41,7 +40,7 @@ public void dropTable() {
   // Run through our Doc's Getting Started Example
   // TODO Update doc example so that it can actually be run, modifications were required for this
   // test suite to run
-  @Ignore
+  @TestTemplate
   public void testGettingStarted() throws IOException {
     // Creating a table
     sql("CREATE TABLE %s (id bigint, data string) USING iceberg", tableName);
SparkSQLProperties.java

@@ -29,7 +29,7 @@ private SparkSQLProperties() {}

   // Controls which Parquet reader implementation to use
   public static final String PARQUET_READER_TYPE = "spark.sql.iceberg.parquet.reader-type";
-  public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.COMET;
+  public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.ICEBERG;
   // Controls whether to perform the nullability check during writes
   public static final String CHECK_NULLABILITY = "spark.sql.iceberg.check-nullability";
   public static final boolean CHECK_NULLABILITY_DEFAULT = true;
TestDataFrameWriterV2.java

@@ -40,7 +40,6 @@
 import org.apache.spark.sql.internal.SQLConf;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.TestTemplate;

 public class TestDataFrameWriterV2 extends TestBaseWithCatalog {
@@ -249,7 +248,7 @@ public void testMergeSchemaSparkConfiguration() throws Exception {
         sql("select * from %s order by id", tableName));
   }

-  @Disabled
+  @TestTemplate
   public void testMergeSchemaIgnoreCastingLongToInt() throws Exception {
     sql(
         "ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
@@ -289,7 +288,7 @@ public void testMergeSchemaIgnoreCastingLongToInt() throws Exception {
     assertThat(idField.type().typeId()).isEqualTo(Type.TypeID.LONG);
   }

-  @Disabled
+  @TestTemplate
   public void testMergeSchemaIgnoreCastingDoubleToFloat() throws Exception {
     removeTables();
     sql("CREATE TABLE %s (id double, data string) USING iceberg", tableName);
@@ -331,7 +330,7 @@ public void testMergeSchemaIgnoreCastingDoubleToFloat() throws Exception {
     assertThat(idField.type().typeId()).isEqualTo(Type.TypeID.DOUBLE);
   }

-  @Disabled
+  @TestTemplate
   public void testMergeSchemaIgnoreCastingDecimalToDecimalWithNarrowerPrecision() throws Exception {
     removeTables();
     sql("CREATE TABLE %s (id decimal(6,2), data string) USING iceberg", tableName);
(file name not shown in the rendered diff)

@@ -670,7 +670,7 @@ public void testEqualityDeleteWithDifferentScanAndDeleteColumns() throws IOExcep
     ParquetBatchReadConf conf =
         ImmutableParquetBatchReadConf.builder()
             .batchSize(7)
-            .readerType(ParquetReaderType.COMET)
+            .readerType(ParquetReaderType.ICEBERG)
             .build();

     for (CombinedScanTask task : tasks) {
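
To make the interplay between the compile-time constant and the session property concrete, here is a hedged sketch of how the override could resolve at read time. The actual resolution lives in Iceberg's Spark read-conf plumbing and may differ in detail; ReaderTypeResolver is a hypothetical helper, not part of this commit, and it assumes the org.apache.iceberg.spark classes are importable:

import java.util.Locale;
import org.apache.spark.sql.SparkSession;

// Hypothetical helper: a session-level override wins; otherwise the
// compile-time default (now ParquetReaderType.ICEBERG) applies.
public class ReaderTypeResolver {
  static ParquetReaderType resolve(SparkSession spark) {
    // Fall back to the reverted default when the session sets nothing.
    String value =
        spark.conf().get(
            SparkSQLProperties.PARQUET_READER_TYPE,
            SparkSQLProperties.PARQUET_READER_TYPE_DEFAULT.name());
    return ParquetReaderType.valueOf(value.toUpperCase(Locale.ROOT));
  }
}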
