|
22 | 22 | import org.apache.fluss.exception.InvalidTableException; |
23 | 23 | import org.apache.fluss.lake.lakestorage.TestingLakeCatalogContext; |
24 | 24 | import org.apache.fluss.metadata.Schema; |
| 25 | +import org.apache.fluss.metadata.TableChange; |
25 | 26 | import org.apache.fluss.metadata.TableDescriptor; |
26 | 27 | import org.apache.fluss.metadata.TablePath; |
27 | 28 | import org.apache.fluss.types.DataTypes; |
|
30 | 31 | import org.apache.iceberg.SortDirection; |
31 | 32 | import org.apache.iceberg.SortField; |
32 | 33 | import org.apache.iceberg.Table; |
| 34 | +import org.apache.iceberg.catalog.Catalog; |
33 | 35 | import org.apache.iceberg.catalog.TableIdentifier; |
34 | 36 | import org.apache.iceberg.types.Types; |
35 | 37 | import org.assertj.core.api.Assertions; |
@@ -455,4 +457,88 @@ void testIllegalPartitionKeyType(boolean isPrimaryKeyTable) throws Exception { |
455 | 457 | .hasMessage( |
456 | 458 | "Partition key only support string type for iceberg currently. Column `c1` is not string type."); |
457 | 459 | } |
| 460 | + |
| 461 | + @Test |
| 462 | + void alterTableProperties() { |
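| | +        // verify that set/reset changes passed to alterTable are applied to the underlying Iceberg table properties |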
| 463 | + String database = "test_alter_table_db"; |
| 464 | + String tableName = "test_alter_table"; |
| 465 | + |
| 466 | + Schema flussSchema = Schema.newBuilder().column("id", DataTypes.BIGINT()).build(); |
| 467 | + |
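| | +        // create the table with one "iceberg."-prefixed property and one plain Fluss table property |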
| 468 | + TableDescriptor tableDescriptor = |
| 469 | + TableDescriptor.builder() |
| 470 | + .schema(flussSchema) |
| 471 | + .distributedBy(3) |
| 472 | + .property("iceberg.commit.retry.num-retries", "5") |
| 473 | + .property("table.datalake.freshness", "30s") |
| 474 | + .build(); |
| 475 | + |
| 476 | + TablePath tablePath = TablePath.of(database, tableName); |
| 477 | + TestingLakeCatalogContext context = new TestingLakeCatalogContext(); |
| 478 | + flussIcebergCatalog.createTable(tablePath, tableDescriptor, context); |
| 479 | + |
| 480 | + Catalog catalog = flussIcebergCatalog.getIcebergCatalog(); |
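| | +        // "iceberg."-prefixed properties should land on the Iceberg table with the prefix stripped; Fluss properties are stored under a "fluss." prefix |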
| 481 | + assertThat(catalog.loadTable(TableIdentifier.of(database, tableName)).properties()) |
| 482 | + .containsEntry("commit.retry.num-retries", "5") |
| 483 | + .containsEntry("fluss.table.datalake.freshness", "30s") |
| 484 | + .doesNotContainKeys("iceberg.commit.retry.num-retries", "table.datalake.freshness"); |
| 485 | + |
| 486 | +        // set a new iceberg-prefixed property |
| 487 | + flussIcebergCatalog.alterTable( |
| 488 | + tablePath, |
| 489 | + List.of(TableChange.set("iceberg.commit.retry.min-wait-ms", "1000")), |
| 490 | + context); |
| 491 | + assertThat(catalog.loadTable(TableIdentifier.of(database, tableName)).properties()) |
| 492 | + .containsEntry("commit.retry.min-wait-ms", "1000") |
| 493 | + .containsEntry("commit.retry.num-retries", "5") |
| 494 | + .containsEntry("fluss.table.datalake.freshness", "30s") |
| 495 | + .doesNotContainKeys( |
| 496 | + "iceberg.commit.retry.min-wait-ms", |
| 497 | + "iceberg.commit.retry.num-retries", |
| 498 | + "table.datalake.freshness"); |
| 499 | + |
| 500 | + // update existing properties |
| 501 | + flussIcebergCatalog.alterTable( |
| 502 | + tablePath, |
| 503 | + List.of( |
| 504 | + TableChange.set("iceberg.commit.retry.num-retries", "10"), |
| 505 | + TableChange.set("table.datalake.freshness", "23s")), |
| 506 | + context); |
| 507 | + assertThat(catalog.loadTable(TableIdentifier.of(database, tableName)).properties()) |
| 508 | + .containsEntry("commit.retry.min-wait-ms", "1000") |
| 509 | + .containsEntry("commit.retry.num-retries", "10") |
| 510 | + .containsEntry("fluss.table.datalake.freshness", "23s") |
| 511 | + .doesNotContainKeys( |
| 512 | + "iceberg.commit.retry.min-wait-ms", |
| 513 | + "iceberg.commit.retry.num-retries", |
| 514 | + "table.datalake.freshness"); |
| 515 | + |
| 516 | + // remove existing properties |
| 517 | + flussIcebergCatalog.alterTable( |
| 518 | + tablePath, |
| 519 | + List.of( |
| 520 | + TableChange.reset("iceberg.commit.retry.min-wait-ms"), |
| 521 | + TableChange.reset("table.datalake.freshness")), |
| 522 | + context); |
| 523 | + assertThat(catalog.loadTable(TableIdentifier.of(database, tableName)).properties()) |
| 524 | + .containsEntry("commit.retry.num-retries", "10") |
| 525 | + .doesNotContainKeys( |
| 526 | + "commit.retry.min-wait-ms", |
| 527 | + "iceberg.commit.retry.min-wait-ms", |
| 528 | + "table.datalake.freshness", |
| 529 | + "fluss.table.datalake.freshness"); |
| 530 | + |
| 531 | +        // reset a non-existent property (should be a no-op) |
| 532 | + flussIcebergCatalog.alterTable( |
| 533 | + tablePath, List.of(TableChange.reset("iceberg.non-existing.property")), context); |
| 534 | + assertThat(catalog.loadTable(TableIdentifier.of(database, tableName)).properties()) |
| 535 | + .containsEntry("commit.retry.num-retries", "10") |
| 536 | + .doesNotContainKeys( |
| 537 | + "non-existing.property", |
| 538 | + "iceberg.non-existing.property", |
| 539 | + "commit.retry.min-wait-ms", |
| 540 | + "iceberg.commit.retry.min-wait-ms", |
| 541 | + "table.datalake.freshness", |
| 542 | + "fluss.table.datalake.freshness"); |
| 543 | + } |
458 | 544 | } |