diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ee19108103f9..71c10c8829a4 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -350,6 +350,7 @@ jobs:
!:trino-ignite,
!:trino-jdbc,
!:trino-kafka,
+ !:trino-lakehouse,
!:trino-main,
!:trino-mariadb,
!:trino-memory,
@@ -470,6 +471,7 @@ jobs:
- { modules: plugin/trino-iceberg, profile: minio-and-avro }
- { modules: plugin/trino-ignite }
- { modules: plugin/trino-kafka }
+ - { modules: plugin/trino-lakehouse }
- { modules: plugin/trino-mariadb }
- { modules: plugin/trino-mongodb }
- { modules: plugin/trino-mysql }
diff --git a/core/trino-server/src/main/provisio/trino.xml b/core/trino-server/src/main/provisio/trino.xml
index 039862e382fd..97a366b39290 100644
--- a/core/trino-server/src/main/provisio/trino.xml
+++ b/core/trino-server/src/main/provisio/trino.xml
@@ -148,6 +148,15 @@
+
+    <artifactSet to="plugin/lakehouse">
+        <artifact id="${project.groupId}:trino-lakehouse:zip:${project.version}">
+            <unpack />
+        </artifact>
+    </artifactSet>
+
diff --git a/docs/src/main/sphinx/connector.md b/docs/src/main/sphinx/connector.md
index 2d9781a69616..a954b30cf059 100644
--- a/docs/src/main/sphinx/connector.md
+++ b/docs/src/main/sphinx/connector.md
@@ -25,6 +25,7 @@ Iceberg
Ignite
JMX
Kafka
+Lakehouse
Loki
MariaDB
Memory
diff --git a/docs/src/main/sphinx/connector/lakehouse.md b/docs/src/main/sphinx/connector/lakehouse.md
new file mode 100644
index 000000000000..1c65e0f33c3f
--- /dev/null
+++ b/docs/src/main/sphinx/connector/lakehouse.md
@@ -0,0 +1,105 @@
+# Lakehouse connector
+
+The Lakehouse connector provides a unified way to interact with data stored
+in various table formats across different storage systems and metastore services.
+This single connector lets you query and write data regardless of whether it is
+stored in the Iceberg, Delta Lake, or Hudi table format, or in traditional Hive tables.
+
+This connector offers flexible connectivity to popular metastore services including
+AWS Glue and Hive Metastore. For data storage, it supports a wide range of options
+including cloud storage services such as AWS S3, S3-compatible storage,
+Google Cloud Storage (GCS), and Azure Blob Storage, as well as HDFS installations.
+
+The connector combines the features of the
+[Hive](/connector/hive), [Iceberg](/connector/iceberg),
+[Delta Lake](/connector/delta-lake), and [Hudi](/connector/hudi)
+connectors into a single connector. The configuration properties,
+session properties, table properties, and behavior come from the underlying
+connectors. Refer to the documentation of the underlying connectors
+for the table formats that you use.
+
+## General configuration
+
+To configure the Lakehouse connector, create a catalog properties file
+`etc/catalog/example.properties` with the following content, replacing the
+properties as appropriate:
+
+```text
+connector.name=lakehouse
+```
+
+You must configure an [AWS Glue or Hive metastore](/object-storage/metastores).
+The `hive.metastore` property also configures the Iceberg catalog, so do not
+specify `iceberg.catalog.type`.
+
+You must select and configure one of the
+[supported file systems](lakehouse-file-system-configuration).
+
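+For example, a catalog that uses a Hive Thrift metastore might start with the
+following configuration, where the metastore URI is a placeholder to replace
+with the address of your metastore:
+
+```text
+connector.name=lakehouse
+hive.metastore=thrift
+hive.metastore.uri=thrift://example.net:9083
+```
+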
+## Configuration properties
+
+The following configuration properties are available:
+
+:::{list-table}
+:widths: 30, 58, 12
+:header-rows: 1
+
+* - Property name
+ - Description
+ - Default
+* - `lakehouse.table-type`
+ - The default table type for newly created tables when the `type`
+ table property is not specified. Possible values:
+ * `HIVE`
+ * `ICEBERG`
+ * `DELTA`
+ - `ICEBERG`
+:::
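+
+For example, to create Hive tables by default when the `type` table property
+is not specified:
+
+```text
+lakehouse.table-type=HIVE
+```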
+
+(lakehouse-file-system-configuration)=
+## File system access configuration
+
+The connector supports accessing the following file systems:
+
+* [](/object-storage/file-system-azure)
+* [](/object-storage/file-system-gcs)
+* [](/object-storage/file-system-s3)
+* [](/object-storage/file-system-hdfs)
+
+You must enable and configure the specific file system access.
+
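+For example, to use the native S3 file system, enable it and set the region,
+shown here with a placeholder value to replace:
+
+```text
+fs.native-s3.enabled=true
+s3.region=us-east-1
+```
+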
+## Examples
+
+Create an Iceberg table:
+
+```sql
+CREATE TABLE iceberg_table (
+ c1 INTEGER,
+ c2 DATE,
+ c3 DOUBLE
+)
+WITH (
+ type = 'ICEBERG',
+ format = 'PARQUET',
+ partitioning = ARRAY['c1', 'c2'],
+ sorted_by = ARRAY['c3']
+);
+```
+
+Create a Hive table:
+
+```sql
+CREATE TABLE hive_page_views (
+ view_time TIMESTAMP,
+ user_id BIGINT,
+ page_url VARCHAR,
+ ds DATE,
+ country VARCHAR
+)
+WITH (
+ type = 'HIVE',
+ format = 'ORC',
+ partitioned_by = ARRAY['ds', 'country'],
+ bucketed_by = ARRAY['user_id'],
+ bucket_count = 50
+);
+```
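+
+Tables in any of the supported formats are then queried with the same SQL.
+For example, to count page views per user from the Hive table above:
+
+```sql
+SELECT user_id, count(*) AS views
+FROM hive_page_views
+GROUP BY user_id;
+```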
diff --git a/plugin/trino-lakehouse/pom.xml b/plugin/trino-lakehouse/pom.xml
new file mode 100644
index 000000000000..c838e5c5fd4c
--- /dev/null
+++ b/plugin/trino-lakehouse/pom.xml
@@ -0,0 +1,274 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>io.trino</groupId>
+        <artifactId>trino-root</artifactId>
+        <version>477-SNAPSHOT</version>
+        <relativePath>../../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>trino-lakehouse</artifactId>
+    <packaging>trino-plugin</packaging>
+    <description>Trino - Lakehouse connector</description>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.jspecify</groupId>
+                    <artifactId>jspecify</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.inject</groupId>
+            <artifactId>guice</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>bootstrap</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>configuration</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>json</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-delta-lake</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-filesystem-manager</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-hive</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-hudi</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.jspecify</groupId>
+                    <artifactId>jspecify</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-iceberg</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-metastore</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-plugin-toolkit</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>jakarta.validation</groupId>
+            <artifactId>jakarta.validation-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.weakref</groupId>
+            <artifactId>jmxutils</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>slice</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.opentelemetry</groupId>
+            <artifactId>opentelemetry-api</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.opentelemetry</groupId>
+            <artifactId>opentelemetry-api-incubator</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.opentelemetry</groupId>
+            <artifactId>opentelemetry-context</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-spi</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.openjdk.jol</groupId>
+            <artifactId>jol-core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>log-manager</artifactId>
+            <scope>runtime</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>configuration-testing</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>http-server</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>junit-extensions</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.airlift</groupId>
+            <artifactId>testing</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.minio</groupId>
+            <artifactId>minio</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-hdfs</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-hive</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-main</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-main</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-spi</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-testing</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-testing-containers</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-testing-services</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-tpcds</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-tpch</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.assertj</groupId>
+            <artifactId>assertj-core</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-engine</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.basepom.maven</groupId>
+                <artifactId>duplicate-finder-maven-plugin</artifactId>
+                <configuration>
+                    <ignoredResourcePatterns>
+                        <ignoredResourcePattern>iceberg-build.properties</ignoredResourcePattern>
+                        <ignoredResourcePattern>mozilla/public-suffix-list.txt</ignoredResourcePattern>
+                        <ignoredResourcePattern>mime.types</ignoredResourcePattern>
+                    </ignoredResourcePatterns>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConfig.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConfig.java
new file mode 100644
index 000000000000..4d8bc3e1221b
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConfig.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import io.airlift.configuration.Config;
+import jakarta.validation.constraints.NotNull;
+
+import static io.trino.plugin.lakehouse.TableType.ICEBERG;
+
+public class LakehouseConfig
+{
+ private TableType tableType = ICEBERG;
+
+ @NotNull
+ public TableType getTableType()
+ {
+ return tableType;
+ }
+
+ @Config("lakehouse.table-type")
+ public LakehouseConfig setTableType(TableType tableType)
+ {
+ this.tableType = tableType;
+ return this;
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnector.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnector.java
new file mode 100644
index 000000000000..8ee552c98930
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnector.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Inject;
+import io.airlift.bootstrap.LifeCycleManager;
+import io.trino.plugin.hive.HiveSchemaProperties;
+import io.trino.plugin.iceberg.IcebergMaterializedViewProperties;
+import io.trino.spi.connector.Connector;
+import io.trino.spi.connector.ConnectorCapabilities;
+import io.trino.spi.connector.ConnectorMetadata;
+import io.trino.spi.connector.ConnectorNodePartitioningProvider;
+import io.trino.spi.connector.ConnectorPageSinkProvider;
+import io.trino.spi.connector.ConnectorPageSourceProviderFactory;
+import io.trino.spi.connector.ConnectorSession;
+import io.trino.spi.connector.ConnectorSplitManager;
+import io.trino.spi.connector.ConnectorTransactionHandle;
+import io.trino.spi.session.PropertyMetadata;
+import io.trino.spi.transaction.IsolationLevel;
+
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Sets.immutableEnumSet;
+import static io.trino.spi.connector.ConnectorCapabilities.MATERIALIZED_VIEW_GRACE_PERIOD;
+import static io.trino.spi.connector.ConnectorCapabilities.NOT_NULL_COLUMN_CONSTRAINT;
+import static io.trino.spi.transaction.IsolationLevel.READ_UNCOMMITTED;
+import static io.trino.spi.transaction.IsolationLevel.checkConnectorSupports;
+import static java.util.Objects.requireNonNull;
+
+public class LakehouseConnector
+ implements Connector
+{
+ private final LifeCycleManager lifeCycleManager;
+ private final LakehouseTransactionManager transactionManager;
+ private final LakehouseSplitManager splitManager;
+ private final LakehousePageSourceProviderFactory pageSourceProviderFactory;
+ private final LakehousePageSinkProvider pageSinkProvider;
+ private final LakehouseNodePartitioningProvider nodePartitioningProvider;
+ private final LakehouseSessionProperties sessionProperties;
+ private final LakehouseTableProperties tableProperties;
+ private final IcebergMaterializedViewProperties materializedViewProperties;
+
+ @Inject
+ public LakehouseConnector(
+ LifeCycleManager lifeCycleManager,
+ LakehouseTransactionManager transactionManager,
+ LakehouseSplitManager splitManager,
+ LakehousePageSourceProviderFactory pageSourceProviderFactory,
+ LakehousePageSinkProvider pageSinkProvider,
+ LakehouseNodePartitioningProvider nodePartitioningProvider,
+ LakehouseSessionProperties sessionProperties,
+ LakehouseTableProperties tableProperties,
+ IcebergMaterializedViewProperties materializedViewProperties)
+ {
+ this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null");
+ this.transactionManager = requireNonNull(transactionManager, "transactionManager is null");
+ this.splitManager = requireNonNull(splitManager, "splitManager is null");
+ this.pageSourceProviderFactory = requireNonNull(pageSourceProviderFactory, "pageSourceProviderFactory is null");
+ this.pageSinkProvider = requireNonNull(pageSinkProvider, "pageSinkProvider is null");
+ this.nodePartitioningProvider = requireNonNull(nodePartitioningProvider, "nodePartitioningProvider is null");
+ this.sessionProperties = requireNonNull(sessionProperties, "sessionProperties is null");
+ this.tableProperties = requireNonNull(tableProperties, "tableProperties is null");
+ this.materializedViewProperties = requireNonNull(materializedViewProperties, "materializedViewProperties is null");
+ }
+
+ @Override
+ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
+ {
+ checkConnectorSupports(READ_UNCOMMITTED, isolationLevel);
+ return transactionManager.begin();
+ }
+
+ @Override
+ public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle)
+ {
+ return transactionManager.get(transactionHandle, session.getIdentity());
+ }
+
+ @Override
+ public ConnectorSplitManager getSplitManager()
+ {
+ return splitManager;
+ }
+
+ @Override
+ public ConnectorPageSourceProviderFactory getPageSourceProviderFactory()
+ {
+ return pageSourceProviderFactory;
+ }
+
+ @Override
+ public ConnectorPageSinkProvider getPageSinkProvider()
+ {
+ return pageSinkProvider;
+ }
+
+ @Override
+ public ConnectorNodePartitioningProvider getNodePartitioningProvider()
+ {
+ return nodePartitioningProvider;
+ }
+
+ @Override
+ public void commit(ConnectorTransactionHandle transactionHandle)
+ {
+ transactionManager.commit(transactionHandle);
+ }
+
+ @Override
+ public void rollback(ConnectorTransactionHandle transactionHandle)
+ {
+ transactionManager.rollback(transactionHandle);
+ }
+
+ @Override
+ public List<PropertyMetadata<?>> getSessionProperties()
+ {
+ return sessionProperties.getSessionProperties();
+ }
+
+ @Override
+ public List<PropertyMetadata<?>> getSchemaProperties()
+ {
+ return HiveSchemaProperties.SCHEMA_PROPERTIES;
+ }
+
+ @Override
+ public List<PropertyMetadata<?>> getTableProperties()
+ {
+ return tableProperties.getTableProperties();
+ }
+
+ @Override
+ public List<PropertyMetadata<?>> getMaterializedViewProperties()
+ {
+ return materializedViewProperties.getMaterializedViewProperties();
+ }
+
+ @Override
+ public void shutdown()
+ {
+ lifeCycleManager.stop();
+ }
+
+ @Override
+ public Set<ConnectorCapabilities> getCapabilities()
+ {
+ return immutableEnumSet(NOT_NULL_COLUMN_CONSTRAINT, MATERIALIZED_VIEW_GRACE_PERIOD);
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnectorFactory.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnectorFactory.java
new file mode 100644
index 000000000000..790757481b96
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseConnectorFactory.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Injector;
+import io.airlift.bootstrap.Bootstrap;
+import io.airlift.json.JsonModule;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.trace.Tracer;
+import io.trino.plugin.base.TypeDeserializerModule;
+import io.trino.plugin.base.jmx.ConnectorObjectNameGeneratorModule;
+import io.trino.plugin.base.jmx.MBeanServerModule;
+import io.trino.plugin.hive.NodeVersion;
+import io.trino.plugin.hive.security.HiveSecurityModule;
+import io.trino.spi.NodeManager;
+import io.trino.spi.PageIndexerFactory;
+import io.trino.spi.PageSorter;
+import io.trino.spi.VersionEmbedder;
+import io.trino.spi.catalog.CatalogName;
+import io.trino.spi.classloader.ThreadContextClassLoader;
+import io.trino.spi.connector.CatalogHandle;
+import io.trino.spi.connector.Connector;
+import io.trino.spi.connector.ConnectorContext;
+import io.trino.spi.connector.ConnectorFactory;
+import io.trino.spi.connector.MetadataProvider;
+import org.weakref.jmx.guice.MBeanModule;
+
+import java.util.Map;
+
+import static io.trino.plugin.base.Versions.checkStrictSpiVersionMatch;
+
+public class LakehouseConnectorFactory
+ implements ConnectorFactory
+{
+ @Override
+ public String getName()
+ {
+ return "lakehouse";
+ }
+
+ @Override
+ public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
+ {
+ checkStrictSpiVersionMatch(context, this);
+ try (var _ = new ThreadContextClassLoader(getClass().getClassLoader())) {
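+ // Assemble the Guice modules of the Hive, Iceberg, Delta Lake, and Hudi connectors alongside shared file system, metastore, and security infrastructure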
+ Bootstrap app = new Bootstrap(
+ new MBeanModule(),
+ new MBeanServerModule(),
+ new ConnectorObjectNameGeneratorModule("io.trino.plugin", "trino.plugin"),
+ new JsonModule(),
+ new TypeDeserializerModule(context.getTypeManager()),
+ new LakehouseModule(),
+ new LakehouseHiveModule(),
+ new LakehouseIcebergModule(),
+ new LakehouseDeltaModule(),
+ new LakehouseHudiModule(),
+ new HiveSecurityModule(),
+ new LakehouseFileSystemModule(catalogName, context),
+ binder -> {
+ binder.bind(OpenTelemetry.class).toInstance(context.getOpenTelemetry());
+ binder.bind(Tracer.class).toInstance(context.getTracer());
+ binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
+ binder.bind(NodeManager.class).toInstance(context.getNodeManager());
+ binder.bind(VersionEmbedder.class).toInstance(context.getVersionEmbedder());
+ binder.bind(MetadataProvider.class).toInstance(context.getMetadataProvider());
+ binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
+ binder.bind(CatalogHandle.class).toInstance(context.getCatalogHandle());
+ binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
+ binder.bind(PageSorter.class).toInstance(context.getPageSorter());
+ });
+
+ Injector injector = app
+ .doNotInitializeLogging()
+ .setRequiredConfigurationProperties(config)
+ .initialize();
+
+ return injector.getInstance(LakehouseConnector.class);
+ }
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseDeltaModule.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseDeltaModule.java
new file mode 100644
index 000000000000..cd30d99e515a
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseDeltaModule.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Binder;
+import com.google.inject.Scopes;
+import io.airlift.configuration.AbstractConfigurationAwareModule;
+import io.trino.plugin.deltalake.DataFileInfo;
+import io.trino.plugin.deltalake.DeltaLakeConfig;
+import io.trino.plugin.deltalake.DeltaLakeExecutorModule;
+import io.trino.plugin.deltalake.DeltaLakeMergeResult;
+import io.trino.plugin.deltalake.DeltaLakeMetadataFactory;
+import io.trino.plugin.deltalake.DeltaLakeNodePartitioningProvider;
+import io.trino.plugin.deltalake.DeltaLakePageSinkProvider;
+import io.trino.plugin.deltalake.DeltaLakePageSourceProvider;
+import io.trino.plugin.deltalake.DeltaLakeSessionProperties;
+import io.trino.plugin.deltalake.DeltaLakeSplitManager;
+import io.trino.plugin.deltalake.DeltaLakeSynchronizerModule;
+import io.trino.plugin.deltalake.DeltaLakeTableProperties;
+import io.trino.plugin.deltalake.DeltaLakeTransactionManager;
+import io.trino.plugin.deltalake.DeltaLakeWriterStats;
+import io.trino.plugin.deltalake.metastore.DeltaLakeMetastoreModule;
+import io.trino.plugin.deltalake.metastore.DeltaLakeTableMetadataScheduler;
+import io.trino.plugin.deltalake.metastore.file.DeltaLakeFileMetastoreModule;
+import io.trino.plugin.deltalake.metastore.glue.DeltaLakeGlueMetastoreModule;
+import io.trino.plugin.deltalake.metastore.thrift.DeltaLakeThriftMetastoreModule;
+import io.trino.plugin.deltalake.statistics.CachingExtendedStatisticsAccess;
+import io.trino.plugin.deltalake.statistics.CachingExtendedStatisticsAccess.ForCachingExtendedStatisticsAccess;
+import io.trino.plugin.deltalake.statistics.ExtendedStatistics;
+import io.trino.plugin.deltalake.statistics.ExtendedStatisticsAccess;
+import io.trino.plugin.deltalake.statistics.MetaDirStatisticsAccess;
+import io.trino.plugin.deltalake.transactionlog.TransactionLogAccess;
+import io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointSchemaManager;
+import io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointWriterManager;
+import io.trino.plugin.deltalake.transactionlog.checkpoint.LastCheckpoint;
+import io.trino.plugin.deltalake.transactionlog.reader.FileSystemTransactionLogReaderFactory;
+import io.trino.plugin.deltalake.transactionlog.reader.TransactionLogReaderFactory;
+import io.trino.plugin.deltalake.transactionlog.writer.FileSystemTransactionLogWriterFactory;
+import io.trino.plugin.deltalake.transactionlog.writer.NoIsolationSynchronizer;
+import io.trino.plugin.deltalake.transactionlog.writer.TransactionLogSynchronizerManager;
+import io.trino.plugin.deltalake.transactionlog.writer.TransactionLogWriterFactory;
+import io.trino.plugin.hive.metastore.MetastoreTypeConfig;
+
+import static io.airlift.configuration.ConfigBinder.configBinder;
+import static io.airlift.json.JsonCodecBinder.jsonCodecBinder;
+import static org.weakref.jmx.guice.ExportBinder.newExporter;
+
+public class LakehouseDeltaModule
+ extends AbstractConfigurationAwareModule
+{
+ @Override
+ protected void setup(Binder binder)
+ {
+ install(new DeltaLakeSynchronizerModule());
+ install(new DeltaLakeMetastoreModule());
+
+ configBinder(binder).bindConfig(DeltaLakeConfig.class);
+
+ binder.bind(DeltaLakeNodePartitioningProvider.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakePageSinkProvider.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakePageSourceProvider.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakeSessionProperties.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakeSplitManager.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakeTableProperties.class).in(Scopes.SINGLETON);
+
+ binder.bind(DeltaLakeTransactionManager.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakeMetadataFactory.class).in(Scopes.SINGLETON);
+ binder.bind(DeltaLakeWriterStats.class).in(Scopes.SINGLETON);
+ binder.bind(CheckpointSchemaManager.class).in(Scopes.SINGLETON);
+ binder.bind(CheckpointWriterManager.class).in(Scopes.SINGLETON);
+ binder.bind(TransactionLogReaderFactory.class).to(FileSystemTransactionLogReaderFactory.class).in(Scopes.SINGLETON);
+ binder.bind(TransactionLogWriterFactory.class).to(FileSystemTransactionLogWriterFactory.class).in(Scopes.SINGLETON);
+ binder.bind(TransactionLogSynchronizerManager.class).in(Scopes.SINGLETON);
+ binder.bind(NoIsolationSynchronizer.class).in(Scopes.SINGLETON);
+
+ binder.bind(CachingExtendedStatisticsAccess.class).in(Scopes.SINGLETON);
+ binder.bind(ExtendedStatisticsAccess.class).to(CachingExtendedStatisticsAccess.class).in(Scopes.SINGLETON);
+ binder.bind(ExtendedStatisticsAccess.class).annotatedWith(ForCachingExtendedStatisticsAccess.class).to(MetaDirStatisticsAccess.class).in(Scopes.SINGLETON);
+
+ binder.bind(TransactionLogAccess.class).in(Scopes.SINGLETON);
+ newExporter(binder).export(TransactionLogAccess.class).withGeneratedName();
+
+ binder.bind(DeltaLakeTableMetadataScheduler.class).in(Scopes.SINGLETON);
+ newExporter(binder).export(DeltaLakeTableMetadataScheduler.class).withGeneratedName();
+
+ jsonCodecBinder(binder).bindJsonCodec(DataFileInfo.class);
+ jsonCodecBinder(binder).bindJsonCodec(DeltaLakeMergeResult.class);
+ jsonCodecBinder(binder).bindJsonCodec(ExtendedStatistics.class);
+ jsonCodecBinder(binder).bindJsonCodec(LastCheckpoint.class);
+
+ install(switch (buildConfigObject(MetastoreTypeConfig.class).getMetastoreType()) {
+ case THRIFT -> new DeltaLakeThriftMetastoreModule();
+ case FILE -> new DeltaLakeFileMetastoreModule();
+ case GLUE -> new DeltaLakeGlueMetastoreModule();
+ });
+
+ binder.install(new DeltaLakeExecutorModule());
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseFileSystemModule.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseFileSystemModule.java
new file mode 100644
index 000000000000..ad151824f0ce
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseFileSystemModule.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Binder;
+import io.airlift.configuration.AbstractConfigurationAwareModule;
+import io.opentelemetry.api.OpenTelemetry;
+import io.trino.filesystem.manager.FileSystemModule;
+import io.trino.plugin.iceberg.IcebergConfig;
+import io.trino.spi.NodeManager;
+import io.trino.spi.connector.ConnectorContext;
+
+import static java.util.Objects.requireNonNull;
+
+class LakehouseFileSystemModule
+ extends AbstractConfigurationAwareModule
+{
+ private final String catalogName;
+ private final NodeManager nodeManager;
+ private final OpenTelemetry openTelemetry;
+
+ public LakehouseFileSystemModule(String catalogName, ConnectorContext context)
+ {
+ this.catalogName = requireNonNull(catalogName, "catalogName is null");
+ this.nodeManager = context.getNodeManager();
+ this.openTelemetry = context.getOpenTelemetry();
+ }
+
+ @Override
+ protected void setup(Binder binder)
+ {
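+ // A single file system serves all table formats, so the Iceberg metadata cache setting controls whether the shared file system layer caches metadata files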
+ boolean metadataCacheEnabled = buildConfigObject(IcebergConfig.class).isMetadataCacheEnabled();
+ install(new FileSystemModule(catalogName, nodeManager, openTelemetry, metadataCacheEnabled));
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHiveModule.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHiveModule.java
new file mode 100644
index 000000000000..2892a81d0116
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHiveModule.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Binder;
+import com.google.inject.Scopes;
+import io.airlift.configuration.AbstractConfigurationAwareModule;
+import io.trino.plugin.hive.HiveConfig;
+import io.trino.plugin.hive.HiveExecutorModule;
+import io.trino.plugin.hive.HiveFileWriterFactory;
+import io.trino.plugin.hive.HiveLocationService;
+import io.trino.plugin.hive.HiveMetadataFactory;
+import io.trino.plugin.hive.HiveNodePartitioningProvider;
+import io.trino.plugin.hive.HivePageSinkProvider;
+import io.trino.plugin.hive.HivePageSourceFactory;
+import io.trino.plugin.hive.HivePageSourceProvider;
+import io.trino.plugin.hive.HivePartitionManager;
+import io.trino.plugin.hive.HiveSessionProperties;
+import io.trino.plugin.hive.HiveSplitManager;
+import io.trino.plugin.hive.HiveTableProperties;
+import io.trino.plugin.hive.HiveTransactionManager;
+import io.trino.plugin.hive.HiveWriterStats;
+import io.trino.plugin.hive.LocationService;
+import io.trino.plugin.hive.PartitionUpdate;
+import io.trino.plugin.hive.PartitionsSystemTableProvider;
+import io.trino.plugin.hive.PropertiesSystemTableProvider;
+import io.trino.plugin.hive.RcFileFileWriterFactory;
+import io.trino.plugin.hive.SystemTableProvider;
+import io.trino.plugin.hive.TransactionalMetadataFactory;
+import io.trino.plugin.hive.avro.AvroFileWriterFactory;
+import io.trino.plugin.hive.avro.AvroPageSourceFactory;
+import io.trino.plugin.hive.fs.CachingDirectoryLister;
+import io.trino.plugin.hive.fs.DirectoryLister;
+import io.trino.plugin.hive.fs.TransactionScopeCachingDirectoryListerFactory;
+import io.trino.plugin.hive.line.CsvFileWriterFactory;
+import io.trino.plugin.hive.line.CsvPageSourceFactory;
+import io.trino.plugin.hive.line.JsonFileWriterFactory;
+import io.trino.plugin.hive.line.JsonPageSourceFactory;
+import io.trino.plugin.hive.line.OpenXJsonFileWriterFactory;
+import io.trino.plugin.hive.line.OpenXJsonPageSourceFactory;
+import io.trino.plugin.hive.line.RegexFileWriterFactory;
+import io.trino.plugin.hive.line.RegexPageSourceFactory;
+import io.trino.plugin.hive.line.SimpleSequenceFilePageSourceFactory;
+import io.trino.plugin.hive.line.SimpleSequenceFileWriterFactory;
+import io.trino.plugin.hive.line.SimpleTextFilePageSourceFactory;
+import io.trino.plugin.hive.line.SimpleTextFileWriterFactory;
+import io.trino.plugin.hive.metastore.HiveMetastoreConfig;
+import io.trino.plugin.hive.metastore.HiveMetastoreModule;
+import io.trino.plugin.hive.orc.OrcFileWriterFactory;
+import io.trino.plugin.hive.orc.OrcPageSourceFactory;
+import io.trino.plugin.hive.parquet.ParquetFileWriterFactory;
+import io.trino.plugin.hive.parquet.ParquetPageSourceFactory;
+import io.trino.plugin.hive.rcfile.RcFilePageSourceFactory;
+
+import java.util.Optional;
+
+import static com.google.inject.multibindings.Multibinder.newSetBinder;
+import static io.airlift.configuration.ConfigBinder.configBinder;
+import static io.airlift.json.JsonCodecBinder.jsonCodecBinder;
+import static org.weakref.jmx.guice.ExportBinder.newExporter;
+
+class LakehouseHiveModule
+ extends AbstractConfigurationAwareModule
+{
+ @Override
+ protected void setup(Binder binder)
+ {
+ install(new HiveMetastoreModule(Optional.empty()));
+
+ configBinder(binder).bindConfig(HiveConfig.class);
+ configBinder(binder).bindConfig(HiveMetastoreConfig.class);
+
+ binder.bind(HiveNodePartitioningProvider.class).in(Scopes.SINGLETON);
+ binder.bind(HivePageSinkProvider.class).in(Scopes.SINGLETON);
+ binder.bind(HivePageSourceProvider.class).in(Scopes.SINGLETON);
+ binder.bind(HiveSessionProperties.class).in(Scopes.SINGLETON);
+ binder.bind(HiveSplitManager.class).in(Scopes.SINGLETON);
+ binder.bind(HiveTableProperties.class).in(Scopes.SINGLETON);
+
+ binder.bind(HiveTransactionManager.class).in(Scopes.SINGLETON);
+ binder.bind(HiveMetadataFactory.class).in(Scopes.SINGLETON);
+ binder.bind(TransactionalMetadataFactory.class).to(HiveMetadataFactory.class).in(Scopes.SINGLETON);
+ binder.bind(HivePartitionManager.class).in(Scopes.SINGLETON);
+ binder.bind(LocationService.class).to(HiveLocationService.class).in(Scopes.SINGLETON);
+ binder.bind(TransactionScopeCachingDirectoryListerFactory.class).in(Scopes.SINGLETON);
+
+ jsonCodecBinder(binder).bindJsonCodec(PartitionUpdate.class);
+
+ binder.bind(HiveWriterStats.class).in(Scopes.SINGLETON);
+ newExporter(binder).export(HiveWriterStats.class).withGeneratedName();
+
+ binder.bind(CachingDirectoryLister.class).in(Scopes.SINGLETON);
+ newExporter(binder).export(CachingDirectoryLister.class).withGeneratedName();
+ binder.bind(DirectoryLister.class).to(CachingDirectoryLister.class).in(Scopes.SINGLETON);
+
+ binder.bind(OrcFileWriterFactory.class).in(Scopes.SINGLETON);
+ newExporter(binder).export(OrcFileWriterFactory.class).withGeneratedName();
+
+ var systemTableProviders = newSetBinder(binder, SystemTableProvider.class);
+ systemTableProviders.addBinding().to(PartitionsSystemTableProvider.class).in(Scopes.SINGLETON);
+ systemTableProviders.addBinding().to(PropertiesSystemTableProvider.class).in(Scopes.SINGLETON);
+
+ var pageSourceFactoryBinder = newSetBinder(binder, HivePageSourceFactory.class);
+ pageSourceFactoryBinder.addBinding().to(CsvPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(JsonPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(OpenXJsonPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(RegexPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(SimpleTextFilePageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(SimpleSequenceFilePageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(OrcPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(ParquetPageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(RcFilePageSourceFactory.class).in(Scopes.SINGLETON);
+ pageSourceFactoryBinder.addBinding().to(AvroPageSourceFactory.class).in(Scopes.SINGLETON);
+
+ var fileWriterFactoryBinder = newSetBinder(binder, HiveFileWriterFactory.class);
+ fileWriterFactoryBinder.addBinding().to(CsvFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(JsonFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(RegexFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(OpenXJsonFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(SimpleTextFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(SimpleSequenceFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(OrcFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(RcFileFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(AvroFileWriterFactory.class).in(Scopes.SINGLETON);
+ fileWriterFactoryBinder.addBinding().to(ParquetFileWriterFactory.class).in(Scopes.SINGLETON);
+
+ binder.install(new HiveExecutorModule());
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHudiModule.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHudiModule.java
new file mode 100644
index 000000000000..8861c369885b
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseHudiModule.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Binder;
+import com.google.inject.Scopes;
+import io.airlift.configuration.AbstractConfigurationAwareModule;
+import io.trino.plugin.hudi.HudiConfig;
+import io.trino.plugin.hudi.HudiExecutorModule;
+import io.trino.plugin.hudi.HudiMetadataFactory;
+import io.trino.plugin.hudi.HudiPageSourceProvider;
+import io.trino.plugin.hudi.HudiSessionProperties;
+import io.trino.plugin.hudi.HudiSplitManager;
+import io.trino.plugin.hudi.HudiTableProperties;
+import io.trino.plugin.hudi.HudiTransactionManager;
+
+import static io.airlift.configuration.ConfigBinder.configBinder;
+
+public class LakehouseHudiModule
+ extends AbstractConfigurationAwareModule
+{
+ @Override
+ protected void setup(Binder binder)
+ {
+ configBinder(binder).bindConfig(HudiConfig.class);
+
+ binder.bind(HudiPageSourceProvider.class).in(Scopes.SINGLETON);
+ binder.bind(HudiSessionProperties.class).in(Scopes.SINGLETON);
+ binder.bind(HudiSplitManager.class).in(Scopes.SINGLETON);
+ binder.bind(HudiTableProperties.class).in(Scopes.SINGLETON);
+
+ binder.bind(HudiTransactionManager.class).in(Scopes.SINGLETON);
+ binder.bind(HudiMetadataFactory.class).in(Scopes.SINGLETON);
+
+ binder.install(new HudiExecutorModule());
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseIcebergModule.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseIcebergModule.java
new file mode 100644
index 000000000000..ffaffb8fb1fd
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseIcebergModule.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.inject.Binder;
+import com.google.inject.Key;
+import com.google.inject.Scopes;
+import io.airlift.configuration.AbstractConfigurationAwareModule;
+import io.trino.metastore.HiveMetastoreFactory;
+import io.trino.metastore.RawHiveMetastoreFactory;
+import io.trino.plugin.hive.metastore.MetastoreTypeConfig;
+import io.trino.plugin.iceberg.CommitTaskData;
+import io.trino.plugin.iceberg.IcebergConfig;
+import io.trino.plugin.iceberg.IcebergExecutorModule;
+import io.trino.plugin.iceberg.IcebergFileSystemFactory;
+import io.trino.plugin.iceberg.IcebergFileWriterFactory;
+import io.trino.plugin.iceberg.IcebergMaterializedViewProperties;
+import io.trino.plugin.iceberg.IcebergMetadataFactory;
+import io.trino.plugin.iceberg.IcebergNodePartitioningProvider;
+import io.trino.plugin.iceberg.IcebergPageSinkProvider;
+import io.trino.plugin.iceberg.IcebergPageSourceProviderFactory;
+import io.trino.plugin.iceberg.IcebergSessionProperties;
+import io.trino.plugin.iceberg.IcebergSplitManager;
+import io.trino.plugin.iceberg.IcebergTableProperties;
+import io.trino.plugin.iceberg.IcebergTransactionManager;
+import io.trino.plugin.iceberg.TableStatisticsWriter;
+import io.trino.plugin.iceberg.catalog.file.IcebergFileMetastoreCatalogModule;
+import io.trino.plugin.iceberg.catalog.glue.IcebergGlueCatalogModule;
+import io.trino.plugin.iceberg.catalog.hms.IcebergHiveMetastoreCatalogModule;
+import io.trino.plugin.iceberg.catalog.rest.DefaultIcebergFileSystemFactory;
+
+import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder;
+import static io.airlift.configuration.ConfigBinder.configBinder;
+import static io.airlift.json.JsonCodecBinder.jsonCodecBinder;
+
+public class LakehouseIcebergModule
+ extends AbstractConfigurationAwareModule
+{
+ @Override
+ protected void setup(Binder binder)
+ {
+ configBinder(binder).bindConfig(IcebergConfig.class);
+
+ binder.bind(IcebergNodePartitioningProvider.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergPageSinkProvider.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergPageSourceProviderFactory.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergSessionProperties.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergSplitManager.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergTableProperties.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergMaterializedViewProperties.class).in(Scopes.SINGLETON);
+
+ binder.bind(IcebergTransactionManager.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergMetadataFactory.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergFileWriterFactory.class).in(Scopes.SINGLETON);
+ binder.bind(TableStatisticsWriter.class).in(Scopes.SINGLETON);
+ binder.bind(IcebergFileSystemFactory.class).to(DefaultIcebergFileSystemFactory.class).in(Scopes.SINGLETON);
+
+ newOptionalBinder(binder, Key.get(HiveMetastoreFactory.class, RawHiveMetastoreFactory.class));
+
+ jsonCodecBinder(binder).bindJsonCodec(CommitTaskData.class);
+
+ install(switch (buildConfigObject(MetastoreTypeConfig.class).getMetastoreType()) {
+ case THRIFT -> new IcebergHiveMetastoreCatalogModule();
+ case FILE -> new IcebergFileMetastoreCatalogModule();
+ case GLUE -> new IcebergGlueCatalogModule();
+ });
+
+ binder.install(new IcebergExecutorModule());
+ }
+}
diff --git a/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseMetadata.java b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseMetadata.java
new file mode 100644
index 000000000000..99cb65cff0e3
--- /dev/null
+++ b/plugin/trino-lakehouse/src/main/java/io/trino/plugin/lakehouse/LakehouseMetadata.java
@@ -0,0 +1,1030 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.lakehouse;
+
+import com.google.common.collect.Iterators;
+import io.airlift.slice.Slice;
+import io.trino.metastore.Table;
+import io.trino.plugin.deltalake.DeltaLakeInsertTableHandle;
+import io.trino.plugin.deltalake.DeltaLakeMergeTableHandle;
+import io.trino.plugin.deltalake.DeltaLakeMetadata;
+import io.trino.plugin.deltalake.DeltaLakeOutputTableHandle;
+import io.trino.plugin.deltalake.DeltaLakePartitioningHandle;
+import io.trino.plugin.deltalake.DeltaLakeTableHandle;
+import io.trino.plugin.deltalake.procedure.DeltaLakeTableExecuteHandle;
+import io.trino.plugin.hive.HiveInsertTableHandle;
+import io.trino.plugin.hive.HiveMergeTableHandle;
+import io.trino.plugin.hive.HiveOutputTableHandle;
+import io.trino.plugin.hive.HivePartitioningHandle;
+import io.trino.plugin.hive.HiveTableExecuteHandle;
+import io.trino.plugin.hive.HiveTableHandle;
+import io.trino.plugin.hive.TransactionalMetadata;
+import io.trino.plugin.hudi.HudiMetadata;
+import io.trino.plugin.hudi.HudiTableHandle;
+import io.trino.plugin.iceberg.IcebergMergeTableHandle;
+import io.trino.plugin.iceberg.IcebergMetadata;
+import io.trino.plugin.iceberg.IcebergPartitioningHandle;
+import io.trino.plugin.iceberg.IcebergTableHandle;
+import io.trino.plugin.iceberg.IcebergWritableTableHandle;
+import io.trino.plugin.iceberg.procedure.IcebergTableExecuteHandle;
+import io.trino.spi.RefreshType;
+import io.trino.spi.connector.AggregateFunction;
+import io.trino.spi.connector.AggregationApplicationResult;
+import io.trino.spi.connector.BeginTableExecuteResult;
+import io.trino.spi.connector.ColumnHandle;
+import io.trino.spi.connector.ColumnMetadata;
+import io.trino.spi.connector.ColumnPosition;
+import io.trino.spi.connector.ConnectorAccessControl;
+import io.trino.spi.connector.ConnectorAnalyzeMetadata;
+import io.trino.spi.connector.ConnectorInsertTableHandle;
+import io.trino.spi.connector.ConnectorMaterializedViewDefinition;
+import io.trino.spi.connector.ConnectorMergeTableHandle;
+import io.trino.spi.connector.ConnectorMetadata;
+import io.trino.spi.connector.ConnectorOutputMetadata;
+import io.trino.spi.connector.ConnectorOutputTableHandle;
+import io.trino.spi.connector.ConnectorPartitioningHandle;
+import io.trino.spi.connector.ConnectorSession;
+import io.trino.spi.connector.ConnectorTableExecuteHandle;
+import io.trino.spi.connector.ConnectorTableHandle;
+import io.trino.spi.connector.ConnectorTableLayout;
+import io.trino.spi.connector.ConnectorTableMetadata;
+import io.trino.spi.connector.ConnectorTableProperties;
+import io.trino.spi.connector.ConnectorTableSchema;
+import io.trino.spi.connector.ConnectorTableVersion;
+import io.trino.spi.connector.ConnectorViewDefinition;
+import io.trino.spi.connector.Constraint;
+import io.trino.spi.connector.ConstraintApplicationResult;
+import io.trino.spi.connector.LimitApplicationResult;
+import io.trino.spi.connector.MaterializedViewFreshness;
+import io.trino.spi.connector.ProjectionApplicationResult;
+import io.trino.spi.connector.RelationColumnsMetadata;
+import io.trino.spi.connector.RelationCommentMetadata;
+import io.trino.spi.connector.RelationType;
+import io.trino.spi.connector.RetryMode;
+import io.trino.spi.connector.RowChangeParadigm;
+import io.trino.spi.connector.SampleApplicationResult;
+import io.trino.spi.connector.SampleType;
+import io.trino.spi.connector.SaveMode;
+import io.trino.spi.connector.SchemaTableName;
+import io.trino.spi.connector.SchemaTablePrefix;
+import io.trino.spi.connector.SortItem;
+import io.trino.spi.connector.SystemTable;
+import io.trino.spi.connector.TableColumnsMetadata;
+import io.trino.spi.connector.TopNApplicationResult;
+import io.trino.spi.connector.WriterScalingOptions;
+import io.trino.spi.expression.ConnectorExpression;
+import io.trino.spi.expression.Constant;
+import io.trino.spi.function.LanguageFunction;
+import io.trino.spi.function.SchemaFunctionName;
+import io.trino.spi.security.GrantInfo;
+import io.trino.spi.security.Privilege;
+import io.trino.spi.security.RoleGrant;
+import io.trino.spi.security.TrinoPrincipal;
+import io.trino.spi.statistics.ComputedStatistics;
+import io.trino.spi.statistics.TableStatistics;
+import io.trino.spi.statistics.TableStatisticsMetadata;
+import io.trino.spi.type.Type;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.function.UnaryOperator;
+
+import static io.trino.plugin.hive.util.HiveUtil.isDeltaLakeTable;
+import static io.trino.plugin.hive.util.HiveUtil.isHudiTable;
+import static io.trino.plugin.hive.util.HiveUtil.isIcebergTable;
+import static io.trino.plugin.iceberg.IcebergTableName.isIcebergTableName;
+import static io.trino.plugin.iceberg.IcebergTableName.isMaterializedViewStorage;
+import static io.trino.plugin.lakehouse.LakehouseTableProperties.getTableType;
+import static java.util.Objects.requireNonNull;
+
+public class LakehouseMetadata
+ implements ConnectorMetadata
+{
+ private final LakehouseTableProperties tableProperties;
+ private final TransactionalMetadata hiveMetadata;
+ private final IcebergMetadata icebergMetadata;
+ private final DeltaLakeMetadata deltaMetadata;
+ private final HudiMetadata hudiMetadata;
+
+ public LakehouseMetadata(
+ LakehouseTableProperties tableProperties,
+ TransactionalMetadata hiveMetadata,
+ IcebergMetadata icebergMetadata,
+ DeltaLakeMetadata deltaMetadata,
+ HudiMetadata hudiMetadata)
+ {
+ this.tableProperties = requireNonNull(tableProperties, "tableProperties is null");
+ this.hiveMetadata = requireNonNull(hiveMetadata, "hiveMetadata is null");
+ this.icebergMetadata = requireNonNull(icebergMetadata, "icebergMetadata is null");
+ this.deltaMetadata = requireNonNull(deltaMetadata, "deltaMetadata is null");
+ this.hudiMetadata = requireNonNull(hudiMetadata, "hudiMetadata is null");
+ }
+
+ @Override
+ public boolean schemaExists(ConnectorSession session, String schemaName)
+ {
+ return hiveMetadata.schemaExists(session, schemaName);
+ }
+
+ @Override
+ public List<String> listSchemaNames(ConnectorSession session)
+ {
+ return hiveMetadata.listSchemaNames(session);
+ }
+
+ @Override
+ public ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName, Optional<ConnectorTableVersion> startVersion, Optional<ConnectorTableVersion> endVersion)
+ {
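+ // Route to the underlying connector: handle Iceberg-specific names first, then dispatch on the table format recorded in the metastore entry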
+ if (isIcebergTableName(tableName.getTableName()) && isMaterializedViewStorage(tableName.getTableName())) {
+ return icebergMetadata.getTableHandle(session, tableName, startVersion, endVersion);
+ }
+
+ Table table = hiveMetadata.getMetastore()
+ .getTable(tableName.getSchemaName(), tableName.getTableName())
+ .orElse(null);
+ if (table == null) {
+ return null;
+ }
+ if (isIcebergTable(table)) {
+ return icebergMetadata.getTableHandle(session, tableName, startVersion, endVersion);
+ }
+ if (isDeltaLakeTable(table)) {
+ return deltaMetadata.getTableHandle(session, tableName, startVersion, endVersion);
+ }
+ if (isHudiTable(table)) {
+ return hudiMetadata.getTableHandle(session, tableName, startVersion, endVersion);
+ }
+ return hiveMetadata.getTableHandle(session, tableName, startVersion, endVersion);
+ }
+
+ @Override
+ public Optional<ConnectorTableExecuteHandle> getTableHandleForExecute(ConnectorSession session, ConnectorAccessControl accessControl, ConnectorTableHandle tableHandle, String procedureName, Map<String, Object> executeProperties, RetryMode retryMode)
+ {
+ return forHandle(tableHandle).getTableHandleForExecute(session, accessControl, tableHandle, procedureName, executeProperties, retryMode);
+ }
+
+ @Override
+ public Optional<ConnectorTableLayout> getLayoutForTableExecute(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle)
+ {
+ return forHandle(tableExecuteHandle).getLayoutForTableExecute(session, tableExecuteHandle);
+ }
+
+ @Override
+ public BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginTableExecute(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle, ConnectorTableHandle updatedSourceTableHandle)
+ {
+ return forHandle(tableExecuteHandle).beginTableExecute(session, tableExecuteHandle, updatedSourceTableHandle);
+ }
+
+ @Override
+ public void finishTableExecute(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle, Collection<Slice> fragments, List