/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.iceberg.flink;

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class LogStoreExample {

  private LogStoreExample() {}

  public static void main(String[] args) throws Exception {

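    // Local runtime configuration: checkpoint every 60s (streaming sinks commit on
    // checkpoints), keep checkpoint state in the JobManager, and start the Flink web UI.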
    Configuration configuration = new Configuration();
    // configuration.setString("table.exec.iceberg.use-flip27-source", "true");
    configuration.setString("execution.checkpointing.interval", "60s");
    configuration.setString("state.checkpoint-storage", "jobmanager");
    configuration.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);

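    // Create a local streaming environment with the web UI enabled and wrap it in a
    // table environment for SQL execution.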
    StreamExecutionEnvironment env =
        StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration);
    TableEnvironment tEnv = StreamTableEnvironment.create(env);

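    // Register an Iceberg catalog backed by the Hive Metastore and switch to the
    // database that holds the test tables.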
    tEnv.executeSql(
        "CREATE CATALOG hive_catalog WITH (\n"
            + "  'type'='iceberg',\n"
            + "  'uri'='thrift://flink03:9083',\n"
            + "  'warehouse'='hdfs://ns1/dtInsight/hive/warehouse'\n"
            + ")");

    tEnv.executeSql("USE CATALOG hive_catalog");

    tEnv.executeSql("USE sec_index");

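    // A plain filesystem/CSV table in the default catalog, used as the sink for the
    // streaming read below.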
    tEnv.executeSql("CREATE TABLE default_catalog.default_database.f (\n" +
        "  id BIGINT,\n" +
        "  name STRING\n" +
        ") WITH (\n" +
        "  'connector' = 'filesystem',\n" +
        "  'path' = 'file:///Users/ada/tmp/log-store',\n" +
        "  'format' = 'csv'\n" +
        ")");

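    // Experimental statements kept for reference: creating the format-v2 table with a
    // Kafka-backed log store, changing its log-store properties, and ad-hoc writes/reads.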
    // tEnv.executeSql("CREATE TABLE log_store_v2 (\n" +
    //     "  id BIGINT COMMENT 'unique id',\n" +
    //     "  name STRING\n" +
    //     ") WITH (\n" +
    //     "  'format-version' = '2',\n" +
    //     "  'log-store' = 'kafka',\n" +
    //     "  'kafka.bootstrap.servers' = '172.16.100.109:9092',\n" +
    //     "  'kafka.topic' = 'log-store.v2'\n" +
    //     ")");

    // tEnv.executeSql("ALTER TABLE log_store_v1 SET ('log-store.kafka.bootstrap.servers'='172.16.100.109:9092')");
    // tEnv.executeSql("ALTER TABLE log_store_v1 SET ('log-store.kafka.topic'='log-store.v2')");

    // tEnv.executeSql("INSERT INTO log_store_v2 VALUES (3, 'bar')");
    // tEnv.executeSql(
    //     "SELECT * FROM log_store_v2 /*+ OPTIONS('streaming'='true', 'monitor-interval'='1s')*/")
    //     .print();

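    // Active pipeline: stream changes from the Iceberg table log_store_v2 (1s monitor
    // interval; the 'log-store'='none' hint appears to bypass the Kafka log store) into
    // the filesystem table. await() blocks so the local job keeps running after main().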
    tEnv.executeSql(
            "INSERT INTO default_catalog.default_database.f SELECT * FROM log_store_v2 "
                + "/*+ OPTIONS('streaming'='true', 'monitor-interval'='1s', 'log-store'='none') */")
        .await();

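    // Further ad-hoc statements for manual testing: writing directly to the filesystem
    // table, and reading log_store_v2 with the 'log-store' option cleared.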
    // tEnv.executeSql(
    //     "INSERT INTO default_catalog.default_database.f VALUES(1, 'foo')");

    // tEnv.executeSql(
    //     "SELECT * FROM log_store_v2 /*+ OPTIONS('log-store'='') */")
    //     .print();
  }
}