26. Reading the Flink Source: The SQL Execution Translation Process


How does Flink SQL get from a SQL statement or a Table API call to the final DataStream or DataSet job at execution time?
DEMO
The following is a Flink unit test that simulates a streaming (real-time) query:
@Test
	public void testSelect() throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
		StreamITCase.clear();

		// register a small (a: Integer, b: Long, c: String) tuple stream as a table
		DataStream<Tuple3<Integer, Long, String>> ds = JavaStreamTestData.getSmall3TupleDataSet(env);
		Table in = tableEnv.fromDataStream(ds, "a,b,c");
		tableEnv.registerTable("MyTable", in);

		String sqlQuery = "SELECT * FROM MyTable";
		Table result = tableEnv.sqlQuery(sqlQuery);

		// translate the Table back into a DataStream and run the job
		DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
		resultSet.addSink(new StreamITCase.StringSink<Row>());
		env.execute();

		List<String> expected = new ArrayList<>();
		expected.add("1,1,Hi");
		expected.add("2,2,Hello");
		expected.add("3,2,Hello world");

		StreamITCase.compareWithList(expected);
	}
Table registration
tableEnv.registerTable("MyTable", in);
==>
StreamTableEnvironment.registerDataStream
==>
registerDataStreamInternal
==>
registerTableInternal
==>
protected def registerTableInternal(name: String, table: AbstractTable): Unit = {
  if (isRegistered(name)) {
    throw new TableException(s"Table \'$name\' already exists. " +
      s"Please, choose a different name.")
  } else {
    rootSchema.add(name, table)
  }
}
The table is simply added to the root Calcite schema under the given name, so that later queries can resolve it.
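As a consequence, registering a second table under a name that is already taken fails fast. A minimal sketch of this behavior, reusing the demo's tableEnv and in (the duplicate call and the try/catch are illustrative, not part of the original test):

// a second registration under the same name hits the isRegistered branch
tableEnv.registerTable("MyTable", in);
try {
    tableEnv.registerTable("MyTable", in); // duplicate name
} catch (TableException e) {
    // "Table 'MyTable' already exists. Please, choose a different name."
    System.out.println(e.getMessage());
}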
How the Table object is produced
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

Table result = tableEnv.sqlQuery(sqlQuery);

==>
def sqlQuery(query: String): Table = {
   val planner = new FlinkPlannerImpl(getFrameworkConfig, getPlanner, getTypeFactory)
   // parse the sql query into a SqlNode; SqlNode is an abstract AST node
   // with subclasses such as SqlSelect, SqlDelete, SqlJoin, SqlAlter
   val parsed = planner.parse(query)
   if (null != parsed && parsed.getKind.belongsTo(SqlKind.QUERY)) {
     // validate the SqlNode (resolve names, check types)
     val validated = planner.validate(parsed)
     // transform to a relational tree: AST --> logical plan
     val relational = planner.rel(validated)
     // relational.rel is the logical plan, e.g. a LogicalProject node
     new Table(this, LogicalRelNode(relational.rel))
   } else {
     throw new TableException(
       "Unsupported SQL query! sqlQuery() only accepts SQL queries of type " +
         "SELECT, UNION, INTERSECT, EXCEPT, VALUES, and ORDER_BY.")
   }
 }
Creating the Table object therefore means parsing the SQL string into a SqlNode, validating it, and converting it into a logical plan. The whole call sequence is exactly Calcite's standard processing flow; see https://matt33.com/2019/03/07/apache-calcite-process-flow/ for a reference on Calcite.
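To inspect the logical plan a query produces without stepping through the debugger, you can print the plan of the resulting Table. A small sketch reusing the demo's tableEnv and result (assuming the explain(Table) method of TableEnvironment in these Flink versions):

// Prints the abstract syntax tree (the Calcite logical plan, here a
// LogicalProject over the registered table scan) followed by the
// optimized physical execution plan derived from it.
String plan = tableEnv.explain(result);
System.out.println(plan);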
How a Table is translated into a DataStream
DataStream<Row> resultSet = tableEnv.toAppendStream(result, Row.class);
resultSet.addSink(new StreamITCase.StringSink());
env.execute();

==>

def toAppendStream[T](
     table: Table,
     clazz: Class[T],
     queryConfig: StreamQueryConfig): DataStream[T] = {
   // derive the TypeInformation of the requested output class
   val typeInfo = TypeExtractor.createTypeInfo(clazz)
   TableEnvironment.validateType(typeInfo)
   // append-only translation: no retractions, no change flag
   translate[T](table, queryConfig, updatesAsRetraction = false, withChangeFlag = false)(typeInfo)
 }
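Note that updatesAsRetraction is fixed to false here: toAppendStream is only valid for queries whose result is append-only, and an updating query fails at translation time. For updating results Flink offers toRetractStream instead; a sketch against the demo's MyTable (the aggregation query is illustrative):

// An aggregation updates earlier results, so it cannot be emitted append-only.
Table agg = tableEnv.sqlQuery("SELECT c, COUNT(*) FROM MyTable GROUP BY c");
// Each element carries a change flag: f0 == true is an insert,
// f0 == false retracts a previously emitted row.
DataStream<Tuple2<Boolean, Row>> retractStream =
        tableEnv.toRetractStream(agg, Row.class);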

==>

protected def translate[A](
    table: Table,
    queryConfig: StreamQueryConfig,
    updatesAsRetraction: Boolean,
    withChangeFlag: Boolean)(implicit tpe: TypeInformation[A]): DataStream[A] = {
  // get the logical plan behind the Table
  val relNode = table.getRelNode
  // optimize the logical plan and turn it into a DataStream (physical) plan
  val dataStreamPlan = optimize(relNode, updatesAsRetraction)

  val rowType = getResultType(relNode, dataStreamPlan)

  translate(dataStreamPlan, rowType, queryConfig, withChangeFlag)
}
==> translateToCRow ==> DataStreamScan.translateToPlan ==> convertToInternalRow ==> generateConversionProcessFunction
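convertToInternalRow wraps every record of the source DataStream into Flink's internal CRow representation, and generateConversionProcessFunction code-generates that wrapper. Conceptually, for the demo's Tuple3 input the generated function boils down to something like the following hand-written stand-in (a sketch, not the actual generated code):

// Conceptual equivalent of the code-generated conversion function:
// wrap each source record into a CRow (row payload + change flag).
public class Tuple3ToCRowSketch
        extends ProcessFunction<Tuple3<Integer, Long, String>, CRow> {
    @Override
    public void processElement(Tuple3<Integer, Long, String> value,
                               Context ctx,
                               Collector<CRow> out) throws Exception {
        out.collect(new CRow(Row.of(value.f0, value.f1, value.f2), true));
    }
}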
The DataSet side goes through the same translation process, so in the end a SQL query runs as an ordinary DataStream or DataSet job.