复制代码
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import java.util.Properties; public class MysqlTohive { // TODO: 2020/9/16 将mysql查询的表数据插入到hive库 public static void main(String[] args) { SparkSession spark = SparkSession.builder().appName("sqlToHive").getOrCreate(); Properties dbConfPro = new Properties(); dbConfPro.setProperty("user", "root"); dbConfPro.setProperty("password", "root"); String query="(select * from student) m"; Dataset<Row> result = spark.read().jdbc("jdbc:mysql://192.168.8.103:3306/test?rewriteBatchedStatements=true", query, dbConfPro); //创建对应的视图表 result.createOrReplaceTempView("student"); spark.catalog().setCurrentDatabase("test"); String query2="insert overwrite table student select * from student"; spark.sql(query2); } }
复制代码
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
所需pom.xml

<dependencies>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>2.1.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-sql_2.11</artifactId>
        <version>2.1.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-hive -->
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-hive_2.11</artifactId>
        <version>2.1.0</version>
        <scope>provided</scope>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.27</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.12.1</version>
    </dependency>
</dependencies>
最后
以上就是年轻小蜜蜂最近收集整理的关于将mysql查询的表数据插入到hive库的全部内容,更多相关将mysql查询内容请搜索靠谱客的其他文章。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复