1、配置 hadoop 环境并且指定 VM 参数

spark 本地连接hive集群

 

 

2、配置hive的依赖包

 <dependency>
      <!-- Hive support for Spark (required by enableHiveSupport), built for Scala 2.11 / Spark 2.3.0 -->
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-hive_2.11</artifactId>
      <version>2.3.0</version>
      <!-- 'provided' is deliberately commented out so the jar is on the classpath when
           running locally from the IDE; re-enable it when the cluster supplies Spark -->
      <!--<scope>provided</scope>-->
    </dependency>

3、读取配置文件

 // Load the cluster's Hadoop/Hive XML config files from local copies and push
 // every resolved property onto the SparkSession builder, so a locally-running
 // driver talks to the remote cluster.
 val sparkBuilder = SparkSession.builder
    // Configuration#iterator() returns a java.util.Iterator; it needs .asScala
    // to be usable in a Scala for-comprehension (the original loop would not
    // compile without this conversion in scope).
    import scala.collection.JavaConverters._
    val conf = new Configuration()
    // All config files live in one directory: add them in a single pass instead
    // of one hand-written val per file (this also removes the shadowing of `c`
    // between the Path val and the loop variable in the original).
    val resourceDir = "F:\\IdeaWorkspace\\lzm\\Resource\\"
    Seq("hdfs-site.xml", "core-site.xml", "hive-site.xml", "mapred-site.xml", "yarn-site.xml")
      .foreach(name => conf.addResource(new Path(resourceDir + name)))
    // Copy each resolved Hadoop property onto the Spark builder.
    for (entry <- conf.iterator().asScala) {
      sparkBuilder.config(entry.getKey, entry.getValue)
    }

4、连接hive

 val spark:SparkSession=sparkBuilder.master("local[2]").enableHiveSupport().getOrCreate()

5、配置连接账户

 System.setProperty("user.name", "dip")

 

相关文章:

  • 2021-08-05
  • 2021-08-20
  • 2022-12-23
  • 2022-12-23
  • 2022-12-23
  • 2022-12-23
  • 2021-11-03
  • 2022-12-23
猜你喜欢
  • 2022-12-23
  • 2021-06-16
  • 2022-12-23
  • 2022-12-23
  • 2021-08-09
  • 2022-12-23
  • 2021-05-24
相关资源
相似解决方案