
Integrating Spark with Elasticsearch


There are two ways to write to Elasticsearch from Spark:

1. Generate the _id and other document metadata yourself
2. Let Elasticsearch generate them by default

Add the corresponding dependency:

<dependency>
  <groupId>org.elasticsearch</groupId>
  <artifactId>elasticsearch-spark-13_2.10</artifactId>
  <version>5.0.1</version>
</dependency>
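
If the project is built with sbt rather than Maven, the same artifact can be declared as follows (a sketch using the identical coordinates; the Scala version 2.10 is baked into the artifact name, so a plain % dependency is used rather than %%):

libraryDependencies += "org.elasticsearch" % "elasticsearch-spark-13_2.10" % "5.0.1"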

Approach 1: generate the metadata yourself

import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark._
import utils.PropertiesUtils // project-local helper for reading config values (not shown in the original)
import scala.collection.immutable
import scala.collection.mutable.ListBuffer

object Spark_ES_WithMeta {

  val buffer = new ListBuffer[(String, immutable.Map[String, String])]

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Customer_Statistics").setMaster("local[2]")
    conf.set("es.nodes", "rmhadoop01,rmhadoop02,rmhadoop03")
    conf.set("es.port", "9200")
    conf.set("es.index.auto.create", "true")
    val sc = new SparkContext(conf)

    // Read the local file; collect() brings the rows back to the driver so that
    // appending to the driver-side buffer also works outside local mode
    sc.textFile("C:/work/ideabench/SparkSQL/data/es/gd_py_corp_sharehd_info.txt")
      .map(_.split("\\t"))
      .collect()
      .foreach(d => {
        if (PropertiesUtils.getStringByKey("gd_py_corp_sharehd_info").equals("one2many")) {
          val map = Map(
            "id" -> d(0),
            "batch_seq_num" -> d(1),
            "name" -> d(2),
            "contributiveFund" -> d(3),
            "contributivePercent" -> d(4),
            "currency" -> d(5),
            "contributiveDate" -> d(6),
            "corp_basic_info_id" -> d(7),
            "query_time" -> d(8)
          )
          // the first tuple element becomes the document _id
          buffer.append((d(0), map))
        } else if (PropertiesUtils.getStringByKey("gd_py_corp_sharehd_info").equals("one2one")) {
          // Map(d(1) -> gd_py_corp_sharehd_info(d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7), d(8)))
        }
      })

    sc.makeRDD(buffer).saveToEsWithMeta("spark/guofei_gd_py_corp_sharehd_info")
  }

  /**
    * Case class describing the table schema
    */
  case class gd_py_corp_sharehd_info(id: String, batch_seq_num: String,
                                     name: String, contributiveFund: String,
                                     contributivePercent: String, currency: String,
                                     contributiveDate: String, corp_basic_info_id: String,
                                     query_time: String)
}
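
Beyond supplying the _id as the tuple key, elasticsearch-hadoop also lets you attach other per-document metadata (version, routing, and so on) through the org.elasticsearch.spark.rdd.Metadata enum. A minimal sketch of that variant, with hypothetical documents and the index name reused from above:

import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark._
import org.elasticsearch.spark.rdd.Metadata._

object Spark_ES_WithMetadataEnum {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ES_Metadata_Sketch").setMaster("local[2]")
    conf.set("es.nodes", "rmhadoop01,rmhadoop02,rmhadoop03")
    conf.set("es.port", "9200")
    val sc = new SparkContext(conf)

    // hypothetical documents, for illustration only
    val doc1 = Map("id" -> "1", "name" -> "acme")
    val doc2 = Map("id" -> "2", "name" -> "globex")

    // per-document metadata: the _id plus an explicit version
    val meta1 = Map(ID -> "1", VERSION -> "1")
    val meta2 = Map(ID -> "2", VERSION -> "1")

    // tuples of (metadata map, document) are written with their metadata applied
    sc.makeRDD(Seq((meta1, doc1), (meta2, doc2)))
      .saveToEsWithMeta("spark/guofei_gd_py_corp_sharehd_info")
  }
}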
The Elasticsearch UI

[Figure: ES.png — the written index shown in the Elasticsearch UI]

Approach 2: let Elasticsearch generate the metadata

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark.sql._

object SparkSQL_ES {

  /**
    * Case class describing the table schema
    */
  case class gd_py_corp_sharehd_info(id: String, batch_seq_num: String,
                                     name: String, contributiveFund: String,
                                     contributivePercent: String, currency: String,
                                     contributiveDate: String, corp_basic_info_id: String,
                                     query_time: String)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Customer_Statistics").setMaster("local[2]")
    conf.set("es.nodes", "192.168.20.128")
    conf.set("es.port", "9200")
    conf.set("es.index.auto.create", "true")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    // implicit conversions from RDD to DataFrame
    import sqlContext.implicits._

    // Read the local file and map each row onto the case class
    val gd_py_corp_sharehd_infoDF = sc.textFile("C:/work/ideabench/SparkSQL/data/es/gd_py_corp_sharehd_info.txt")
      .map(_.split("\\t"))
      .map(d => gd_py_corp_sharehd_info(d(0), d(1), d(2), d(3), d(4), d(5), d(6), d(7), d(8)))
      .toDF()

    // Register a temporary table
    gd_py_corp_sharehd_infoDF.registerTempTable("gd_py_corp_sharehd_info")

    val result = sqlContext
      .sql("select * from gd_py_corp_sharehd_info limit 10")

    // no metadata is supplied, so Elasticsearch generates the _id of each document
    result.saveToEs("spark/gd_py_corp_sharehd_info")
  }
}
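
To verify the write, the same org.elasticsearch.spark.sql._ import also adds an esDF method to SQLContext for reading an index back as a DataFrame. A minimal sketch, assuming the index written above:

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark.sql._

object SparkSQL_ES_Read {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ES_Read_Sketch").setMaster("local[2]")
    conf.set("es.nodes", "192.168.20.128")
    conf.set("es.port", "9200")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // load the index back as a DataFrame; a query string can optionally be passed as a second argument
    val df = sqlContext.esDF("spark/gd_py_corp_sharehd_info")
    df.show(10)
  }
}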



Author: MichaelFly
Link: https://www.jianshu.com/p/a5c669d0ceba

