Overview
1. Simulating the data
Requirement: simulate 30 users (phone numbers) and store them in MySQL; generate one order record every 5 seconds.
Output format: order id, phone number, ride time, start location, end location, distance, price, pooled or not, platform, service type (专车/快车/顺风车), city, cross-city or not, abnormal or not, name, age, credit score
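For reference, the record layout can be sketched as a Scala case class (the field names here are illustrative assumptions; the producer below actually emits each record as one comma-separated string, and the order id is generated later by the consumer):
case class OrderRecord(
phone: String, rideTime: String, startAddress: String, endAddress: String,
distance: Int, price: Double, pooled: Boolean, platform: String,
serviceType: String, city: String, crossCity: Boolean, abnormal: Boolean,
name: String, age: Int, creditScore: Int)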
package Student_DIDI.KafkaServres
import Student_DIDI.Common
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import java.time.LocalDateTime
import java.util
import java.util.Properties
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.Random
/** ****************************************
*
* @ClassName KafkaStreamingProducer
* @Description Utility class that produces simulated order data in real time
* @Author sanglong
* @Date 2022/8/4 9:11
* @Version 1.8
* *************************************** */
object KafkaStreamingProducer {
//Seed data used to generate the simulated records
//Phone number prefixes
val phone_array = Array(130, 159, 181, 133, 177)
//City list
val city_array = Array("哈尔滨", "齐齐哈尔", "牡丹江")
//District lists for each city
val area_hrb = Array("南岗区", "道里区", "道外区", "香坊区", "松北区", "平房区", "呼兰区", "阿城区")
val nangang_address = Array("南岗区-王岗大街", "南岗区-中和街")
val daoli_address = Array("道里区-建国街", "道里区-顾新街")
val daowai_address = Array("道外区-南康街", "道外区-南直路")
val xiangfang_address = Array("香坊区-通站街", "香坊区-中山路")
val songbei_address = Array("松北区-创新一路", "松北区-创新路")
val pingfang_address = Array("平房区-新疆大街", "平房区-友协大街")
val hulan_address = Array("呼兰区-北大街", "呼兰区-公园路")
val acheng_address = Array("阿城区-上京大道", "阿城区-延川大街")
val area_qqhe = Array("龙沙区", "建华区", "龙江县")
val longsha_address = Array("龙沙区-开发路", "龙沙区-丰恒路")
val jianhua_address = Array("建华区-中华南路", "建华区-双华路")
val longjiang_address = Array("龙江县-龙江镇正阳路", "龙江县-通达街")
val area_mdj = Array("爱民区", "东安区", "西安区", "阳明区")
val aimin_address = Array("爱民区-天安路", "爱民区-向阳街")
val dongan_address = Array("东安区-东长安街", "东安区-东新安街")
val xian_address = Array("西安区-西长安街", "西安区-仙城大街")
val yangming_address = Array("阳明区-光华街", "阳明区-太平路")
val serviceTypeList = List("顺风车", "专车", "快车")
val platformList = List("花小打车", "众出行", "滴打车")
val rand = new Random()
val surname_array = Array("赵", "钱", "孙", "李", "周")
val name_array = Array("问天", "浩然", "诗诗", "傲天", "败天", "战天")
val phoneListBuff = new ListBuffer[String]
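//Shared buffer: filled once by getUser() and later reused by genTestData() to pick a random caller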
//Build 30 users: 5 phone prefixes x 6 random numbers each, mapped to 30 generated names
def getUser(): util.HashMap[String, String] = {
for (k <- phone_array) {
for (i <- 0 to 5) {
val phonenum = new StringBuffer()
phonenum.append(k)
for (j <- 0 to 7) {
phonenum.append(rand.nextInt(10))
}
phoneListBuff.append(phonenum.toString)
}
}
val phoneList = phoneListBuff.toList
val userMap = new util.HashMap[String, String]()
val nameListBuff = new ListBuffer[String]
for (i <- surname_array) {
for (j <- name_array) {
nameListBuff.append(i.concat(j))
}
}
val nameList = nameListBuff.toList
for (i <- 0 to 29) {
userMap.put(phoneList(i), nameList(i))
}
userMap
}
/**
 * Generate one test record:
 * phonenum city+area time distance price ...
 */
def genTestData(userMap: util.HashMap[String, String]) = {
val dataList = mutable.ListBuffer[String]()
//Each call to this method produces one record
//Field 1: phone number
val tempPhone = new StringBuffer()
tempPhone.append(phoneListBuff.toList(rand.nextInt(30)))
//Field 2: city + district + start and end addresses
//Lookup tables built from the seed arrays above: districts per city, streets per district
val areasByCity = Map("哈尔滨" -> area_hrb, "齐齐哈尔" -> area_qqhe, "牡丹江" -> area_mdj)
val streetsByArea = Map(
"南岗区" -> nangang_address, "道里区" -> daoli_address, "道外区" -> daowai_address,
"香坊区" -> xiangfang_address, "松北区" -> songbei_address, "平房区" -> pingfang_address,
"呼兰区" -> hulan_address, "阿城区" -> acheng_address,
"龙沙区" -> longsha_address, "建华区" -> jianhua_address, "龙江县" -> longjiang_address,
"爱民区" -> aimin_address, "东安区" -> dongan_address, "西安区" -> xian_address,
"阳明区" -> yangming_address)
val addressbuff = new ListBuffer[(String, String)]
val resultCityBuff = new ListBuffer[String]
for (i <- 0 to 1) {
val city = city_array(rand.nextInt(city_array.length))
resultCityBuff.append(city)
//Pick a random district in the chosen city, then a random street plus house number
val areas = areasByCity(city)
val area = areas(rand.nextInt(areas.length))
val streets = streetsByArea(area)
val startOrEndAddress = streets(rand.nextInt(streets.length)).concat(rand.nextInt(100).toString + "号")
//Iteration 0 is the start, iteration 1 is the destination
addressbuff.append((city + area, startOrEndAddress))
}
//Convert the mutable buffers to immutable lists
val addressList = addressbuff.toList
val resultCityList = resultCityBuff.toList
//Field 3: ride time
val dateTime = LocalDateTime.now().toString
//Field 4: distance; also decide whether the trip is cross-district (cross-district trips get at least 100 extra)
var distance = 0
var interregional = false
//Compare the city+district of the start and the destination
if (addressList(0)._1.equals(addressList(1)._1)) {
distance = rand.nextInt(70)
interregional = false
} else {
distance = rand.nextInt(201) + 100
interregional = true
}
//Field 5: price; a pooled ride is charged 90% of the fare, and the service type applies its own multiplier
//Only non-pooled rides get to choose a service type
//Base fare
var price = 10.0
//Default service type is 顺风车
var serviceType = serviceTypeList(0)
//Randomly decide whether the ride is pooled
val spell = rand.nextBoolean()
if (distance > 3) {
//2 yuan per kilometre beyond the first 3 km
val morePrice = 2 * (distance - 3)
price += morePrice
}
//If pooled, apply the discount; otherwise randomly assign a service type
if (spell) {
price = price * 0.9
} else {
serviceType = serviceTypeList(rand.nextInt(3))
//专车 is charged at 140%, 快车 at 120%
serviceType match {
case "专车" => price = price * 1.4
case "快车" => price = price * 1.2
case _ => //顺风车 keeps the base fare
}
}
//Each platform applies its own price factor, chosen at random
val platform = platformList(rand.nextInt(3))
platform match {
case "花小打车" => price = price * 0.9
case "众出行" => price = price * 1.1
case "滴打车" => price = price * 0.8
}
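//Worked example (illustrative): distance = 10 km -> price = 10 + 2 * (10 - 3) = 24.0;
//pooled -> 24.0 * 0.9 = 21.6; on 滴打车 -> 21.6 * 0.8 = 17.28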
//Whether the user cancelled before departure; a cancelled order counts as abnormal. Most orders should be normal, so cancellation is made unlikely (about 1 in 6)
val Cancel_order = rand.nextInt(6) > 4
//Assemble the record: phone, ride time, start, end, distance, price, pooled, platform, service type, city, cross-city, abnormal, name, age, credit score (the order id is added downstream by the consumer)
val record = List(
tempPhone.toString, dateTime,
addressList(0)._1.concat(addressList(0)._2),
addressList(1)._1.concat(addressList(1)._2),
distance, price, spell, platform, serviceType,
resultCityList(0), interregional, Cancel_order,
userMap.get(tempPhone.toString), "16", "0"
).mkString(",")
dataList.append(record)
//Return the generated data
dataList
}
/**
 * Kafka producer: sends the randomly generated records to Kafka
 */
def kafkaProducer() = {
//Producer properties
val prop = new Properties()
prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, Common.Common.BOOTSTRAP)
prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, Common.Common.SERIALIZER_CLASS)
prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, Common.Common.SERIALIZER_CLASS)
//Create the Kafka producer
val producer = new KafkaProducer[String, String](prop)
val userMap = getUser()
//Infinite loop to simulate a never-ending real-time feed
while (true) {
val list = genTestData(userMap).toList
//Send each record to the Kafka topic
val record = new ProducerRecord[String, String](Common.Common.KAFKA_TOPIC_ORACLE, list(0))
producer.send(record)
//One record every 5 seconds (each genTestData call yields one record)
Thread.sleep(5000)
}
}
def main(args: Array[String]): Unit = {
kafkaProducer()
}
}
2. Loading the simulated data into the target tables
package Student_DIDI.KafkaServres
import Student_DIDI.Common
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import java.util.Properties
/** ****************************************
*
* @ClassName KafkaStreamingConsumer
* @Description Consumes the data produced to Kafka in real time
* @Author sanglong
* @Date 2022/8/4 9:09
* @Version 1.8
* *************************************** */
object KafkaStreamingConsumer {
//MySQL connection settings, defined once here so they are not rebuilt on every write
val prop: Properties = new Properties()
prop.put(Common.Common.MYSQL_USER, Common.Common.MYSQL_USERNAME_VALUE)
prop.put(Common.Common.MYSQL_PASSWORD, Common.Common.MYSQL_PASSWORD_VALUE)
/**
 * Kafka consumer
 *
 * @param ssc streaming context
 */
def kafkaConsumer(ssc: StreamingContext): Unit = {
//Consumer properties
val kafkaPara = Map[String, Object](
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> Common.Common.BOOTSTRAP,
ConsumerConfig.GROUP_ID_CONFIG -> Common.Common.KAFKA_GROUP_ORACLE,
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> Common.Common.DESERIALIZER_CLASS,
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> Common.Common.DESERIALIZER_CLASS
)
//Create the direct stream (DStream) via KafkaUtils
val kafkaData = KafkaUtils.createDirectStream[String, String](
ssc,
LocationStrategies.PreferConsistent,
ConsumerStrategies.Subscribe[String, String](Set(Common.Common.KAFKA_TOPIC_ORACLE), kafkaPara)
)
// kafkaData.persist()
//Write every consumed record to the table in full, preserving the raw order data
kafkaData.map(data => {
val singleLineData = data.value()
val fields = singleLineData.split(",")
// phone, ride time, start address, end address, distance, price, pooled, platform, service type, city, cross-city, abnormal, name, age, credit score
(fields(0), fields(1), fields(2), fields(3), fields(4).toInt, fields(5).toDouble, fields(6), fields(7), fields(8), fields(9), fields(10), fields(11), fields(12), fields(13), fields(14))
}).foreachRDD(rdd => {
val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
import spark.implicits._
val orderDF = rdd.toDF("phone", "time", "startAddress", "endAddress", "distance", "price", "spell", "platform", "serviceType", "startCity", "interregional", "Cancel_order", "name", "age", "score")
orderDF.createOrReplaceTempView("order")
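//Caution: the view name "order" clashes with the SQL ORDER keyword; if the parser rejects it, rename the view (e.g. "orders") here and in the queries below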
val resultTableDF = spark.sql(
"""
|select UUID() as id,phone,time,startAddress,endAddress,
|distance,round(price,1) as price,spell,platform,serviceType,
|startCity,interregional,Cancel_order
|from
|order
|""".stripMargin).toDF("id", "phone", "time", "startAddress", "endAddress", "distance", "price", "spell", "platform", "serviceType", "startCity", "interregional", "Cancel_order")
//Append each micro-batch so order history accumulates; Overwrite would keep only the latest batch
resultTableDF.write.mode(SaveMode.Append).jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_ORDERDATA,
prop
)
val userTableDF = spark.sql(
"""
|select phone,name,age,score
|from
|order
|""".stripMargin).toDF("phone", "name", "age", "score")
//Overwrite keeps only the users seen in the current batch; a production job would upsert on phone instead
userTableDF.write.mode(SaveMode.Overwrite).jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_USER,
prop
)
})
}
def main(args: Array[String]): Unit = {
val ssc = Common.Common.getStreamingContext("local[*]", "app")
//Checkpoint directory, required if stateful operations such as updateStateByKey are added
ssc.checkpoint(Common.Common.FOREACHRDD_CHECKPOINT_DIR)
kafkaConsumer(ssc)
ssc.start()
ssc.awaitTermination()
}
}
3. Analyzing user credit scores
package Student_DIDI.Process
import Student_DIDI.Common
import org.apache.spark.sql.{SaveMode, SparkSession}
import java.util.Properties
/** ****************************************
*
* @ClassName ScoreAdd
* @Description User credit scoring job
* @Author sanglong
* @Date 2022/8/4 20:22
* @Version 1.8
* *************************************** */
object ScoreAdd {
//MySQL connection settings, defined once here so they are not rebuilt on every write
val prop: Properties = new Properties()
prop.put(Common.Common.MYSQL_USER, Common.Common.MYSQL_USERNAME_VALUE)
prop.put(Common.Common.MYSQL_PASSWORD, Common.Common.MYSQL_PASSWORD_VALUE)
def orderAnalysis(spark: SparkSession) = {
val orderData = spark.read.jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_ORDERDATA,
prop
)
orderData.createOrReplaceTempView("orderDataView")
val userData = spark.read.jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_USER,
prop
)
//Cache and materialise the user data first: the query below overwrites the same table, and a lazy JDBC read would otherwise see the truncated table
userData.cache().count()
userData.createOrReplaceTempView("userView")
//For every run of 5 consecutive non-abnormal orders by the same user, add 1 to the credit score
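//How the query works, from the inside out:
//1. count(*) over (partition by phone order by time) numbers each user's orders cumulatively (num);
//2. the next level keeps only non-cancelled orders;
//3. lag(num) per phone checks that the previous kept order was the immediately preceding one (tempNum = num-1);
//4. rows where num is a multiple of 5 award num/5 bonus points;
//5. the left join adds the bonus onto each user's score (nvl covers users with no bonus).
//Note this approximates "5 consecutive normal orders": it directly verifies only the 5th and 4th orders.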
spark.sql(
"""
|select userView.phone phone,name,age,(score+nvl(add,0)) score
|from
|userView left join
|(
|select phone,max(num)/5 add
|from
|(
|select phone,Cancel_order,num,lag(num,1,0) over(partition by phone order by num) tempNum
|from
|(
|select phone,Cancel_order,num
|from
|(
|select phone,Cancel_order,time,count(*) over(partition by phone order by time) num
|from
|orderDataView
|)
|where Cancel_order=false
|)
|)
|where num%5=0 and tempNum=(num-1)
|group by phone
|)
|l1 on
|userView.phone=l1.phone
|""".stripMargin).write.mode(SaveMode.Overwrite).jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_USER,
prop
)
}
def main(args: Array[String]): Unit = {
orderAnalysis(Common.Common.getSparkSession("local[*]", "orderAnalysis"))
}
}
4. Daily per-platform share of cross-city orders, and daily per-platform order-count ranking
package Student_DIDI.Process
import Student_DIDI.Common
import org.apache.spark.sql.{SaveMode, SparkSession}
import java.util.Properties
/** ****************************************
*
* @ClassName OrderRank
* @Description Daily per-platform order-count ranking
* @Author sanglong
* @Date 2022/8/4 20:53
* @Version 1.8
* *************************************** */
object OrderRank {
//MySQL connection settings, defined once here so they are not rebuilt on every write
val prop: Properties = new Properties()
prop.put(Common.Common.MYSQL_USER, Common.Common.MYSQL_USERNAME_VALUE)
prop.put(Common.Common.MYSQL_PASSWORD, Common.Common.MYSQL_PASSWORD_VALUE)
def dayOrderRank(spark: SparkSession) = {
val orderData = spark.read.jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_ORDERDATA,
prop
)
orderData.createOrReplaceTempView("orderDataView")
// Daily per-platform order-count ranking
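// substring(time,6,5) extracts "MM-dd" from the ISO timestamp, e.g. "2022-08-04T09:11:00" -> "08-04"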
val rankTableDF = spark.sql(
"""
|select *,dense_rank() over(partition by time order by number desc) rank
|from
|(
|select platform,time,count(*) number
|from
|(
|select platform,substring(time,6,5) as time
|from
|orderDataView
|)
|group by platform,time
|)
|""".stripMargin)
rankTableDF.createOrReplaceTempView("rankView")
rankTableDF.write.mode(SaveMode.Overwrite).jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_RANK,
prop
)
// Share of cross-city orders in all orders, per platform per day
spark.sql(
"""
|select l1.platform platform,l1.time time,round((interregionalNumber/number),2) proportion
|from
|(
|select platform,time,count(*) interregionalNumber
|from
|(
|select platform,substring(time,6,5) as time,interregional
|from
|orderDataView
|)
|where interregional=true
|group by platform,time
|) l1,rankView
|where rankView.platform=l1.platform and
|rankView.time=l1.time
|""".stripMargin).write.mode(SaveMode.Overwrite).jdbc(
Common.Common.MYSQL_URL_VALUE_TAIX,
Common.Common.MYSQL_TABLE_PROPORTION,
prop
)
}
def main(args: Array[String]): Unit = {
dayOrderRank(Common.Common.getSparkSession("local[*]", "dayOrderRank"))
}
}
5. Statistical analysis of orders by city
package Student_DIDI.Process
import Student_DIDI.Common
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import java.util.Properties
/** ****************************************
*
* @ClassName OrderDQL
* @Description Counts orders per city once every minute
* @Author sanglong
* @Date 2022/8/4 20:21
* @Version 1.8
* *************************************** */
object OrderDQL {
//MySQL connection settings, defined once here so they are not rebuilt on every write
val prop: Properties = new Properties()
prop.put(Common.Common.MYSQL_USER, Common.Common.MYSQL_USERNAME_VALUE)
prop.put(Common.Common.MYSQL_PASSWORD, Common.Common.MYSQL_PASSWORD_VALUE)
/**
 * Kafka consumer
 *
 * @param ssc streaming context
 */
def kafkaConsumer(ssc: StreamingContext): Unit = {
//Consumer properties
//Note: give this job a group.id distinct from KafkaStreamingConsumer's when running both, so each stream receives the full topic
val kafkaPara = Map[String, Object](
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> Common.Common.BOOTSTRAP,
ConsumerConfig.GROUP_ID_CONFIG -> Common.Common.KAFKA_GROUP_ORACLE,
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> Common.Common.DESERIALIZER_CLASS,
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> Common.Common.DESERIALIZER_CLASS
)
//Create the direct stream (DStream) via KafkaUtils
val kafkaData = KafkaUtils.createDirectStream[String, String](
ssc,
LocationStrategies.PreferConsistent,
ConsumerStrategies.Subscribe[String, String](Set(Common.Common.KAFKA_TOPIC_ORACLE), kafkaPara)
)
// kafkaData.persist()
//Extract ride time and start city from each record, then classify each city's demand once per one-minute window
kafkaData.map(data => {
val singleLineData = data.value()
val fields = singleLineData.split(",")
// phone, ride time, start address, end address, distance, price, pooled, platform, service type, city, cross-city, abnormal, name, age, credit score
(fields(1), fields(9))
//A 60-second tumbling window implements the "query once per minute" behaviour
}).window(Seconds(60), Seconds(60)).foreachRDD(rdd => {
val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
import spark.implicits._
val orderDF = rdd.toDF("time", "startCity")
orderDF.createOrReplaceTempView("order")
spark.sql(
"""
|select startCity,number,time,
|case
|when number<5 then '用车低谷期'
|when number>=5 and number<=10 then '用车普通期'
|when number>10 then '用车高峰期'
|end useCar
|from
|(
|select startCity,count(*) as number,collect_set(time)[0] as time
|from
|(
|select time,startCity
|from
|order
|)
|group by startCity
|)
|""".stripMargin).write.mode(SaveMode.Overwrite).jdbc(Common.Common.MYSQL_URL_VALUE_TAIX, Common.Common.MYSQL_TABLE_ORDERDQL, prop)
})
}
def main(args: Array[String]): Unit = {
val ssc = Common.Common.getStreamingContext("local[*]", "app")
//Checkpoint directory, required if stateful operations such as updateStateByKey are added
ssc.checkpoint(Common.Common.FOREACHRDD_CHECKPOINT_DIR)
kafkaConsumer(ssc)
ssc.start()
ssc.awaitTermination()
}
}
Common constants class
package Student_DIDI.Common
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}
/** ****************************************
*
* @ClassName Common
* @Description Shared constants and configuration
* @Author sanglong
* @Date 2022/8/4 9:03
* @Version 1.8
* *************************************** */
object Common {
val BOOTSTRAP = "node102:9092"
val SERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringSerializer"
val DESERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringDeserializer"
val KAFKA_TOPIC_ORACLE = "taixStreaming"
val KAFKA_GROUP_ORACLE = "taixStreaming"
val MYSQL_USER = "user"
val MYSQL_PASSWORD = "password"
val MYSQL_URL_VALUE = "jdbc:mysql://127.0.0.1:3306/"
val MYSQL_DB_TAIX = "taix"
val MYSQL_TABLE_ORDERDATA = "orderdata"
val MYSQL_TABLE_ORDERCOUNT = "ordercount"
val MYSQL_TABLE_USER = "user"
val MYSQL_TABLE_ORDERDQL = "oneminte"
val MYSQL_TABLE_RANK = "rank"
val MYSQL_TABLE_PROPORTION = "proportion"
val MYSQL_USERNAME_VALUE = "root"
val MYSQL_PASSWORD_VALUE = "root"
val MYSQL_URL_VALUE_TAIX = MYSQL_URL_VALUE + MYSQL_DB_TAIX + "?useSSL=false"
val FOREACHRDD_CHECKPOINT_DIR = "E:\\MyDataBases\\updateStateByKey"
//Helper methods for obtaining a SparkSession / StreamingContext
def getSparkSession(master: String, appName: String): SparkSession = {
SparkSession.builder().master(master).appName(appName).getOrCreate()
}
def getStreamingContext(master: String, appName: String): StreamingContext = {
val conf = new SparkConf()
.setMaster(master)
.setAppName(appName)
new StreamingContext(conf, Seconds(5))
}
}
Before running, make sure the ZooKeeper and Kafka clusters are up and the corresponding topic has been created in Kafka.
Also make sure the dependencies are imported correctly. If the results look wrong, the code logic may be at fault; this is provided for reference only.
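If the topic has not been created yet, a minimal sketch using Kafka's AdminClient can create it programmatically (the partition count and replication factor below are placeholder assumptions for a single-broker test cluster):
import java.util.{Collections, Properties}
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, NewTopic}
object CreateTopic {
def main(args: Array[String]): Unit = {
val props = new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "node102:9092")
val admin = AdminClient.create(props)
//1 partition, replication factor 1: placeholder values for a test cluster
admin.createTopics(Collections.singleton(new NewTopic("taixStreaming", 1, 1.toShort))).all().get()
admin.close()
}
}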