// DataFrame 操作 (DataFrame operations) — article header from the original post.
//
// 3488 ワード (word count of the source article: 3488 words)

package sparkSQL
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
  * Created by sicong on 2017/3/9.
  */
object sparkKodo {

  /** Entry point: builds a local 3-thread SparkSession and runs the log-analysis job. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("Spark SQL Example").master("local[3]")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()
    loggin(spark)
  }

  /**
    * Loads a Kodo request-log JSON file, registers it as the temp view "people"
    * and projects the `path` column for inspection by [[playbackurl]].
    *
    * @param sparkSession active session used for reading and querying
    * @param path         location of the JSON log file; defaults to the original
    *                     hard-coded sample so existing callers are unaffected
    */
  def loggin(sparkSession: SparkSession,
             path: String = "/Users/sicong/Downloads/kodo-z1-req-io2016-12-11-00-00-26-b162d6nqdg60cgrca5tg.json"): Unit = {
    val df = sparkSession.read.json(path)
    df.createOrReplaceTempView("people")

    val sqlDF = sparkSession.sql("SELECT path  FROM people")
    playbackurl(sqlDF)
  }

  /**
    * Prints every row whose single `path` column has more than two "/"-separated
    * segments and contains both "meipai-live" and "m3u8".
    *
    * @param dataFrame single-column DataFrame holding request paths
    */
  def playbackurl(dataFrame: DataFrame): Unit = {
    // count() forces a full pass over the data; kept for the diagnostic output.
    println("================" + dataFrame.count())
    //meipai-live
    // The two original chained filters are a single conjunction; evaluating the
    // path string once per row avoids three get(0).toString calls.
    dataFrame
      .filter { row =>
        val p = row.get(0).toString
        p.split("/").length > 2 && p.contains("meipai-live") && p.contains("m3u8")
      }
      .foreach(row => println(row))
    //    dataFrame.filter(x=>x.get(0).toString.split("/").length>2).filter(x=>jusu(x.get(0),"meipai-live","ts")).foreach(x=>println(x))
  }

  /**
    * Tests whether `x` (rendered via toString) looks like a "fragments" URL:
    * the segment at index 2 (the host part of a "scheme://host/..." path)
    * must contain `title` as a dot-separated token, and the last segment
    * must contain `types` as a dot-separated token.
    *
    * Logs the offending value and returns false on any non-fatal error,
    * e.g. when the path has fewer than three "/"-separated segments.
    */
  def jusu(x: Any, title: String, types: String): Boolean = {
    import scala.util.control.NonFatal
    try {
      val segments = x.toString.split("/")
      // segments(2) throws ArrayIndexOutOfBoundsException for short paths — caught below.
      val hasTitle = segments(2).split("\\.").contains(title)
      val hasType  = segments(segments.length - 1).split("\\.").contains(types)
      segments.contains("fragments") && hasTitle && hasType
    } catch {
      // NonFatal (instead of Exception) lets VM errors / interrupts propagate.
      case NonFatal(_) =>
        println("====================" + x)
        false
    }
  }

}