This post mainly compares the speed of the three systems (MySQL, Spark SQL, and Hive) through experiments.
Python code for data generation
import random

def getOneTraj():
    # Build one tab-separated trajectory record, terminated by a newline
    vme_id = 'S90110000' + str(random.randint(2, 9))
    gps_time = '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11'
    work_state = str(random.randint(0, 1))
    ultrasonic_station = str(random.randint(0, 1))
    limit_switch_state = str(random.randint(0, 1))
    work_deep = str(random.randint(0, 1000))
    longtitude = str(random.uniform(60, 90))
    latitude = str(random.uniform(30, 60))
    elevation = str(random.uniform(0, 1160))
    speed = str(random.uniform(0, 60))
    course_direction = str(random.randint(0, 599))
    fields = [vme_id, gps_time, work_state, ultrasonic_station,
              limit_switch_state, work_deep, longtitude, latitude,
              elevation, speed, course_direction]
    return '\t'.join(fields) + '\n'

if __name__ == "__main__":
    count = 10
    # Write ten files test10.csv .. test19.csv, each holding
    # 4000 * 10000 = 40 million rows (400 million rows in total)
    for i in [4000] * 10:
        fileName = 'test' + str(count) + '.csv'
        count = count + 1
        f = open(fileName, 'w')
        print(i)
        for k in range(0, i):
            str1 = ''
            for j in range(0, 10000):  # batch 10000 rows per write call
                str1 = str1 + getOneTraj()
            f.write(str1)
        f.close()
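A quick size check: each record is roughly 100 bytes (the two uniform floats alone print at about 17-18 characters each), so a 40-million-row file comes to roughly 4 GB, consistent with the ~40 GB for 400 million rows noted in the MySQL section below.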
SQL statements for the performance benchmark
select count(*) from trajectory;
select count(*) from trajectory group by vme_id;
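As a sketch, the two statements can also be timed from a small Python harness (assuming the dbtac database and trajectory table created in section 1 below, with the same connection parameters used later in this post):

import datetime
import pymysql

conn = pymysql.connect(host="localhost", user="root",
                       password="123", database="dbtac")
cursor = conn.cursor()
for sql in ["select count(*) from trajectory",
            "select count(*) from trajectory group by vme_id"]:
    start = datetime.datetime.now()
    cursor.execute(sql)
    cursor.fetchall()  # read the full result before stopping the clock
    print(sql, '->', datetime.datetime.now() - start)
conn.close()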
1. MySQL query speed test
# Create the database and table
CREATE DATABASE dbtac;
USE dbtac;
CREATE TABLE trajectory (vme_id varchar(100),gps_time varchar(100),work_state INT,ultrasonic_station INT,limit_switch_state INT,work_deep INT,longtitude DOUBLE,latitude DOUBLE,elevation INT,speed INT, course_direction INT);
# Test inserting a single row
insert into trajectory values('sdd','21',1,1,1,1,1,1,1,1,1);
MySQL can import CSV files directly, but on Ubuntu this kept running into permission problems; after several failed attempts, the following method was used instead.
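For reference, the direct-import route looks like the sketch below; it assumes local_infile is enabled on both the server and the client, and this is the step that tends to fail with permission errors:

import pymysql

# Direct CSV import; also requires local_infile=1 on the MySQL server side
conn = pymysql.connect(host="localhost", user="root", password="123",
                       database="dbtac", local_infile=True)
cursor = conn.cursor()
cursor.execute(
    "LOAD DATA LOCAL INFILE 'test10.csv' INTO TABLE trajectory "
    "FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n'")
conn.commit()
conn.close()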
Inserting data into MySQL from Python via pymysql
import pymysql
import random
import datetime

print('Connecting to the MySQL server...')
conn = pymysql.connect(host="localhost", user="root",
                       password="123", database="dbtac")
print('Connected!')
cursor = conn.cursor()

for i in range(400):  # 400 batches in total, i.e. 400 million rows
    values = []
    now = datetime.datetime.now()
    for j in range(1000000):  # 1,000,000 rows per batch; simple tests showed this works best
        vme_id = 'S90110000' + str(random.randint(2, 9))
        gps_time = '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11'
        work_state = random.randint(0, 1)
        ultrasonic_station = random.randint(0, 1)
        limit_switch_state = random.randint(0, 1)
        work_deep = random.randint(0, 900)
        longtitude = random.uniform(60, 90)
        latitude = random.uniform(30, 60)
        elevation = random.uniform(0, 1160)
        speed = random.uniform(0, 60)
        course_direction = random.randint(0, 599)
        values.append((vme_id, gps_time, work_state, ultrasonic_station,
                       limit_switch_state, work_deep, longtitude, latitude,
                       elevation, speed, course_direction))
    end = datetime.datetime.now()
    print('generate batch:', end - now)
    now = datetime.datetime.now()
    cursor.executemany(
        "insert into trajectory values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
        values)
    conn.commit()
    end = datetime.datetime.now()
    print('insert batch:', end - now)

cursor.close()
conn.close()
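A note on the batch size: each 1,000,000-row batch is held in memory as Python tuples (on the order of a few hundred MB) before executemany ships it, so if memory is tight, a smaller batch such as 100,000 rows flattens the footprint at some cost in throughput.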
Note: inserting 400 million rows (about 40 GB) into MySQL this way is slow, taking more than 12 hours. Workaround: open several Python terminals and run the program above concurrently, changing the range of i in each so the batches do not overlap.
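The same workaround can be scripted in a single file with multiprocessing instead of separate terminals. A minimal sketch, assuming each worker opens its own connection (pymysql connections must not be shared across processes); the worker and batch counts are illustrative:

import multiprocessing
import random
import pymysql

ROWS_PER_BATCH = 100000  # smaller than the 1,000,000 above to keep per-worker memory low

def one_row():
    # Same record layout as the generator above
    return ('S90110000' + str(random.randint(2, 9)),
            '2015-08-' + str(random.randint(10, 30)) + ' 09:29:11',
            random.randint(0, 1), random.randint(0, 1), random.randint(0, 1),
            random.randint(0, 900), random.uniform(60, 90),
            random.uniform(30, 60), random.uniform(0, 1160),
            random.uniform(0, 60), random.randint(0, 599))

def insert_batches(n_batches):
    conn = pymysql.connect(host="localhost", user="root",
                           password="123", database="dbtac")
    cursor = conn.cursor()
    for _ in range(n_batches):
        values = [one_row() for _ in range(ROWS_PER_BATCH)]
        cursor.executemany(
            "insert into trajectory values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            values)
        conn.commit()
    conn.close()

if __name__ == "__main__":
    # 4 workers x 10 batches x 100,000 rows = 4 million rows; scale up as needed
    with multiprocessing.Pool(4) as pool:
        pool.map(insert_batches, [10] * 4)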
2. Spark SQL
# Start the Hadoop cluster
start-all.sh
# Start the Spark cluster
start-master.sh
start-slaves.sh
# Start spark-sql against the standalone cluster
spark-sql --master spark://master:7077 --driver-cores 8 --driver-java-options "-Dspark.driver.port=4050" --conf spark.sql.warehouse.dir=hdfs://master:9000/user/hive/warehouse
# If that fails, spark-sql can be started in local mode instead
spark-sql
Reference: Running Spark on a Cluster
Hadoop cluster web UI: http://localhost:50070/
Spark cluster web UI: http://master:8080/
After entering spark-sql, run:
CREATE DATABASE dbtac;
use dbtac;
DROP TABLE IF EXISTS trajectory;
# or empty the table with
truncate table trajectory;
CREATE TABLE dbtac.trajectory (vme_id STRING, gps_time STRING, work_state INT, ultrasonic_station INT, limit_switch_state INT, work_deep INT, longtitude DOUBLE, latitude DOUBLE, elevation INT, speed INT, course_direction INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
# Loading data that is already on HDFS
load data inpath 'hdfs://Master:9000/dbtac/test/test10.csv' into table trajectory;
# Loading data from the local filesystem
LOAD DATA LOCAL INPATH '/win/test/test10.csv' INTO TABLE trajectory;
Benchmark queries (the spark-sql CLI prints a 'Time taken' line after each statement):
select count(*) from trajectory where work_deep>40;
select count(*) from trajectory where work_deep>40 and speed >20;
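The same comparison can also be scripted from PySpark rather than the interactive CLI; a sketch, assuming pyspark is installed and the standalone master configured above:

import datetime
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("spark://master:7077")
         .appName("trajectory-benchmark")
         .enableHiveSupport()
         .getOrCreate())
for sql in ["select count(*) from dbtac.trajectory where work_deep > 40",
            "select count(*) from dbtac.trajectory where work_deep > 40 and speed > 20"]:
    start = datetime.datetime.now()
    spark.sql(sql).collect()  # collect() forces the query to actually run
    print(sql, '->', datetime.datetime.now() - start)
spark.stop()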
Delete a fixed number of records (note this is MySQL syntax; Spark SQL does not support DELETE on plain text tables):
delete from trajectory limit 100000;
3际起、 hive
Upload the files to HDFS
hdfs dfs -mkdir /dbtac
hdfs dfs -put /usr/local/dbtac/tac /dbtac
hdfs dfs -ls /dbtac/tac
hdfs dfs -rm -r /dbtac  # delete the dbtac directory
Start Hive
cd /usr/local/hive
./bin/hive
After entering Hive, run:
CREATE DATABASE dbtac;
use dbtac;  # switch to the dbtac database
DROP TABLE IF EXISTS trajectory;
# Create the table
CREATE EXTERNAL TABLE dbtac.trajectory (vme_id STRING, gps_time STRING, work_state INT, ultrasonic_station INT, limit_switch_state INT, work_deep INT, longtitude DOUBLE, latitude DOUBLE, elevation INT, speed INT, course_direction INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
# Load data from HDFS (note: LOAD DATA INPATH moves the source file into the table's directory rather than copying it)
load data inpath 'hdfs://Master:9000/user/hive/warehouse/dbtac.db/trajectory/test.csv' into table trajectory;
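If HiveServer2 is running (an assumption; this post itself uses the interactive CLI), the same queries can be timed from Python via PyHive:

import datetime
from pyhive import hive

conn = hive.Connection(host='localhost', port=10000, database='dbtac')
cursor = conn.cursor()
start = datetime.datetime.now()
cursor.execute('select count(*) from trajectory where work_deep > 40')
print(cursor.fetchall(), datetime.datetime.now() - start)
conn.close()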
Miscellaneous
How to mount a shared Windows directory on Ubuntu, to access the data on the Windows machine:
sudo apt-get install cifs-utils
sudo mkdir /win
sudo mount -t cifs -o username=xiaojing,password=xxx //192.168.17.122/mydata /win