Sometimes a Hive table's partitions still exist even though the underlying data has already been deleted. To find these partitions, and to drop the ones whose data is gone, I wrote a few small scripts.
First, list all the partitions.
Create a table in MySQL to store the results of the partition check.
status: -1 = unknown, 0 = missing, 1 = exists, 2 = dropped
create table meta.partition_loc (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `LOCATION` varchar(255) COLLATE utf8_bin NOT NULL DEFAULT '',
  `PART_NAME` varchar(255) COLLATE utf8_bin NOT NULL DEFAULT '',
  `CREATE_TIME` int(11) NOT NULL,
  `TBL_NAME` varchar(128) COLLATE utf8_bin NOT NULL DEFAULT '',
  `DB_NAME` varchar(128) COLLATE utf8_bin NOT NULL DEFAULT '',
  `STATUS` int(11) NOT NULL DEFAULT '-1',
  PRIMARY KEY (`id`)
);
From the Hive metastore database, find all the partitions and write them into this table.
insert into meta.partition_loc
SELECT
  null as id,
  s.`LOCATION`,
  p.`PART_NAME`,
  p.`CREATE_TIME`,
  t.`TBL_NAME`,
  d.`NAME`,
  -1 as `STATUS`
from
  hive.SDS s
  JOIN hive.`PARTITIONS` p on s.SD_ID = p.SD_ID
  JOIN hive.TBLS t on p.TBL_ID = t.TBL_ID
  JOIN hive.DBS d on t.DB_ID = d.DB_ID
;
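After the load, it's worth a quick sanity check that the copy is complete, for example by comparing row counts between the two tables. A minimal sketch (the connection parameters are placeholders, and it assumes the metastore database `hive` lives on the same MySQL instance as `meta`):

import pymysql

conn = pymysql.connect(host='$host', user='$user', password='******', port=3306)
cursor = conn.cursor()
# Number of partitions in the metastore
cursor.execute('SELECT COUNT(*) FROM hive.PARTITIONS')
src = cursor.fetchone()[0]
# Number of rows copied into the tracking table
cursor.execute('SELECT COUNT(*) FROM meta.partition_loc')
dst = cursor.fetchone()[0]
print('metastore partitions:', src, 'rows copied:', dst)
cursor.close()
conn.close()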
Check whether each directory exists, one by one, and update the meta.partition_loc table.
#!/usr/bin/env python
import pymysql
from snakebite.client import AutoConfigClient as HDFSClient

client = HDFSClient()
dbconn = pymysql.connect(
    host='$host',
    user='$user',
    password='******',
    database='meta',
    port=3306
)
cursor = dbconn.cursor()
sql = '''SELECT id, LOCATION, PART_NAME, TBL_NAME, DB_NAME
FROM meta.partition_loc WHERE STATUS = -1 limit 100'''
update_sql = '''UPDATE meta.partition_loc SET STATUS=%s WHERE id=%s'''
try:
    n = 0
    while True:
        cursor.execute(sql)
        rows = cursor.fetchall()
        for row in rows:  # process one batch of partitions
            _id, location, part_name, tbl_name, db_name = row
            # Strip the hdfs://xxxx prefix; replace 'nameservice1' with your
            # actual nameservice name. The existence check sends frequent
            # requests to the NameNode (roughly 300~500 per second), so run
            # this during off-peak hours.
            if location.startswith('hdfs://nameservice1/'):
                s = client.test(location.replace('hdfs://nameservice1/', '/'), exists=True)
            else:
                s = False
            cursor.execute(update_sql, (int(s), _id))  # record the result: 1 = exists, 0 = missing
            n += 1
        print('handled', n)
        dbconn.commit()
        if not rows:
            break
except Exception as e:
    print(e)
    dbconn.rollback()
finally:
    cursor.close()
    dbconn.close()
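The heart of the script is snakebite's `test(..., exists=True)` call, which returns a plain boolean for a single path. In isolation it looks like this (a minimal sketch; AutoConfigClient reads the NameNode addresses from the local Hadoop configuration, and the path is just an example):

from snakebite.client import AutoConfigClient

client = AutoConfigClient()  # picks up the HA config from HADOOP_CONF_DIR
# True if the path exists, False otherwise; note the path is given
# without the hdfs://nameservice1 prefix, as in the script above
print(client.test('/user/hive/warehouse', exists=True))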
Drop the partitions: generate the SQL statements, then execute them with hive.
import os
import pymysql

def to_part_spec(part_name):
    # Turn a metastore PART_NAME such as "dt=2018-01-01/hour=00" into a
    # Hive partition spec such as "PARTITION(dt='2018-01-01',hour='00')".
    s = []
    for part_ in part_name.split('/'):
        p = part_.split('=')
        s.append("%s='%s'" % (p[0], p[1]))
    return "PARTITION(%s)" % ','.join(s)

dbconn = pymysql.connect(
    host='$host',
    user='$user',
    password='******',
    database='meta',
    port=3306
)
cursor = dbconn.cursor()
update_sql = '''UPDATE meta.partition_loc SET STATUS=%s WHERE id=%s'''
select_sql = '''SELECT id, PART_NAME, concat(DB_NAME, '.', TBL_NAME)
FROM meta.partition_loc WHERE STATUS = 0 limit 100'''
try:
    n = 0
    for _b in range(10):
        cursor.execute(select_sql)
        rows = cursor.fetchall()
        if not rows:
            break
        data = {}  # key: table name, value: list of partition names
        for row in rows:
            _id, part_name, tbl = row
            data.setdefault(tbl, []).append(part_name)
            n += 1
        # First write the statements into a SQL file, then execute it with hive
        with open('/tmp/remove-partition.sql', 'w') as fout:
            for tbl in data:
                sql = '''ALTER TABLE %s DROP \n%s;\n''' % (tbl, ',\n'.join([to_part_spec(p) for p in data[tbl]]))
                fout.write(sql)
        # Run the statements to drop the empty partitions
        os.system('hive -f /tmp/remove-partition.sql')
        for row in rows:
            cursor.execute(update_sql, (2, row[0]))  # mark as dropped
        dbconn.commit()
        print('dropped', n)
except Exception as e:
    print(e)
    dbconn.rollback()
finally:
    cursor.close()
    dbconn.close()
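For reference, here is what the generated statements look like, using the to_part_spec function above on a hypothetical table `db1.events` with two missing partitions (names made up for illustration):

parts = ['dt=2018-01-01/hour=00', 'dt=2018-01-01/hour=01']
print('ALTER TABLE %s DROP \n%s;' % ('db1.events', ',\n'.join(to_part_spec(p) for p in parts)))
# Output:
# ALTER TABLE db1.events DROP
# PARTITION(dt='2018-01-01',hour='00'),
# PARTITION(dt='2018-01-01',hour='01');

Hive accepts multiple comma-separated PARTITION specs in a single ALTER TABLE ... DROP, which keeps the number of hive invocations down.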