Preface
This post organizes notes on the information-gathering phase, which can take up roughly 60% of total penetration-testing time, depending on skill and circumstances. The goal is to collect as much asset information as possible without disrupting the target enterprise's normal business; care and patience are key.
Main site
Locate fast, deliver fast
Start by browsing the main site: it gives an intuitive view of the target's general business, nature, and model, which helps a great deal throughout the engagement.
When the boss wants quick vulnerability output on certain sites (so sales can talk to the customer), the main site can often be taken directly: e.g. unrestricted brute forcing of a four-digit verification code, SQLi, backup files, historical vulnerabilities in components/frameworks, weak back-end passwords, and email collection/brute forcing/social engineering with theHarvester.
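As an example of the first item, a four-digit code with no attempt limit falls to a trivial loop. A minimal sketch in the same Python 2 style as the scripts below; the /verify endpoint, the code parameter, and the failure marker are all hypothetical placeholders, not a real target:

import requests

def brute_code(url):
    # walk the whole 0000-9999 space; endpoint, parameter, and marker
    # are hypothetical placeholders, not a real API
    for i in range(10000):
        code = '%04d' % i
        r = requests.post(url, data={'code': code}, timeout=5)
        if 'error' not in r.content:  # hypothetical failure marker
            return code
    return None

print brute_code('http://target.com/verify')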
For vendors with no defined scope (the whole network is in play), related assets can be gathered through the channels below, e.g. domains used by dev/ops, domains resolving to internal networks, and IP ownership.
Search-engine hacking syntax: query target.com | the company name. This sometimes turns things up, but the data needs cleaning.
whois lookup / registrant reverse lookup / email reverse lookup / related assets
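whois itself is just a plain-text protocol on TCP port 43, so a scripted lookup needs nothing but a socket. A minimal sketch (whois.iana.org returns the registry referral, which you would then follow, e.g. whois.verisign-grs.com for .com):

import socket

def whois_query(domain, server='whois.iana.org'):
    # send the domain, read the reply until the server closes the connection
    s = socket.create_connection((server, 43), timeout=10)
    s.sendall(domain + '\r\n')
    data = ''
    while True:
        chunk = s.recv(4096)
        if not chunk:
            break
        data += chunk
    s.close()
    return data

print whois_query('target.com')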
The information gathered at this stage lays the groundwork for the next step: collecting and brute-forcing subdomains.
Subdomains
There are many ways to collect subdomains: the Layer subdomain miner 4.2 commemorative edition, subDomainsBrute (李劼杰), wydomain (豬豬俠), Sublist3r, site:target.com, GitHub code repositories, packet-capture analysis of requests and responses (redirects, file uploads, app/API endpoints, etc.), and online lookup sites such as 站長幫手 links. Some of these are affected by wildcard DNS resolution. Notes on what I've learned:
DNS zone-transfer vulnerability
Linux:
[dig @ns.example.com example.com AXFR]
Windows:
[nslookup -> set type=ns -> target.com -> server ns.target.com -> ls target.com]
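The same check can be scripted across all NS records; a minimal sketch assuming the dnspython package is installed (target.com is a placeholder):

import dns.resolver
import dns.query
import dns.zone

def check_axfr(domain):
    # try a zone transfer against every authoritative NS
    for ns in dns.resolver.query(domain, 'NS'):
        ns_ip = str(dns.resolver.query(str(ns), 'A')[0])
        try:
            zone = dns.zone.from_xfr(dns.query.xfr(ns_ip, domain, timeout=5))
            for name in zone.nodes.keys():
                print '%s.%s' % (name, domain)
        except Exception:
            pass  # transfer refused: this NS is not vulnerable

check_axfr('target.com')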
Brute-forcing nth-level subdomains
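Each discovered subdomain can serve as a new root for another round of brute forcing. A minimal sketch (parent domain and wordlist are placeholders) that also filters the wildcard-resolution noise mentioned above by resolving a random label first:

import socket
import uuid

def brute_next_level(parent, words):
    # if a random label resolves, the zone is wildcarded; remember its IP
    try:
        wildcard_ip = socket.gethostbyname('%s.%s' % (uuid.uuid4().hex, parent))
    except socket.error:
        wildcard_ip = None
    found = []
    for w in words:
        fqdn = '%s.%s' % (w, parent)
        try:
            ip = socket.gethostbyname(fqdn)
            if ip != wildcard_ip:
                found.append((fqdn, ip))
        except socket.error:
            pass
    return found

print brute_next_level('dev.target.com', ['admin', 'api', 'test', 'oa'])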
Ports and services
Assuming the subdomain list exported from Layer is saved as target.com.txt, scan the default ports with nmap:
nmap -v -A -F -iL target.com.txt -oX target_f.xml
Scan all ports:
nmap -v -A -p1-65535 -iL target.com.txt -oX target_all.xml
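The -oX files can be post-processed before the web probing step below; a short sketch that pulls open ports out of nmap's standard XML output with the stdlib parser:

import xml.etree.ElementTree as ET

def open_ports(path):
    # yield (address, port, service) for every open port in the scan
    root = ET.parse(path).getroot()
    for host in root.findall('host'):
        addr = host.find('address').get('addr')
        ports = host.find('ports')
        if ports is None:
            continue
        for port in ports.findall('port'):
            if port.find('state').get('state') != 'open':
                continue
            svc = port.find('service')
            name = svc.get('name') if svc is not None else '?'
            yield addr, port.get('portid'), name

for addr, portid, name in open_ports('target_f.xml'):
    print '%s:%s %s' % (addr, portid, name)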
Web probing
Probe each site's web information, assuming all subdomains are in target.com_domains.txt (domains exported directly from Layer).
For details, see: the penetration testing standard.
QQ groups, WeChat official accounts, Baidu Wenku, and other social channels can sometimes turn up password lists built to specific patterns. A small script is attached below.
main.py
Batch-scans the sites listed in target.com_domains.txt.
# -*- coding: UTF-8 -*-
import requests
import re
import os
from bs4 import BeautifulSoup
from multiprocessing.pool import Pool
import threading
import time
# from whatcms import *
from dirscan import *
from baidu_site import *
# from baidu_inurl import *
# from getalllink import *
import warnings
warnings.filterwarnings("ignore")
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

cookie = '1'  # Cookie header sent with every probe
# report file: <input name><date>.html
output_file = sys.argv[1].split('.')[0] + time.strftime('%Y-%m-%d', time.localtime(time.time())) + '.html'  # -%H-%I-%S

def check(url):
    # probe one URL and append a result row to the HTML report
    try:
        print url
        header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Upgrade-Insecure-Requests': '1',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Cookie': cookie,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'}
        req = requests.get(url, headers=header, timeout=5, allow_redirects=False)
        status = req.status_code
        html = req.content
        soup = BeautifulSoup(html, 'html.parser', from_encoding="utf-8")
        server = 'unknown'
        try:
            server = req.headers['Server']
        except: pass
        title = 'unknown'
        try:
            title = soup.title.string
        except: pass
        X_Powered_By = 'unknown'
        try:
            X_Powered_By = req.headers['X-Powered-By']
        except: pass
        output = open(output_file, "a")
        # NOTE: the original row template was stripped when the post was
        # published; this <tr> is a guessed reconstruction of its eight fields
        str1 = '''
<tr><td>%s</td><td>%s</td><td>%s</td><td><a href="%s">%s</a></td><td>%s</td><td>%s</td><td>%s</td></tr>
''' % (str(status), server, X_Powered_By, url, url, title, baidu_site(url), dirscan(url))
        output.write(str1)
        output.close()
        return 1
    except:
        # any network/parse error just skips this host
        return 0

def get_domain(adr):
    # expand every hostname in the file into http://host:port and https://host
    files = open(adr, 'r')
    regex = re.compile(r"(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", re.IGNORECASE)
    while True:
        line = files.readline()
        if not line:
            break
        urls = regex.findall(line)
        for i in urls:
            for port in web_port:
                domain_http = 'http://' + i + ':' + port
                if domain_http not in domains:
                    domains.append(domain_http)
            domain_https = 'https://' + line.split('\n')[0]
            if domain_https not in domains:
                domains.append(domain_https)

if __name__ == '__main__':
    domains = []
    web_port = ['80', ]  # ,'8080',
    in_domain = sys.argv[1]
    get_domain(in_domain)
    output = open(output_file, "w")
    # NOTE: the report header was also stripped; a minimal guessed wrapper
    str1 = "<html><head><meta charset='utf-8'></head><body><table border='1'>"
    output.write(str1)
    output.close()
    pool = Pool(2)
    pool.map(check, domains)
    pool.close()
    pool.join()
    os._exit(0)
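Usage: python main.py target.com_domains.txt. Results accumulate in an HTML report named after the input file plus the current date (the output_file variable above).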
dirscan.py
Implements simple directory fuzzing:
import requests
import sys
import urlparse
import random
import re

def load_keys(url):
    # read list.txt, substituting %flag% with the bare hostname
    # (renamed from list() to avoid shadowing the builtin)
    keys = []
    f = open('list.txt', 'r')
    bak = urlparse.urlparse(url).hostname
    for i in f.readlines():
        key = i.strip().replace('%flag%', bak)
        if key not in keys:
            keys.append(key)
    return keys

def dirscan(url):
    flag = []
    keys = load_keys(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'}
    user_agent = ['Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0',
        'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1 ; x64; en-US; rv:1.9.1b2pre) Gecko/20081026 Firefox/3.1b2pre',
        'Opera/10.60 (Windows NT 5.1; U; zh-cn) Presto/2.6.30 Version/10.60', 'Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4062; en; U; ssr)',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.2.4) Gecko/20100523 Firefox/3.6.4 ( .NET CLR 3.5.30729)',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5']
    check = requests.get(url=url, timeout=3, headers=headers)
    if check.url[-1:] != '/':
        check.url = check.url + '/'
    try:
        # sites answering with X-Frame-Options here were observed to be
        # generic nginx 404/401 pages, not worth fuzzing
        if check.headers['X-Frame-Options']:
            return 'Nginx 404/401'
    except: pass
    if check.url[:-1] != url:
        # the base URL redirected: report the redirect target instead
        return check.url[:-1]
    for i in keys:
        urli = url + i
        UA = random.choice(user_agent)
        headers['User-Agent'] = UA
        try:
            r = requests.get(url=urli, timeout=3, headers=headers)
            # a hit must return 200, differ in size from the base page,
            # and not redirect elsewhere
            if r.status_code == 200 and len(check.content) != len(r.content) and r.url == urli:
                flag.append(i)
        except: pass
    if len(flag) > 25:
        # too many "hits" almost always means a soft-404 page: discard
        return
    else:
        return flag
    '''
    if re.findall(r"\['/robots\.txt',? (.*?) '/tmp', '/file'\]", str(flag)):
        return
    else:
        return flag'''

if __name__ == '__main__':
    print dirscan(sys.argv[1])
    # svn: text/plain
    # DS_Store: 'application/octet-stream' == r.headers['Content-Type']
    # if 'application' in r.headers['Content-Type']:
    #     flag.append(i)
baidu_site.py
Adds a site:<subdomain> Baidu check:
import requests
import re
import sys
import urlparse
import random

def baidu_site(url):
    # query Baidu for site:<hostname> to see whether the host is indexed
    url = urlparse.urlparse(url).hostname
    baidu_url = 'https://www.baidu.com/s?ie=UTF-8&wd=site:{}'.format(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'}
    user_agent = ['Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0',
        'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1 ; x64; en-US; rv:1.9.1b2pre) Gecko/20081026 Firefox/3.1b2pre',
        'Opera/10.60 (Windows NT 5.1; U; zh-cn) Presto/2.6.30 Version/10.60', 'Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4062; en; U; ssr)',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.2.4) Gecko/20100523 Firefox/3.6.4 ( .NET CLR 3.5.30729)',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5']
    UA = random.choice(user_agent)
    headers['User-Agent'] = UA
    try:
        r = requests.get(url=baidu_url, headers=headers, timeout=5).content
        if 'class="nors"' not in r:
            # class="nors" marks Baidu's "no results" page, so its absence
            # means the domain is indexed. NOTE: the original return value was
            # stripped when the post was published; linking back to the
            # search is a guessed reconstruction.
            return '<a href="%s">site:%s</a>' % (baidu_url, url)
        else:
            return ''
    except:
        pass
    return ''

if __name__ == '__main__':
    print baidu_site(sys.argv[1])
list.txt
The directory dictionary:
/robots.txt
/.git/config
/.svn/entries
/.svn/wc.db
/README.md
/.viminfo
/.bash_history
/.bashrc
/crossdomain.xml
/nginx.conf
/httpd.conf
/user.txt
/sitemap.xml
/username.txt
/pass.txt
/passwd.txt
/password.txt
/.DS_Store
/.htaccess
/log
/log.txt
/phpinfo.php
/info.php
/www.7z
/www.rar
/www.zip
/www.tar.gz
/wwwroot.zip
/wwwroot.rar
/wwwroot.7z
/wwwroot.tar.gz
/%flag%.7z
/%flag%.rar
/%flag%.zip
/%flag%.tar.gz
/backup
/backup.7z
/backup.rar
/backup.sql
/backup.tar
/backup.tar.gz
/backup.zip
/database.sql
/index.7z
/index.rar
/index.sql
/index.tar
/index.tar.gz
/index.zip
/index.html
/index.php
/index.asp
/index.aspx
/index.jsp
/index.action
/users.sql
/login
/phpmyadmin
/pma
/SiteServer
/admin
/install
/test
/tmp
/file
Result: (screenshot omitted)
Add whatever auxiliary plugins you need.
For a more complete toolkit, datasploit is recommended.
Reposted from: http://www.cnnetarmy.com/%e4%b9%99%e6%96%b9%e6%b8%97%e9%80%8f%e6%b5%8b%e8%af%95%e4%b9%8b%e4%bf%a1%e6%81%af%e6%94%b6%e9%9b%86/