网站渗透常用Python小脚本same_host旁站查询(网站渗透工具)
作者:admin发布时间:2017-04-16 09:41浏览数量:10578次评论数量:0次
网站渗透常用Python小脚本same_host旁站查询
旁站查询来源:
http://dns.aizhan.com
http://s.tool.chinaz.com/same
http://i.links.cn/sameip/
http://www.ip2hosts.com/
效果图如下:
以百度网站和小残博客为例:
以百度网站为例:
PS:直接调用以上4个旁注接口查询同服服务器域名信息包含服务器类型 比如小残博客使用的是Tengine
#!/usr/bin/env python
#encoding: utf-8
import re
import sys
import json
import time
import requests
import urllib
import requests.packages.urllib3
from multiprocessing import Pool
from BeautifulSoup import BeautifulSoup
# Suppress urllib3's InsecureRequestWarning — every request below is made with verify=False.
requests.packages.urllib3.disable_warnings()
# Browser-like User-Agent so the lookup services don't reject the scraper outright.
headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'}
def links_ip(host):
    """Query i.links.cn for domains hosted on the same IP as *host*.

    Returns a list of URLs; the first entry is always ``http://<host>`` so
    the target itself survives even when the lookup fails or finds nothing.
    """
    ip2hosts = ["http://" + host]
    try:
        source = requests.get('http://i.links.cn/sameip/' + host + '.html',
                              headers=headers, verify=False)
        soup = BeautifulSoup(source.text)
        divs = soup.findAll(style="word-break:break-all")
        if not divs:  # empty scrape result
            print('Sorry! Not found!')
            return ip2hosts
        for div in divs:
            # Guard against malformed entries: previously a single div without
            # an <a> child raised AttributeError and discarded the whole loop.
            if div.a is not None and div.a.string:
                ip2hosts.append(div.a.string)
    except Exception as e:  # network/parse errors: degrade to what we have
        print(str(e))
    return ip2hosts
def ip2host_get(host):
    """Query www.ip2hosts.com for domains sharing *host*'s IP address.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    ip2hosts = ["http://" + host]
    try:
        req = requests.get('http://www.ip2hosts.com/search.php?ip=' + str(host),
                           headers=headers, verify=False)
        src = req.content
        if src.find('result') != -1:
            # Parse once instead of twice; the unused 'ip' field is dropped.
            result = json.loads(src)['result']
            for item in result:
                if len(item) > 0:  # skip empty strings in the result array
                    ip2hosts.append(item)
    except Exception as e:  # network/JSON errors: return what we collected
        print(str(e))
    return ip2hosts
def filter(host):
    """Print *host* and its ``Server`` header if the site is reachable.

    NOTE: shadows the builtin ``filter``; the name is kept because
    ``same_ip`` dispatches it by name to the worker pool.
    Sites that time out, lack a Server header, or have no <title> are
    silently skipped — that is this function's whole purpose.
    """
    try:
        response = requests.get(host, headers=headers, verify=False)
        server = response.headers['Server']
        # IndexError here (page has no <title>) also rejects the site.
        title = re.findall(r'<title>(.*?)</title>', response.content)[0]
    except Exception:
        # Unreachable/malformed sites are skipped by design.
        pass
    else:
        print("%s %s" % (host, server))
def aizhan(host):
    """Page through dns.aizhan.com reverse-IP results for *host*.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href="[^']+?([^']+?)/" rel="nofollow" target="_blank">\1</a>'''
    regexp_next = r'''<a href="http://dns.aizhan.com/[^/]+?/%d/">%d</a>'''
    url = 'http://dns.aizhan.com/%s/%d/'
    page = 1
    while True:
        if page > 2:
            time.sleep(1)  # throttle so the service doesn't block us
        req = requests.get(url % (host, page), headers=headers, verify=False)
        try:
            html = req.content.decode('utf-8')
            if req.status_code == 400:
                break
        except Exception as e:
            # BUGFIX: the original fell through after printing, leaving
            # 'html' unbound and raising NameError below. Stop paging.
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        # No link to the next page means we are on the last one.
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break
        page += 1
    return ip2hosts
def chinaz(host):
    """Walk the s.tool.chinaz.com same-IP listing pages for *host*.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    found = ["http://" + host]
    link_pat = r'''<a href='[^']+?([^']+?)' target=_blank>\1</a>'''
    next_pat = r'''<a href="javascript:" val="%d" class="item[^"]*?">%d</a>'''
    base = 'http://s.tool.chinaz.com/same?s=%s&page=%d'
    current = 1
    while True:
        if current > 1:
            time.sleep(1)  # pause between pages to avoid being blocked
        resp = requests.get(base % (host, current), headers=headers, verify=False)
        page_html = resp.content.decode('utf-8')
        for site in re.findall(link_pat, page_html):
            found.append("http://" + site)
        # Stop when there is no link pointing at the next page number.
        if re.search(next_pat % (current + 1, current + 1), page_html) is None:
            return found
        current += 1
def same_ip(host):
    """Aggregate all four same-IP lookup sources, dedupe, and probe each hit."""
    candidates = set()
    for lookup in (ip2host_get, links_ip, aizhan, chinaz):
        candidates.update(lookup(host))
    pool = Pool()
    for site in candidates:
        pool.apply_async(filter, args=(site,))
    pool.close()
    pool.join()
if __name__ == "__main__":
    # Guard clause: exactly one argument (the target host) is required.
    if len(sys.argv) != 2:
        print("usage: %s host" % sys.argv[0])
        sys.exit(-1)
    same_ip(sys.argv[1])
#!/usr/bin/env python
#encoding: utf-8
import re
import sys
import json
import time
import requests
import urllib
import requests.packages.urllib3
from multiprocessing import Pool
from BeautifulSoup import BeautifulSoup
# Suppress urllib3's InsecureRequestWarning — every request below is made with verify=False.
requests.packages.urllib3.disable_warnings()
# Browser-like User-Agent so the lookup services don't reject the scraper outright.
headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'}
def links_ip(host):
    """Query i.links.cn for domains hosted on the same IP as *host*.

    Returns a list of URLs; the first entry is always ``http://<host>`` so
    the target itself survives even when the lookup fails or finds nothing.
    """
    ip2hosts = ["http://" + host]
    try:
        source = requests.get('http://i.links.cn/sameip/' + host + '.html',
                              headers=headers, verify=False)
        soup = BeautifulSoup(source.text)
        divs = soup.findAll(style="word-break:break-all")
        if not divs:  # empty scrape result
            print('Sorry! Not found!')
            return ip2hosts
        for div in divs:
            # Guard against malformed entries: previously a single div without
            # an <a> child raised AttributeError and discarded the whole loop.
            if div.a is not None and div.a.string:
                ip2hosts.append(div.a.string)
    except Exception as e:  # network/parse errors: degrade to what we have
        print(str(e))
    return ip2hosts
def ip2host_get(host):
    """Query www.ip2hosts.com for domains sharing *host*'s IP address.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    ip2hosts = ["http://" + host]
    try:
        req = requests.get('http://www.ip2hosts.com/search.php?ip=' + str(host),
                           headers=headers, verify=False)
        src = req.content
        if src.find('result') != -1:
            # Parse once instead of twice; the unused 'ip' field is dropped.
            result = json.loads(src)['result']
            for item in result:
                if len(item) > 0:  # skip empty strings in the result array
                    ip2hosts.append(item)
    except Exception as e:  # network/JSON errors: return what we collected
        print(str(e))
    return ip2hosts
def filter(host):
    """Print *host* and its ``Server`` header if the site is reachable.

    NOTE: shadows the builtin ``filter``; the name is kept because
    ``same_ip`` dispatches it by name to the worker pool.
    Sites that time out, lack a Server header, or have no <title> are
    silently skipped — that is this function's whole purpose.
    """
    try:
        response = requests.get(host, headers=headers, verify=False)
        server = response.headers['Server']
        # IndexError here (page has no <title>) also rejects the site.
        title = re.findall(r'<title>(.*?)</title>', response.content)[0]
    except Exception:
        # Unreachable/malformed sites are skipped by design.
        pass
    else:
        print("%s %s" % (host, server))
def aizhan(host):
    """Page through dns.aizhan.com reverse-IP results for *host*.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    ip2hosts = ["http://" + host]
    regexp = r'''<a href="[^']+?([^']+?)/" rel="nofollow" target="_blank">\1</a>'''
    regexp_next = r'''<a href="http://dns.aizhan.com/[^/]+?/%d/">%d</a>'''
    url = 'http://dns.aizhan.com/%s/%d/'
    page = 1
    while True:
        if page > 2:
            time.sleep(1)  # throttle so the service doesn't block us
        req = requests.get(url % (host, page), headers=headers, verify=False)
        try:
            html = req.content.decode('utf-8')
            if req.status_code == 400:
                break
        except Exception as e:
            # BUGFIX: the original fell through after printing, leaving
            # 'html' unbound and raising NameError below. Stop paging.
            print(str(e))
            break
        for site in re.findall(regexp, html):
            ip2hosts.append("http://" + site)
        # No link to the next page means we are on the last one.
        if re.search(regexp_next % (page + 1, page + 1), html) is None:
            break
        page += 1
    return ip2hosts
def chinaz(host):
    """Walk the s.tool.chinaz.com same-IP listing pages for *host*.

    Returns a list of URLs; the first entry is always ``http://<host>``.
    """
    found = ["http://" + host]
    link_pat = r'''<a href='[^']+?([^']+?)' target=_blank>\1</a>'''
    next_pat = r'''<a href="javascript:" val="%d" class="item[^"]*?">%d</a>'''
    base = 'http://s.tool.chinaz.com/same?s=%s&page=%d'
    current = 1
    while True:
        if current > 1:
            time.sleep(1)  # pause between pages to avoid being blocked
        resp = requests.get(base % (host, current), headers=headers, verify=False)
        page_html = resp.content.decode('utf-8')
        for site in re.findall(link_pat, page_html):
            found.append("http://" + site)
        # Stop when there is no link pointing at the next page number.
        if re.search(next_pat % (current + 1, current + 1), page_html) is None:
            return found
        current += 1
def same_ip(host):
    """Aggregate all four same-IP lookup sources, dedupe, and probe each hit."""
    candidates = set()
    for lookup in (ip2host_get, links_ip, aizhan, chinaz):
        candidates.update(lookup(host))
    pool = Pool()
    for site in candidates:
        pool.apply_async(filter, args=(site,))
    pool.close()
    pool.join()
if __name__ == "__main__":
    # Guard clause: exactly one argument (the target host) is required.
    if len(sys.argv) != 2:
        print("usage: %s host" % sys.argv[0])
        sys.exit(-1)
    same_ip(sys.argv[1])
大家可以发挥添加或者修改任意查询接口。
网站渗透常用Python小脚本same_host旁站查询
免责声明: 本站提供的资源,都来自网络,版权争议与本站无关,所有内容及软件的文章仅限用于学习和研究目的。不得将上述内容用于商业或者非法用途,否则,一切后果请用户自负,我们不保证内容的长久可用性,通过使用本站内容随之而来的风险与本站无关,您必须在下载后的24个小时之内,从您的电脑/手机中彻底删除上述内容。如果您喜欢该程序,请支持正版软件,购买注册。如有侵权删除请致信E-mail:Heashion@163.com