python_mmdt: Comparing ssdeep, tlsh, vhash, and mmdthash
This article is a featured post from the Kanxue (Pediy) forum.
Kanxue forum author ID: 大大薇薇
Previous Articles
python_mmdt: A Python Library for Generating Feature Vectors Based on Sensitive Hashing (Part 1)
(https://bbs.pediy.com/thread-265211.htm)
In it we introduced the mmdthash (sensitive hash) generation method and the basic concepts behind it.
python_mmdt: From 0 to 1 -- Implementing a Simple Malware Classifier (Part 2)
(https://bbs.pediy.com/thread-265499.htm)
In it we introduced a simple malware classifier built on mmdthash.
python_mmdt: From 1 to 2 -- Implementing a KNN-Based Machine Learning Malware Classifier (Part 3)
(https://bbs.pediy.com/thread-265860.htm)
In it we introduced a machine learning malware classifier built on mmdthash.
python_mmdt: Using mmdthash Online (Part 4)
(https://bbs.pediy.com/thread-271243.htm)
In it we showed how to use mmdthash for online malicious file detection.
python_mmdt: Testing and Analyzing the KNN Classification Results (Part 5)
(https://bbs.pediy.com/thread-271265.htm)
In it we ran statistical tests on the KNN classifier's results to evaluate the model.
In this installment, we compare the effectiveness of four sensitive hash algorithms: ssdeep, tlsh, vhash, and mmdthash.
Project Address
GitHub repository: python_mmdt (https://github.com/a232319779/python_mmdt)
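To reproduce the steps below, the three libraries used in this post can presumably be installed from PyPI (package names assumed from their project pages: py-tlsh installs the tlsh module, and the ssdeep binding requires the native libfuzzy/ssdeep library to be present):

pip install python_mmdt
pip install ssdeep
pip install py-tlsh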
Comparison Conclusions
Accuracy (ACC): tlsh > mmdthash > ssdeep > vhash
Recall (REC): tlsh > mmdthash > ssdeep > vhash
Precision (PRE): mmdthash = ssdeep = vhash > tlsh
Introduction to Sensitive Hashing
A sensitive hash (fuzzy hash, similarity hash) maps similar inputs to similar digests, so the similarity of two files can be estimated by comparing their hashes alone; Part 1 of this series covers the concepts in detail.
Comparison Approach
All four hashes are computed over the same sample set; the same file pairs are then scored with each algorithm's own comparison method, and the scores are contrasted. The scripts below walk through each step.
Comparison Process
1. Computing ssdeep
# -*- coding: utf-8 -*-
import os
import sys
import hashlib
import json

import ssdeep


# walk the target directory
def list_dir(root_dir):
    files = os.listdir(root_dir)
    for f in files:
        file_path = os.path.join(root_dir, f)
        yield file_path


# compute the file's sha1
def gen_sha1(file_name):
    with open(file_name, 'rb') as f:
        s = f.read()
    _s = hashlib.sha1()
    _s.update(s)
    return _s.hexdigest()


def main():
    # path to the 785 test files
    file_path = sys.argv[1]
    ssdeep_dict = dict()
    for file_name in list_dir(file_path):
        file_sha1 = gen_sha1(file_name)
        ssdeep_hash = ssdeep.hash_from_file(file_name)
        print('%s,%s' % (file_sha1, ssdeep_hash))
        ssdeep_dict[file_sha1] = ssdeep_hash
    # save the results as a json file
    with open('ssdeep_test.json', 'w') as f:
        f.write(json.dumps(ssdeep_dict, indent=4))


if __name__ == '__main__':
    main()
cat ssdeep_test.json
{
"0ec279513e9e8a0e8f6e7c170b9462b60d9888c6": "6144:w9qaZ5E6fCvH5H42SUiTV2MTb54y94HTFboTWhmzeOws:w9d96yeKV2MTb5X4zZQWhmqd",
"0ad6db9128353742b3d4c8a5fc1993ca8bf399f1": "1536:NxiIXeGNc0BL0IFx34bPMkG/KsrKlEqjjPWUJ7h/dbZkv13t43O:eIXeGNtV0KIQjr5ehlbSv13t43O",
"e3dc592a0fa552beb35ebcb4160e5e4cb4686f17": "1536:qKXppRU0D2KmMESllkQSp5jcUyT/jAdp/hsonBqar5mVNCG:JpGjKm9fQSp5sjAfAa1mVMG",
"c8e1100b1e38e5c5e671a23cd49d98e315b74a36": "3072:XwZcFNCpegr+L3Y5D+LRohyOBGbNc8GMmE/A9VpGLGWtQeGwX1gnuZPZc2:XHCNEY5D+LfOi3GbE/AsAeGwXwc5",
"0ae0cba5b411541cc8d9f94e01151fec9d6b9242": "384:enXKs1aOcWkZ1WgoELXuf9OO5GD+IGA4p1XMWfg7CF:enp1aOasDOOM+ut",
......
}
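Two ssdeep digests are scored with ssdeep.compare, which returns an integer match score from 0 to 100. A quick check on the first two digests above (unrelated samples with incompatible block sizes, so a score of 0 is expected):

import ssdeep

h1 = '6144:w9qaZ5E6fCvH5H42SUiTV2MTb54y94HTFboTWhmzeOws:w9d96yeKV2MTb5X4zZQWhmqd'
h2 = '1536:NxiIXeGNc0BL0IFx34bPMkG/KsrKlEqjjPWUJ7h/dbZkv13t43O:eIXeGNtV0KIQjr5ehlbSv13t43O'
print(ssdeep.compare(h1, h2))  # 0-100; 0 means no meaningful similarity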
2. Computing tlsh
# -*- coding: utf-8 -*-
import os
import sys
import hashlib
import json

import tlsh


# walk the target directory
def list_dir(root_dir):
    files = os.listdir(root_dir)
    for f in files:
        file_path = os.path.join(root_dir, f)
        yield file_path


# compute the file's sha1
def gen_sha1(file_name):
    with open(file_name, 'rb') as f:
        s = f.read()
    _s = hashlib.sha1()
    _s.update(s)
    return _s.hexdigest()


# compute the file's tlsh digest
def gen_tlsh(file_name):
    with open(file_name, 'rb') as f:
        s = f.read()
    _s = tlsh.hash(s)
    return _s


def main():
    # path to the 785 test files
    file_path = sys.argv[1]
    tlsh_dict = dict()
    for file_name in list_dir(file_path):
        file_sha1 = gen_sha1(file_name)
        tlsh_hash = gen_tlsh(file_name)
        print('%s,%s' % (file_sha1, tlsh_hash))
        tlsh_dict[file_sha1] = tlsh_hash
    # save the results as a json file
    with open('tlsh_test.json', 'w') as f:
        f.write(json.dumps(tlsh_dict, indent=4))


if __name__ == '__main__':
    main()
cat tlsh_test.json
{
"0ec279513e9e8a0e8f6e7c170b9462b60d9888c6": "T1616423D5248C5DF8E251CCF4C73AB60493EADA48BF516B75BDD9C2692FF2480C93A214",
"0ad6db9128353742b3d4c8a5fc1993ca8bf399f1": "T13D73024483EBEDA8EE040AB0124C43B9CBAD8D1B7659653DFD3864D1FC064AE47269A6",
"e3dc592a0fa552beb35ebcb4160e5e4cb4686f17": "T1CF93293D766924E5E139C17CC5474E0AF772B025071227EF06A4C2BE1F97BE06C39AA5",
"c8e1100b1e38e5c5e671a23cd49d98e315b74a36": "T17F34391A57EC0465F1B7923589B34919F233B8625731E2DF109082BC2E27FD8BE36B56",
"0ae0cba5b411541cc8d9f94e01151fec9d6b9242": "T12D5208C71F69F7D4C19F85F84A3B623E1EA4616A6111412057DD3E92BC1C3DBFA2A09C",
......
}
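Unlike ssdeep, tlsh.diff returns a distance rather than a similarity: 0 means identical, and larger values mean less similar (this is the value the comparison script later normalizes by 1160). For example, on the first two digests above:

import tlsh

h1 = 'T1616423D5248C5DF8E251CCF4C73AB60493EADA48BF516B75BDD9C2692FF2480C93A214'
h2 = 'T13D73024483EBEDA8EE040AB0124C43B9CBAD8D1B7659653DFD3864D1FC064AE47269A6'
print(tlsh.diff(h1, h2))  # distance; 0 = identical, larger = less similar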
3. Computing vhash
vhash is VirusTotal's in-house similarity hash. Its algorithm is not public, so the values have to be fetched from the VirusTotal API and can only be compared for exact equality.
# -*- coding: utf-8 -*-
import sys
import json
from time import sleep

import requests

# VirusTotal API key
x_apikey = 'xxxx'


# read the list of hashes to query
def read_hash(file_name):
    with open(file_name, 'r') as f:
        datas = f.readlines()
    return [file_hash.strip() for file_hash in datas]


def parse_vt_report(vt_report_json):
    attributes = vt_report_json.get('data', {}).get('attributes', {})
    parse_data = dict()
    if attributes:
        # record the file's ssdeep/tlsh/vhash and file type in one pass
        parse_data['vhash'] = attributes.get('vhash', '')
        parse_data['magic'] = attributes.get('magic', '')
        parse_data['tlsh'] = attributes.get('tlsh', '')
        parse_data['ssdeep'] = attributes.get('ssdeep', '')
    return parse_data


def vt_search(sha1_hash):
    url = "https://www.virustotal.com/api/v3/files/{}".format(sha1_hash)
    headers = {
        "Accept": "application/json",
        "x-apikey": x_apikey
    }
    response = requests.request("GET", url, headers=headers)
    parse_data = dict()
    try:
        parse_data = parse_vt_report(response.json())
    except Exception as e:
        print('error: %s, reason: %s' % (sha1_hash, str(e)))
    return parse_data


def main():
    # path to the file listing the hashes to query
    file_path = sys.argv[1]
    vhash_dict = dict()
    file_hashs = read_hash(file_path)
    for file_hash in file_hashs:
        parse_data = vt_search(file_hash)
        print('%s,%s' % (file_hash, json.dumps(parse_data)))
        if parse_data:
            vhash_dict[file_hash] = parse_data
        else:
            # stop on the first failed lookup
            break
        # throttle requests to the VT API
        sleep(1)
    with open('vhash_test.json', 'w') as f:
        f.write(json.dumps(vhash_dict, indent=4))


if __name__ == '__main__':
    main()
cat vhash_test.json
{
"aba1301af627506cf67fd61410800b37c973dcb6": {
"vhash": "1240451d05151\"z",
"magic": "PE32+ executable for MS Windows (DLL) (console)",
"tlsh": "T151B22A828BB81403FA767D7013A8D6837D3D67D60820856915AAF5AA2C833C5EF10F7E",
"ssdeep": "192:8fPNlWZYWfUyfUlHDBQABJB3ejpC52qnaj68tj:iNlWZYW+DBRJ4Nle8tj"
},
"5f3ebf2c443f7010d3a5c2e5fa77c62b03ca1279": {
"vhash": "1240451d05151\"z",
"magic": "PE32+ executable for MS Windows (DLL) (console)",
"tlsh": "T140B239D6CBBC0547E9663EB012A8E9873D3E73EB4820416905A5F1981C837C5EF00F6E",
"ssdeep": "192:8Ih6WxwWFUyfUlHDBQABJj1N80Hy5qnajWi8sA+F:Vh6WxwW0DBRJjPsl+yF"
},
"3d57ce2f5149f1d9609608bc732d86637fe20cce": {
"vhash": "1240451d05151\"z",
"magic": "PE32+ executable for MS Windows (DLL) (console)",
"tlsh": "T18FB23AC2CBEC5443EAA67A7043A8E58B7D3DB3D21C60855904A6E1591CD33C2EF24E7E",
"ssdeep": "192:8JWhOMrlWBwWYUyfUlHDBQABJ5cWvKxEHsqnajTT0f7:kWhOMRWBwWhDBRJNKxUsl3TM"
},
......
}
4. Computing mmdthash
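The original post computes mmdthash with the python_mmdt library introduced in the earlier installments. As a minimal sketch, assuming the import path and the MMDT.mmdt_hash signature shown in the project README, the calculation mirrors the ssdeep/tlsh scripts above:

# -*- coding: utf-8 -*-
import os
import sys
import hashlib
import json

# import path assumed from the python_mmdt project README
from python_mmdt.mmdt.mmdt import MMDT


# walk the target directory (same helper as the ssdeep/tlsh scripts)
def list_dir(root_dir):
    files = os.listdir(root_dir)
    for f in files:
        yield os.path.join(root_dir, f)


# compute the file's sha1
def gen_sha1(file_name):
    with open(file_name, 'rb') as f:
        s = f.read()
    _s = hashlib.sha1()
    _s.update(s)
    return _s.hexdigest()


def main():
    # path to the test files
    file_path = sys.argv[1]
    mmdt = MMDT()
    mmdt_dict = dict()
    for file_name in list_dir(file_path):
        file_sha1 = gen_sha1(file_name)
        # mmdt_hash(file) is assumed to return the mmdthash string,
        # e.g. "07022B59:7202890402200212DA032EC310AFEF8A"
        mmdt_hash = mmdt.mmdt_hash(file_name)
        print('%s,%s' % (file_sha1, mmdt_hash))
        mmdt_dict[file_sha1] = mmdt_hash
    with open('mmdt_test.json', 'w') as f:
        f.write(json.dumps(mmdt_dict, indent=4))


if __name__ == '__main__':
    main()

These per-file mmdthash values are what gets merged with the VT data into ssdeep_tlsh_vhash_mmdthash_test.json in the next section.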
Result Comparison
1. Merging the ssdeep, tlsh, vhash, and mmdthash results
{
"aba1301af627506cf67fd61410800b37c973dcb6": {
"vhash": "1240451d05151\"z",
"magic": "PE32+ executable for MS Windows (DLL) (console)",
"tlsh": "T151B22A828BB81403FA767D7013A8D6837D3D67D60820856915AAF5AA2C833C5EF10F7E",
"ssdeep": "192:8fPNlWZYWfUyfUlHDBQABJB3ejpC52qnaj68tj:iNlWZYW+DBRJ4Nle8tj",
"mmdthash": "07022B59:7202890402200212DA032EC310AFEF8A"
},
"5f3ebf2c443f7010d3a5c2e5fa77c62b03ca1279": {
"vhash": "1240451d05151\"z",
"magic": "PE32+ executable for MS Windows (DLL) (console)",
"tlsh": "T140B239D6CBBC0547E9663EB012A8E9873D3E73EB4820416905A5F1981C837C5EF00F6E",
"ssdeep": "192:8Ih6WxwWFUyfUlHDBQABJj1N80Hy5qnajWi8sA+F:Vh6WxwW0DBRJjPsl+yF",
"mmdthash": "07022B59:7102870402200212DD032DC30EA0F1A9"
},
......
}
2. Computing sensitive hash similarity
# -*- coding: utf-8 -*-
import json

import ssdeep
import tlsh


# read mmdt_sim.csv: one "original_hash,similar_hash,mmdt_similarity" line per pair
def read_hash(file_name):
    with open(file_name, 'r') as f:
        datas = f.readlines()
    return [file_hash.strip() for file_hash in datas]


def ssdeep_compare(data1, data2):
    h1 = data1.get('ssdeep', '')
    h2 = data2.get('ssdeep', '')
    # ssdeep.compare returns a 0-100 match score; scale it to [0, 1]
    score = ssdeep.compare(h1, h2)
    return score / 100.0


def tlsh_compare(data1, data2):
    h1 = data1.get('tlsh', '')
    h2 = data2.get('tlsh', '')
    # tlsh.diff returns a distance (0 = identical); dividing by the
    # constant 1160 maps it to a similarity in [0, 1]
    score = tlsh.diff(h1, h2)
    return 1 - score / 1160.0


def vhash_compare(data1, data2):
    h1 = data1.get('vhash', '')
    h2 = data2.get('vhash', '')
    # vhash can only be compared for exact equality
    score = 1.0 if h1 == h2 else 0.0
    return score


def main():
    mmdt_hash_sim = read_hash('./mmdt_sim.csv')
    with open('./ssdeep_tlsh_vhash_mmdthash_test.json', 'r') as f:
        vhash_json = json.loads(f.read())
    print('original file,similar file,mmdt similarity,ssdeep similarity,tlsh similarity,vhash similarity,original file type,similar file type')
    for mhs in mmdt_hash_sim:
        tmp = mhs.split(',')
        ori_hash = tmp[0]
        sim_hash = tmp[1]
        mmdt_sim = float(tmp[2])
        ori_data = vhash_json[ori_hash]
        sim_data = vhash_json[sim_hash]
        ssdeep_sim = ssdeep_compare(ori_data, sim_data)
        tlsh_sim = tlsh_compare(ori_data, sim_data)
        vhash_sim = vhash_compare(ori_data, sim_data)
        ori_type = ori_data.get('magic', '').split(' ')[0]
        sim_type = sim_data.get('magic', '').split(' ')[0]
        print('%s,%s,%.3f,%.3f,%.3f,%.3f,%s,%s' % (
            ori_hash, sim_hash, mmdt_sim, ssdeep_sim, tlsh_sim, vhash_sim, ori_type, sim_type
        ))


if __name__ == '__main__':
    main()
3. Result analysis
tlsh has the highest accuracy and recall, but also the highest false positive rate (here, false positive rate = 1.0 - precision); a sketch of the metric definitions follows this list.
mmdthash comes second on accuracy and recall: its accuracy is 1.5% lower than tlsh's, its recall 9.0% lower, and its false positive rate 5.5% lower.
ssdeep comes third: its accuracy is 6.8% lower than tlsh's, its recall 21.4% lower, and its false positive rate 5.5% lower.
vhash is the most special case; its numbers are best treated as observational reference only.
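For reference, a minimal sketch of the standard confusion-matrix definitions behind the ACC/REC/PRE figures above, with purely hypothetical counts:

# confusion-matrix metrics used in the analysis above
# tp/fp/tn/fn = true positives, false positives, true negatives, false negatives
def metrics(tp, fp, tn, fn):
    acc = (tp + tn) / (tp + fp + tn + fn)  # accuracy (ACC)
    rec = tp / (tp + fn)                   # recall (REC)
    pre = tp / (tp + fp)                   # precision (PRE)
    fpr = 1.0 - pre                        # false positive rate as defined above
    return acc, rec, pre, fpr

# hypothetical counts, for illustration only
print(metrics(tp=90, fp=5, tn=80, fn=10))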
Other Notes
1. File type distribution of the 400 test files
PE files: 96%
ELF files: 2%
Other files: 2%
2. Computation time over the 400 test files
Reflections and Takeaways
① ssdeep's paper is genuinely well written: accessible, with a clear line of argument and very solid mathematical backing. Excellent work.
② tlsh's ecosystem is impressive: six related papers and six conference appearances are strong evidence of how well it works in practice.
③ While implementing mmdthash I worked too much in isolation; many of my half-formed ideas turn out to be described clearly and in detail in the tlsh papers, where they are already applied in mature form.
④ mmdthash's performance still needs continued optimization.
Kanxue ID: 大大薇薇
https://bbs.pediy.com/user-home-467421.htm