pax_global_header comment=edb027c060168c93b03407473dfb91b32cde7d68

Pompem-0.2.0/
Pompem-0.2.0/.gitignore
bin
include
lib
*.pyc

Pompem-0.2.0/README.markdown
# Pompem - Exploit and Vulnerability Finder

Pompem is an open source tool designed to automate the search for exploits and vulnerabilities in the most important databases. Developed in Python, it offers an advanced search system that supports the work of pentesters and ethical hackers. The current version searches PacketStorm Security, CXSecurity, ZeroDay, Vulners, the National Vulnerability Database and the WPScan Vulnerability Database.

## Screenshots

![](http://i.imgur.com/lhBRLhl.png)
![](http://i.imgur.com/taqkdtT.png)
![](http://i.imgur.com/uNyqNF0.png)

## Installation

You can download the latest tarball by clicking [here](https://github.com/rfunix/Pompem/tarball/master) or the latest zipball by clicking [here](https://github.com/rfunix/Pompem/zipball/master).

You can also get Pompem by cloning the [Git](https://github.com/rfunix/Pompem) repository:

```
git clone https://github.com/rfunix/Pompem.git
```

Pompem works out of the box with [Python](http://www.python.org/download/) version `3.5.x` on any platform.

Pompem depends on the following library:

* [Requests](http://docs.python-requests.org/en/latest/)

You can also use virtualenv:

1. Create the virtual env -> ```virtualenv -p python3 .env```
2. Activate the virtualenv -> ```source .env/bin/activate```
3. Install the dependencies -> ```pip install -r requirements.txt```

> If you use pip and do not have virtualenv, install it with 'sudo pip install virtualenv' or see the [virtualenv website](http://www.virtualenv.org/en/latest/).

## Usage

To get the list of basic options and information about the project:

```bash
python pompem.py -h
```

Examples of use:

    python pompem.py -s Wordpress
    python pompem.py -s Joomla --html
    python pompem.py -s "Internet Explorer,joomla,wordpress" --html
    python pompem.py -s FortiGate --txt
    python pompem.py -s ssh,ftp,mysql

## License

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

Pompem is free software: keep the credits and you are free to use and abuse it.

Pompem-0.2.0/common/
Pompem-0.2.0/common/__init__.py
Pompem-0.2.0/common/html_page/
Pompem-0.2.0/common/html_page/base.html
Pompem - Exploit Finder


   __________
   \______   \____   _____ ______   ____   _____
    |     ___/  _ \ /     \\____ \_/ __ \ /     \
    |    |  (  <_> )  Y Y  \  |_> >  ___/|  Y Y  \
    |____|   \____/|__|_|  /   __/ \___  >__|_|  /
                         \/|__|        \/      \/
                    

{list_dict_result}

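For reference, base.html is not parsed as HTML by Pompem: common/writers.py treats it as a plain str.format() template and substitutes {list_dict_result} with pre-rendered table rows. A minimal sketch of that substitution follows; the keyword and the row markup are made-up sample values, not real Pompem output.

```python
# Hypothetical example of how base.html is consumed by write_html() in common/writers.py.
with open("common/html_page/base.html") as f:
    template = f.read()

page = template.format(word_search="wordpress",  # unused keys are simply ignored by the template
                       list_dict_result="<tr><td>2016-08-01</td><td>Sample entry</td></tr>")

with open("out.html", "w") as f:
    f.write(page)
```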
Pompem-0.2.0/common/html_page/style.css
h1 {
    position: absolute;
    left: 23%;
    top: 2%;
    margin-left: -110px;
    margin-top: -40px;
}
#url_download:hover {
    background-color: green;
}

Pompem-0.2.0/common/print_messages.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

HELP_MESSAGE = """
 Options:
   -h, --help     show this help message and exit
   -s, --search   text for search
   --txt          Write txt File
   --html         Write html File
   --update       upgrade to latest version
   -g, --get      Download exploit files
"""

BASIC_INFO_MESSAGE = """
    __________
    \______   \____   _____ ______   ____   _____
    |     ___/  _ \ /     \\____ \_/ __ \ /     \\
    |    |  (  <_> )  Y Y  \  |_> >  ___/|  Y Y  \\
    |____|   \____/|__|_|  /   __/ \___  >__|_|  /
                         \/|__|        \/      \/

 Rafael Francischini (Programmer and Ethical Hacker) - @rfunix
 Bruno Fraga (Security Researcher) - @brunofraga_net

 Usage: pompem.py [-s/--search <keywords>]
                  [--txt Write txt file ]
                  [--html Write html file ]

 Get basic options and Help, use: -h/--help
"""

GENERATE_TXT_FILE = """
 + Generate txt output file -> out.txt
"""

MAX_PRINT_PER_SITE = 30


def show_results(key_word, list_results):
    print ("+Results {0}".format(key_word))
    print ("+" + "-" * 200 + "+")
    print ("+Date          Description                                        Url")
    print ("+" + "-" * 200 + "+")
    for dict_result in list_results:
        count_print = 0
        for key, result in dict_result.items():
            for exploit_data in result:
                if (count_print > MAX_PRINT_PER_SITE):
                    break
                count_print += 1
                print("+ {0} | {1} | {2} ".format(exploit_data["date"],
                                                  str(exploit_data["name"])[0:50],
                                                  exploit_data["url"]))
    print ("+" + "-" * 200 + "+")

Pompem-0.2.0/common/writers.py
# -*- coding: utf-8 -*-
import os
import sys
import subprocess


def write_html(dict_all_results):
    """ Read common/html_page/base.html into ``html`` and fill its
    placeholders with str.format(). The template exposes two keys,
    ``word_search`` and ``list_dict_result``, whose values are built by
    __helper_write_html(). base.html cannot use an internal style sheet,
    because CSS relies on the { and } characters that str.format() treats
    as replacement fields; for that reason the internal style sheet was
    converted to inline styles in the generated HTML.
    """
    html = ''.join(open("common/html_page/base.html", "r").readlines())
    data = __helper_write_html(dict_all_results)
    final_html = html.format(word_search=data["word_search"],
                             list_dict_result=data["table_rows"])
    with open(r"out.html", "w") as f:
        f.write(final_html)


def __helper_write_html(iterable_data):
    data_result = {}
    table_rows = ""
    for word_search, list_results in iterable_data.items():
        table_rows += r"""

Results for search: {0}


""".format(word_search) for dict_result in list_results: for key, result in dict_result.items(): for exploit_data in result: data_result["word_search"] = word_search table_rows += r""" """.format(exploit_data["date"], str(exploit_data["name"]), exploit_data["url"], ) table_rows += """
Date Description Url
{0} {1} {2}
""" data_result["table_rows"] = table_rows return data_result def open_url(url): if sys.platform == 'win32': os.startfile(url) elif sys.platform == 'darwin': subprocess.Popen(['open', url]) else: try: subprocess.Popen(['xdg-open', url]) except OSError: print ('Please open a browser on: ' + url) def write_txt(dict_all_results): """ Write result in file out.txt for better viewing. """ with open("out.txt", "w") as f: f.write("date;description;url\n") for word_search, list_results in dict_all_results.items(): for dict_result in list_results: for key, result in dict_result.items(): for exploit_data in result: f.write("{0};{1};{2}\n".format( exploit_data["date"], str(exploit_data["name"]), exploit_data["url"]) ) Pompem-0.2.0/core/000077500000000000000000000000001275077605400137135ustar00rootroot00000000000000Pompem-0.2.0/core/__init__.py000066400000000000000000000000001275077605400160120ustar00rootroot00000000000000Pompem-0.2.0/core/exploit_finder.py000066400000000000000000000030241275077605400172770ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- NUM_WORKERS = 5 from core.scrapers import PacketStorm, CXSecurity, ZeroDay, Vulners, \ NationaVulnerabilityDB, WpvulndbB from common.print_messages import show_results from common.writers import write_html, write_txt from common.writers import open_url from common.print_messages import GENERATE_TXT_FILE class ExploitFinder(object): def __init__(self, args=None): self.parameters = args self.key_words = self.parameters.keywords.split(',') self.list_scrapers = [PacketStorm, CXSecurity, ZeroDay, Vulners, NationaVulnerabilityDB, WpvulndbB ] def run(self, ): all_data = {} for word_search in self.key_words: l_result = [] l_threads = [] for scraper_class in self.list_scrapers: scraper_instance = scraper_class(word_search) scraper_instance.start() l_threads.append(scraper_instance) [l_result.append( {'{0}'.format(th.name_class): th.join()}) for th in l_threads] all_data[word_search] = l_result show_results(word_search, l_result) if self.parameters.html_out: write_html(all_data) open_url('out.html') if self.parameters.txt_out: write_txt(all_data) print(GENERATE_TXT_FILE) Pompem-0.2.0/core/request_worker.py000066400000000000000000000037371275077605400173600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from threading import Thread import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning import http.client import json requests.packages.urllib3.disable_warnings(InsecureRequestWarning) class RequestWorker(Thread): def __init__(self, url, data=None, session_url=None): Thread.__init__(self) self._url = url self._html = None self._data = data self._session_url = session_url self._headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0' } if self._session_url: self._session = requests.session() self._request_session = self._session.post(self._session_url, data=self._data, headers=self._headers) self._response_sesssion = self._request_session.text def run(self): if self._session_url: req = self._session.get(self._url, verify=False, headers=self._headers) else: req = requests.get(self._url, verify=False, headers=self._headers) req.decode = 'utf-8' self._html = req.text def join(self): Thread.join(self) return self._html class RequestWorkerHttpLib(Thread): def __init__(self, domain, path, data={}, type_req="POST"): Thread.__init__(self) self._domain = domain self._path = path self._headers = { 'content-type': "application/json", 'User-Agent': 'Mozilla/5.0 
(Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0' } self._data = json.dumps(data) self._type_req = type_req def run(self): conn = http.client.HTTPSConnection(self._domain) conn.request(self._type_req, self._path, self._data, self._headers) res = conn.getresponse() data = res.read() self._html = data.decode("utf-8") def join(self): Thread.join(self) return self._html Pompem-0.2.0/core/scrapers.py000066400000000000000000000260001275077605400161050ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- from threading import Thread import datetime import re from core.request_worker import RequestWorker, RequestWorkerHttpLib import json class Scraper(Thread): def __init__(self): Thread.__init__(self) self.list_result = None self.list_req_workers = [] def _parser(self): raise NotImplementedError() def join(self): Thread.join(self) return self.list_result def _get_results(self): for r_worker in self.list_req_workers: try: html = r_worker.join() self._parser(html) except Exception as e: import traceback traceback.print_exc() class PacketStorm(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "Packet Storm Security" self.name_class = PacketStorm.__name__ self.base_url = "https://packetstormsecurity.com" self.key_word = key_word self.url = "https://packetstormsecurity.com/search/files/page{0}/?q={1}" self.page_max = 2 self.list_result = [] self.regex_item = re.compile(r'(?ms)(
)') self.regex_url = re.compile(r'href="(/files/\d+?\/[^"]*?)"') self.regex_date = re.compile(r'href="/files/date/(\d{4}-\d{2}-\d{2})') self.regex_name = re.compile(r'href="/files/\d+?\/[^"]*?".*?title.*?>([^<]*?)<') def run(self, ): for page in range(self.page_max): try: url_search = self.url.format(page + 1, self.key_word) req_worker = RequestWorker(url_search) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): for item in self.regex_item.finditer(html): item_html = item.group(0) dict_result = {} url_exploit = "{0}{1}".format( self.base_url, self.regex_url.search(item_html).group(1) ) dict_result['url'] = url_exploit dict_result['date'] = self.regex_date.search(item_html).group(1) dict_result['name'] = self.regex_name.search(item_html).group(1) self.list_result.append(dict_result) class CXSecurity(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "CXSecurity" self.name_class = CXSecurity.__name__ self.key_word = key_word self.url = "https://cxsecurity.com/search/wlb/DESC/AND/{0}.1999.1.1/{1}/10/{3}/" self.page_max = 2 self.list_result = [] self.regex_item = re.compile(r'(?msi).*?') self.regex_url = re.compile(r'(?msi)(\d{2})\.(\d{2})\.(\d{4})') self.regex_name = re.compile(r'(?msi)title="([^"]*?)"') def run(self, ): now_date = '{dt.year}.{dt.month}.{dt.day}'.format( dt=datetime.datetime.now() ) for page in range(self.page_max): try: url_search = self.url.format(now_date, page + 1, self.page_max, self.key_word) req_worker = RequestWorker(url_search) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): for item in self.regex_item.finditer(html): item_html = item.group(0) dict_result = {} url_exploit = self.regex_url.search(item_html).group(1) dict_result['url'] = url_exploit match_date = self.regex_date.search(item_html) date = "{0}-{1}-{2}".format(match_date.group(3), match_date.group(2), match_date.group(1) ) dict_result['date'] = date dict_result['name'] = self.regex_name.search(item_html).group(1) self.list_result.append(dict_result) class ZeroDay(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "ZeroDay" self.name_class = ZeroDay.__name__ self.key_word = key_word self.url = "https://j5dtyooqyukedkrl.onion.to/search?search_request={0}" self.session_url = "https://j5dtyooqyukedkrl.onion.to" self.base_url = "https://j5dtyooqyukedkrl.onion.to" self.list_result = [] self.regex_item = re.compile(r"(?msi)
") self.regex_date = re.compile(r"(?msi)href='/date.*?>(\d{2})-(\d{2})-(\d{4})") self.regex_url = re.compile(r"(?msi)href='(/exploit.*?)'") self.regex_name = re.compile(r"(?msi)href='/exploit.*?'>([^<]*?)<") def run(self, ): try: url_search = self.url.format(self.key_word) req_worker = RequestWorker(url=url_search, data={'agree': 'Yes%2C+I+agree'}, session_url=self.session_url) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): for item in self.regex_item.finditer(html): item_html = item.group(0) dict_result = {} dict_result['url'] = self.base_url + self.regex_url.search(item_html).group(1) match_date = self.regex_date.search(item_html) date = "{0}-{1}-{2}".format(match_date.group(3), match_date.group(2), match_date.group(1) ) dict_result['date'] = date dict_result['name'] = self.regex_name.search(item_html).group(1) self.list_result.append(dict_result) class Vulners(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "Vulners" self.name_class = Vulners.__name__ self.key_word = key_word self.url_domain = "vulners.com" self.path = "/api/v3/search/lucene/" self.list_result = [] self.regex_date = re.compile(r"(\d{4})-(\d{2})-(\d{2})") def run(self, ): try: data = {} data['query'] = "{0} last year".format(self.key_word) req_worker = RequestWorkerHttpLib(self.url_domain, self.path, data) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): json_data = json.loads(html) for data in json_data['data']['search']: dict_result = {} dict_result['url'] = data["_source"]['href'] dict_result['name'] = data["_source"]["title"] dict_result['date'] = self.regex_date.search(data["_source"]["published"]).group(0) self.list_result.append(dict_result) class NationaVulnerabilityDB(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "NationaVulnerabilityDB" self.name_class = NationaVulnerabilityDB.__name__ self.key_word = key_word self.url = "https://web.nvd.nist.gov/view/vuln/search-results?query={0}&search_type=all&cves=on&startIndex={1}" self.base_url = 'https://web.nvd.nist.gov/view/vuln/' self.page_max = 60 self.list_result = [] self.regex_item = re.compile(r'(?msi)
.*?a href="detail.*?') self.regex_name = re.compile(r'(?msi)
.*?Summary:.*?>([^<]*?)<') self.regex_date = re.compile(r'(?msi)
.*?Summary:.*?>.*?Published:.*?>.*?(\d{1,2})\/(\d{1,2})\/(\d{4})') self.regex_url = re.compile(r'(?msi)
.*?href="([^"]*?vulnId.*?)"') def run(self, ): for page in range(0,self.page_max+1,20): try: url_search = self.url.format( self.key_word, page ) req_worker = RequestWorker(url_search) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): for item in self.regex_item.finditer(html): item_html = item.group(0) dict_results = {} dict_results['name'] = self.regex_name.search(item_html).group(1) match_date = self.regex_date.search(item_html) date = "{0}-{1}-{2}".format(match_date.group(3), match_date.group(1), match_date.group(2) ) dict_results['date'] = date dict_results['url'] = self.base_url + self.regex_url.search(item_html).group(1) self.list_result.append(dict_results) class WpvulndbB(Scraper): def __init__(self, key_word): Scraper.__init__(self) self.name_site = "Wpvulndb" self.name_class = NationaVulnerabilityDB.__name__ self.key_word = key_word self.url = "https://wpvulndb.com/searches?page={1}&text={0}&utf8=%E2%9C%93&vuln_type=" self.url_base = "https://wpvulndb.com" self.page_max = 2 self.list_result = [] self.regex_item = re.compile(r'(?msi).*?.*?') self.regex_name = re.compile(r'(?msi)\d+?<.*?href.*?>([^<]*?)<') self.regex_date = re.compile(r'(?msi)created-at">([^<]*?)<') self.regex_url = re.compile(r'(?msi)\d+?<') def run(self, ): for page in range(self.page_max+1): try: url_search = self.url.format( self.key_word, page ) req_worker = RequestWorker(url_search) req_worker.start() self.list_req_workers.append(req_worker) except Exception as e: import traceback traceback.print_exc() self._get_results() def _parser(self, html): for item in self.regex_item.finditer(html): dict_results = {} item_html = item.group(0) url = self.url_base + self.regex_url.search(item_html).group(1) dict_results['url'] = url dict_results['name'] = self.regex_name.search(item_html).group(1) dict_results['date'] = self.regex_date.search(item_html).group(1) self.list_result.append(dict_results) Pompem-0.2.0/pompem.py000066400000000000000000000025451275077605400146400ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- import optparse from core.exploit_finder import ExploitFinder from common.print_messages import HELP_MESSAGE, BASIC_INFO_MESSAGE def main(): parser = optparse.OptionParser(add_help_option=False) parser.add_option("-s", "--search", dest="keywords", type="string", help="text for search", ) parser.add_option("--txt", dest="txt_out", action="store_true", help="Generate txt output file", ) parser.add_option("--html", dest="html_out", action="store_true", help="Generate html output file", ) parser.add_option("--update", action="store_true", dest="update", help="upgrade to latest version") parser.add_option("-g", "--get", action="store_true", dest="get_exploit", help="Download Exploits") parser.add_option("-h", "--help", action="store_true", dest="help", help="-h") args = parser.parse_args() parameters = args[0] if not parameters.keywords: if parameters.help: print (HELP_MESSAGE) exit(0) else: print (BASIC_INFO_MESSAGE) exit(0) exploit_finder = ExploitFinder(parameters) exploit_finder.run() if __name__ == "__main__": main() Pompem-0.2.0/requirements.txt000077500000000000000000000000201275077605400162420ustar00rootroot00000000000000requests==2.9.1
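The scrapers above all follow one pattern: subclass Scraper, fetch pages with RequestWorker threads in run(), and have _parser() fill self.list_result with dicts exposing url, date and name (the keys that show_results(), write_txt() and write_html() read). Purely as an illustration of adding another source, here is a minimal sketch; the "ExampleDB" name, its URL and its regexes are hypothetical and not part of Pompem.

```python
# sketch_new_scraper.py -- illustrative only; ExampleDB and its markup are invented.
import re

from core.request_worker import RequestWorker
from core.scrapers import Scraper


class ExampleDB(Scraper):

    def __init__(self, key_word):
        Scraper.__init__(self)
        self.name_site = "ExampleDB"
        self.name_class = ExampleDB.__name__
        self.key_word = key_word
        self.url = "https://exploits.example.org/search?q={0}"
        self.list_result = []
        # hypothetical markup: one <li class="hit"> block per result
        self.regex_item = re.compile(r'(?msi)<li class="hit">.*?</li>')
        self.regex_url = re.compile(r'href="([^"]*?)"')
        self.regex_date = re.compile(r'(\d{4}-\d{2}-\d{2})')
        self.regex_name = re.compile(r'title="([^"]*?)"')

    def run(self):
        # fetch the search page in a worker thread, then collect and parse it
        req_worker = RequestWorker(self.url.format(self.key_word))
        req_worker.start()
        self.list_req_workers.append(req_worker)
        self._get_results()

    def _parser(self, html):
        # each entry must expose url / date / name, as the writers expect
        for item in self.regex_item.finditer(html):
            item_html = item.group(0)
            self.list_result.append({
                'url': self.regex_url.search(item_html).group(1),
                'date': self.regex_date.search(item_html).group(1),
                'name': self.regex_name.search(item_html).group(1),
            })
```

To be picked up by a search run, such a class would also have to be appended to self.list_scrapers in core/exploit_finder.py.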