#daveutil.py
import os
import random
import string
import cPickle
from htmllib import HTMLParser
from formatter import DumbWriter, AbstractFormatter
from cStringIO import StringIO
#need this for body()
import spkproxy
#reads one byte at a time from mysocket until mybreakpoint has been seen
#returns everything read (including the breakpoint), or "" if nothing was read
def readuntil(mysocket,mybreakpoint):
    data=""
    length=-len(mybreakpoint)
    #print "length=%d"%length
    while data[length:]!=mybreakpoint:
        newdata=mysocket.recv(1)
        if newdata=="":
            #the peer closed the connection before we saw the breakpoint
            break
        data+=newdata
        #print "data=%s"%(data[length:])
    return data
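#usage sketch for readuntil (illustrative only, assuming Python 2 like the rest
#of this file): any object with a recv(size) method will do, e.g. a tiny stub
#that doles out a canned HTTP response
class _StringSocket:
    def __init__(self,data):
        self.data=data
    def recv(self,size):
        chunk=self.data[:size]
        self.data=self.data[size:]
        return chunk
def _example_readuntil():
    s=_StringSocket("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\nleftover")
    #returns everything up to and including the blank line that ends the header
    return readuntil(s,"\r\n\r\n")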
def urlnormalize(url):
"""
/cow/../../../bob/bob2.php -> /bob/bob2.php
"""
#for win32 users
    f=url.replace("\\","/")
    if f!="" and f[-1]=="/":
        tailslash=1
    else:
        tailslash=0
    dot=f.split("/")
    while "." in dot:
        #list.remove() returns None, so don't reassign dot here
        dot.remove(".")
    while "" in dot:
        dot.remove("")
while ".." in dot:
firstdotdot=dot.index("..")
#go one directory up
if firstdotdot==0:
dot.remove(dot[0])
continue
#get rid of parent directory
dot.remove(dot[firstdotdot-1])
#do this again to get rid of the ..
dot.remove(dot[firstdotdot-1])
fin="/".join(dot)+"/"*tailslash
if fin=="":
fin="/"
if fin[0]!="/":
fin="/"+fin
return fin
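#quick sanity check for urlnormalize (illustrative sketch, not part of the
#original module); these expectations follow from the docstring above
def _example_urlnormalize():
    assert urlnormalize("/cow/../../../bob/bob2.php")=="/bob/bob2.php"
    assert urlnormalize("/a/./b//c/")=="/a/b/c/"
    assert urlnormalize("..")=="/"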
#stolen from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/82465
def dmkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
dmkdir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
#collapses every run of multiple spaces in a string down to a single space
def joinallspaces(input):
inputold=""
inputnew=input[:]
while inputold!=inputnew:
inputold=inputnew[:]
inputnew=inputnew.replace(" "," ")
return inputnew
def getrandomnumber():
return random.randrange(1,100000,1)
def pathjoin(*paths):
temp=""
for path in paths:
#print "Pathjoin "+path
if path!="":
if path[0]=="/" or path[0]=="\\":
#we are windoze compliant!
path=path[1:]
temp=os.path.join(temp,path)
    #if the first path was absolute, add the leading slash back
    if len(paths)>0 and paths[0]!="" and paths[0][0]=="/":
        temp="/"+temp
    return temp
def pathsplit(path):
temp=path
last="tempval"
retList=[]
while last!="":
temp,last=os.path.split(temp)
if last!="":
retList=[last]+retList
return retList
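#illustrative round trip for pathjoin/pathsplit (a sketch assuming a
#POSIX-style os.path)
def _example_paths():
    assert pathjoin("/usr","local","bin")=="/usr/local/bin"
    #note that pathsplit drops the leading "/" marker
    assert pathsplit("/usr/local/bin")==["usr","local","bin"]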
#following inits are for prettyprint
norm = string.maketrans('', '') #builds list of all characters
non_alnum = string.translate(norm, norm, string.letters+string.digits)
trans_nontext=string.maketrans(non_alnum,'#'*len(non_alnum))
def prettyprint(data):
cleaned=string.translate(data,trans_nontext)
return cleaned
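#prettyprint simply masks every non-alphanumeric byte with '#'; a tiny
#illustration (assuming the default C locale for string.letters):
def _example_prettyprint():
    assert prettyprint("foo\x00bar\r\n")=="foo#bar##"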
#opens a pickled requestandresponse object from a file and
#obtains the url from its client header
def getURLfromFile(file):
#load our request and response object
infile=open(file,"rb")
obj=cPickle.load(infile)
infile.close()
url=obj.clientheader.URL
return url
#takes a url like /bob/bob2/bob3/asdf.cgi and returns the list of
#enclosing directory paths: ["/","/bob/","/bob/bob2/","/bob/bob2/bob3/"]
def getDirsFromURL(url):
dirList=url.split("/")
#check for a file at the last one
if dirList[-1].count(".")>0:
dirList=dirList[:-1]
#now combine them up
start="/"
realDirList=[]
for dir in dirList:
start+=dir+"/"
start=start.replace("_directory_","")
start=start.replace("///","/")
start=start.replace("//","/")
realDirList.append(start)
return realDirList
#constructs a request given a header and optionally a body
def constructRequest(myheader,mybody=None):
#for null value
if (mybody==None):
mybody=spkproxy.body()
#debug
if 0:
return "GET / HTTP/1.1\r\nHost: www.immunitysec.com\r\nContent-Length: 0\r\n\r\n"
request=myheader.verb+" "+myheader.getProxyHeader()+myheader.URL
#if we have arguments
if myheader.useRawArguments:
if len(myheader.allURLargs) > 0:
request+="?"+myheader.allURLargs
else:
if len(myheader.URLargsDict) > 0:
request+="?"
request+=joinargs(myheader.URLargsDict,orderlist=myheader.orderlist)
request+=" "+myheader.version+"\r\n"
#ok, the first line is done!
#do the rest of the headers that need order
#I dunno if any except Host really need ordering, but I do it
#to erase any chance of lame bugs later on
#plus, python makes it quite easy
needOrdered=["Host","User-Agent","Accept","Accept-Language","Accept-Encoding","Accept-Charset","Keep-Alive","Connection","Pragma","Cache-Control"]
for avalue in needOrdered:
request+=myheader.grabHeader(avalue)
#now work on the header pairs we haven't already done
for akey in myheader.headerValuesDict.keys():
if akey not in needOrdered:
request+=myheader.grabHeader(akey)
#ok, headers are all done except for content-length
#Content-Length: 0 should always be valid, but it's
#not working for some reason on get requests!
if mybody.mysize!=0 or myheader.verb!="GET":
if not myheader.surpressContentLength():
request+="Content-Length: "+str(len(mybody.data))+"\r\n"
#ok, all headers are done, finish with blank line
request+="\r\n"
#ok, now add body
request+="".join(mybody.data)
#done!
return request
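#minimal usage sketch for constructRequest (illustrative only; assumes the
#header and body classes defined in spkproxy.py below)
def _example_constructRequest():
    h=spkproxy.header()
    h.verb="GET"
    h.URL="/index.html"
    h.version="HTTP/1.1"
    h.addHeader("Host","www.immunitysec.com")
    b=spkproxy.body()
    #yields "GET /index.html HTTP/1.1\r\nHost: www.immunitysec.com\r\n\r\n"
    #(no Content-Length line, since the body is empty and the verb is GET)
    return constructRequest(h,b)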
#takes in a dict, returns A=B&C=D, etc.
def joinargs(argdict,orderlist=[]):
first=1
result=""
donelist=[]
for akey in orderlist:
donelist.append(akey)
if not first:
result+="&"
first=0
result+=akey+"="+argdict[akey]
for akey in argdict.keys():
if akey in donelist:
continue
if not first:
result+="&"
first=0
result+=akey+"="+argdict[akey]
return result
#returns None on error
#returns a dictionary of a string split like a normal HTTP argument list
def splitargs(argstring,orderlist=None):
    #don't use a mutable default argument here - callers that omit orderlist
    #would otherwise share (and keep appending to) the same list between calls
    if orderlist is None:
        orderlist=[]
    resultDict={}
    templist=argstring.split("&")
for pair in templist:
if pair!="":
templist2=pair.split("=")
if len(templist2)<2:
#print "Failed to parse the URL arguments because of
#invalid number of equal signs in one argument in:
#\""+pair+"\" len="+str(len(templist2))
return None
else:
#add this argument to the Dict
orderlist.append(templist2[0])
resultDict[templist2[0]]="=".join(templist2[1:])
return resultDict
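#round-trip sketch for splitargs/joinargs (illustrative; assumes the orderlist
#handling above): splitting records the argument order, and joining with that
#orderlist reproduces the original query string
def _example_splitjoinargs():
    order=[]
    d=splitargs("user=bob&token=a%3Db&mode=fast",orderlist=order)
    assert d=={"user":"bob","token":"a%3Db","mode":"fast"}
    assert order==["user","token","mode"]
    assert joinargs(d,orderlist=order)=="user=bob&token=a%3Db&mode=fast"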
#turns a string into a one character list
def splitstring(astring):
alist=[]
for ch in astring:
alist.append(ch)
return alist
def printFormEntry(name,value):
result= name+": \n"
return result
#here is where you would add actions that you want to take when
#you have an argument or variable (on the rewrite request page)
def printFormEntryAndValue(name,key,value,file=""):
result=""
result+=name+": : "
result+=""
result+=" \n"
#if we have a file (e.g. we are an actual argument)
if file != "":
result+="Entry Actions: INJECTIONSCAN"
result+="\n"
result+="PASSWORD"
result+=" \n"
return result
def printFormCheckbox(name,checked):
result=""
result+=name+": \n"
return result
def printHiddenEntry(name,value):
result=""
result+="\n"
return result
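#the printForm* helpers above emit HTML form fields for the rewrite-request
#page; a hypothetical helper of the same shape (the tag names and attributes
#here are illustrative guesses, not the original markup) might look like:
def _sketch_printFormEntry(name,value):
    result=name+": <input type=\"text\" name=\""+name+"\" value=\""+value+"\"><br>\n"
    return result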
#returns 1 if they are basically the same
#filters out Date: and whatnot
#this is basically to detect if we get different Cookies
#this is lame, but it should work ok
def headerdictcmp(dict1,dict2):
for akey in dict1.keys():
if akey=="Date":
continue
if not dict2.has_key(akey):
return 0
for bkey in dict1[akey]:
if not bkey in dict2[akey]:
return 0
return 1
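#tiny illustration of headerdictcmp (a sketch, assuming Python 2 dicts of
#header-name -> list-of-values, as used by the header class below)
def _example_headerdictcmp():
    d1={"Date":["Mon, 01 Jan"],"Set-Cookie":["session=1"]}
    d2={"Date":["Tue, 02 Jan"],"Set-Cookie":["session=1","tracking=2"]}
    assert headerdictcmp(d1,d2)==1   #Date is ignored, d1's cookies all appear in d2
    assert headerdictcmp(d2,d1)==0   #but d2 has a cookie that d1 does not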
#hashes a requestandresponse so we can do matches quickly
#returns a string as the hash
def genhash(clientheader,clientbody,serverheader,serverbody):
#print "in genhash"
CH=clientheader.genhash()
CB=clientbody.genhash()
SH=serverheader.genhash()
SB=serverbody.genhash()
return CH+CB+SH+SB
#hashes a string to a number, then returns that number as a string
def hashstring(astring):
i=0
#print "in hashstring"
if astring=="":
return ""
hashnum=0
l=len(astring)
while i")
if index2==-1:
continue
form=form[:index2]
if debug_daveformparse:
print "***Form Url is "+url
argsDict={}
inputList=form.split("
def collectURLSFromPage(page):
resultList=[]
#print "Doing form parser"
if page.count("
"
if len(cb.data)>0:
bodyargs=daveutil.splitargs("".join(obj.clientbody.data))
if bodyargs!=None:
for akey in bodyargs.keys():
result+=daveutil.printFormEntryAndValue("BodyArg"+str(i),akey,bodyargs[akey],requestfile)
i=i+1
#some extra body arguments if the user wants
for i in range(i,i+5,1):
result+=daveutil.printFormEntryAndValue("BodyArg"+str(i),"","")
result+=""
result+=""
result+=""
return result
#just a little default header thing
def addHeader(self,data):
result="HTTP/1.1 200 OK\r\n"
result+="Server: SPIKE Proxy 1.1\r\n"
result+="Content-Type: text/html\r\n"
result+="Content-Length: "+str(len(data))+"\r\n"
result+="\r\n"
result+=data
return result
#supports rewrite!
#sends the actual request to the remote server!
def sendrequest(self,myheader,mybody):
result=""
#new header and body to fill up
newh=spkproxy.header()
newb=spkproxy.body()
#now disassemble myheader
#debug
#keys=myheader.URLargsDict.keys()
#print "Keys: "+str(keys)
bodyDict=mybody.getArgsDict()
#print "BodyDict=%s"%str(bodyDict)
newh.URL=urllib.unquote_plus(bodyDict["URL"])
#print "newh.URL="+newh.URL
newh.verb=urllib.unquote_plus(bodyDict["Verb"])
newh.connectHost=urllib.unquote_plus(bodyDict["ConnectHost"])
newh.connectPort=urllib.unquote_plus(bodyDict["ConnectPort"])
newh.version="HTTP/1.1"
#checkbox, only exists if it is checked
newh.clientisSSL= bodyDict.has_key("SSL")
#handle each other
did=["SPIKE_TRIGGER","URL","Verb","ConnectHost","ConnectPort","SSL"]
firstbodyarg=1
for akey in bodyDict.keys():
#filter the ones we already did
if akey in did:
continue
#is it a header value?
if akey.count("Header"):
#names
if akey[-1]=="N":
valuename=akey[:-1]+"V"
headername=akey
else:
valuename=akey
headername=akey[:-1]+"N"
header=bodyDict[headername]
value=bodyDict[valuename]
#add this to the did list so we don't do it again
did.append(valuename)
did.append(headername)
if header!="":
if not newh.headerValuesDict.has_key(header):
newh.headerValuesDict[header]=[]
newh.headerValuesDict[header].append(urllib.unquote_plus(value))
if akey.count("URLArg"):
#names
if akey[-1]=="N":
valuename=akey[:-1]+"V"
argname=akey
else:
valuename=akey
argname=akey[:-1]+"N"
arg=urllib.quote_plus(bodyDict[argname])
value=bodyDict[valuename]
#add this to the did list so we don't do it again
did.append(valuename)
did.append(argname)
#store it
if arg!="":
newh.URLargsDict[arg]=urllib.unquote_plus(value)
if akey.count("Body"):
#names
if akey[-1]=="N":
valuename=akey[:-1]+"V"
argname=akey
else:
valuename=akey
argname=akey[:-1]+"N"
arg=urllib.quote_plus(bodyDict[argname])
value=bodyDict[valuename]
#add this to the did list so we don't do it again
did.append(valuename)
did.append(argname)
#storeit
#print "arg=%s argname=%s valuename=%s"%(arg,argname,valuename)
#print "Args:%s"%str(myheader.URLargsDict)
if arg!="":
if not firstbodyarg:
newb.data.append("&")
#we must put characters into the body, not strings
newstring=urllib.unquote_plus(arg)+"="+urllib.unquote_plus(value)
for ch in newstring:
newb.data.append(ch)
firstbodyarg=0
#ok, so now we have a new header and body (newh, newb)
#print "newbody=%s"%str(newb.data)
result=self.makeRequest(newh,newb)
return result
def saveInRequestCache(self,filename):
self.requestCache=[filename]+self.requestCache
#cut the last entry off if we're getting too big
if len(self.requestCache)==self.requestCacheMaxLength:
del self.requestCache[-1]
def log(self,loginfo):
timeoflog=time.asctime()
logstring= "[%s] : %s" % (timeoflog,loginfo)
#print it out to our running string
print logstring
self.logs=[logstring]+self.logs
if len(self.logs)==self.maxlogs:
del self.logs[-1]
#makes a request - doesn't fork off a new thread
#takes in a header and body
#does handle SSL
#returns a header and body from the server as a string
def makeRequest(self,newh,newb):
#we send ourselves in as the UI for our child request
myconnection=spkproxy.spkProxyConnection(None,self,proxy=self.proxy,ntlm=self.ntlm)
myconnection.clientisSSL=newh.clientisSSL
if newh.clientisSSL:
myconnection.sslHost=newh.connectHost
myconnection.sslPort=newh.connectPort
result=myconnection.sendRequest(newh,newb)
return result
    #sets up the triggers for error messages we detect
def setupTriggers(self):
self.scantriggers=[]
self.scantriggers.append(("ODBC","ODBC Error!"))
self.scantriggers.append(("Internal Server","Internal Server Error!"))
self.scantriggers.append(("SQLException","SQL Injection flaw on DB2!"))
self.scantriggers.append(("SQLSTATE","SQL Injection flaw on DB2!"))
self.scantriggers.append(("Volume in drive","dir.exe was spawned!"))
self.scantriggers.append(("Microsoft Windows 2000 [Version","cmd.exe was spawned!"))
self.scantriggers.append(("Internal Server Error","Internal Server Error was detected!"))
    #returns the matched trigger string, or "" if nothing matched
    #BUGS: we only return one trigger at a time
def scanForTriggers(self,serverheader,serverbody):
retval=""
allbody="".join(serverbody.data)
for triggerstring,triggervalue in self.scantriggers:
if allbody.count(triggerstring):
retval=triggerstring
return retval
###XML TESTS
def doXMLTest(self,file):
#init code
self.XMLDirectoriesScanned=[]
self.XMLFilesScanned=[]
self.XMLSitesScanned=[]
self.xmlTest(file)
#fin code
self.log("Completely done with VulnXML Test on %s!"%file)
return "Done with VulnXML Tests on %s" % (file)
def xmlTest(self,file):
#if the file is a directory or site, recurse into it
#if the site-only has been clicked, only run site tests
realfile=daveutil.pathjoin(self.basedir,file)
#we have to use the directory to get the SITE because
        #we do not necessarily have an actual request file
#get the first directory after the basedir
site=daveutil.pathsplit(realfile.replace(self.basedir,""))[0]
#split it up
siteList=site.split("_")
#get the site info
sitename=siteList[0]
siteport=siteList[1]
siteSSL=int(siteList[2])
self.runXMLSiteTests(sitename,siteport,siteSSL)
#here we handle directories transparently!
if os.path.isdir(realfile):
filelist=os.listdir(realfile)
for newfile in filelist:
realnewfile=daveutil.pathjoin(file,newfile)
self.log("Delving XML test into: "+realnewfile)
#self.log("XML SITES SCANNED %s"%str(self.XMLSitesScanned))
self.xmlTest(realnewfile)
return "Done with xmlTesting the %s directory!"%file
#we are a file - meaning we are an actual request
self.runXMLFileandVariableTests(file)
self.log("Finished xmlTest on %s"%file)
return "Done with xmlTest"
def runXMLSiteTests(self,sitename,siteport,siteSSL):
if "_".join([sitename,str(siteport),str(siteSSL)]) in self.XMLSitesScanned:
#self.log("MATCHED")
return
self.log("Doing XML Site Tests on %s %s %d"%(sitename,siteport,siteSSL))
time.sleep(15)
#self.log("XML SITES SCANNED %s"%str(self.XMLSitesScanned))
self.XMLSitesScanned.append("_".join([sitename,str(siteport),str(siteSSL)]))
#self.XMLSitesScanned.append("ASDF")
siteTestsDir=os.path.join(os.path.join(os.getcwd(),self.VulnXMLDirectory),self.VulnXMLSiteTestDirectory)
allSiteTests=os.listdir(siteTestsDir)
for sitetest in allSiteTests:
if sitetest[-4:]!=".xml":
continue
if self.stopallactions==1:
return "stopped"
#load the site test
siteTest=VulnXML.VulnXMLTest(file=daveutil.pathjoin(siteTestsDir,sitetest))
siteTest.setUI(self)
#run the site test against our site
results=siteTest.SiteRun((sitename,siteport,siteSSL))
#report the results
for message in results.logMessages:
self.log("%s"%message)
return
def runXMLDirectoryTests(self,directory,infile):
#check if we've done this one
if directory in self.XMLDirectoriesScanned:
return
self.log("runXMLDirectoryTests on %s"%directory)
self.XMLDirectoriesScanned.append(directory)
dirTestsDir=os.path.join(os.path.join(os.getcwd(),self.VulnXMLDirectory),self.VulnXMLDirectoryTestsDirectory)
allDirTests=os.listdir(dirTestsDir)
for dirtest in allDirTests:
if dirtest[-4:]!=".xml":
continue
if self.stopallactions==1:
return "stopped"
#load the site test
dirTest=VulnXML.VulnXMLTest(file=daveutil.pathjoin(dirTestsDir,dirtest))
dirTest.setUI(self)
#run the directory test against our site
#using infile as the template request
#print "Directory = %s"%directory
results=dirTest.DirRun(directory,daveutil.pathjoin(self.basedir,infile))
#report the results
for message in results.logMessages:
self.log("%s"%message)
return
def runXMLFileandVariableTests(self,infile):
self.log("RunXMLFileandVariableTests on %s"%infile)
url=daveutil.getURLfromFile(daveutil.pathjoin(self.basedir,infile))
dirs=daveutil.getDirsFromURL(url)
for dir in dirs:
self.runXMLDirectoryTests(dir,os.path.join(self.basedir,infile))
fileTestsDir=os.path.join(os.path.join(os.getcwd(),self.VulnXMLDirectory),self.VulnXMLFileTestsDirectory)
allFileTests=os.listdir(fileTestsDir)
for filetest in allFileTests:
if filetest[-4:]!=".xml":
continue
if self.stopallactions==1:
return "stopped"
ftest=VulnXML.VulnXMLTest(file=os.path.join(fileTestsDir,filetest))
ftest.setUI(self)
results=ftest.FileRun(daveutil.pathjoin(self.basedir,infile))
for message in results.logMessages:
self.log("%s"%message)
variableTestsDir=os.path.join(os.path.join(os.getcwd(),self.VulnXMLDirectory),self.VulnXMLVariableTestsDirectory)
allVariableTests=os.listdir(variableTestsDir)
for variabletest in allVariableTests:
if variabletest[-4:]!=".xml":
continue
if self.stopallactions==1:
return "stopped"
vtest=VulnXML.VulnXMLTest(file=os.path.join(variableTestsDir,variabletest))
vtest.setUI(self)
results=vtest.VariablesRun(os.path.join(self.basedir,infile))
for message in results.logMessages:
self.log("%s"%message)
return "Done with runXMLFileandVariableTests"
#!/usr/bin/python
#
#SPIKE Proxy file: spkproxy.py
#
#Usage: python spkproxy.py [port:8080]
###################################################################
#Requires:
#pyOpenSSL v 5.0 pre or >
#python 2.2
###################################################################
#Version 1.1
#Author: Dave Aitel (dave@immunitysec.com)
#License: GPL v 2.0
####################################################################
#Known Bugs:
#1.
#http://www.btinternet.com/~wildfire/reference/httpstatus/500.htm
#for some reason the header is not parsed correctly...possibly TheCounter/2.1
#web server does not use \r\n?
#2. Netscape toolbar (from cnn.com) is not quite right
#FIXED 3. BBC.co.uk news - needs :// joined to work
#####################################################################
#BEGIN IMPORTS
import socket
import sys
from threading import Thread
import string
import os
from OpenSSL import SSL
import getopt
sys.path.append('ntlm')
#need to import aps stuff so we can send NTLM to the remote system
#annoyingly, he named it utils
from ntlmutils import str2unicode
import ntlm_messages
import ntlm_procs
#threadsafe workaround for PyOpenSSL 1.5.1
#import OpenSSL.tsafe
#should use OpenSSL.tsafe.Connection instead of OpenSSL.SSL.Connection!
#but we can't cause it doesn't seem to work
#default UI, could add others.
import spikeProxyUI
import daveutil
import time
#time all sockets out at three seconds
import timeoutsocket
timeoutsocket.setDefaultSocketTimeout(3)
import versioncheck
#END IMPORTS
#Begin Code!
VERSION="1.4.8"
default404stringlist=["Page Not Found"]
#### you change these to say what hosts and pages are ALLOWED. If they
#are not set, ALL are allowed
restrictedhosts=[]
restrictedpages=[]
denied1="ErrorYou are not allowed to visit that page during this test, sorry. Try unsetting your proxy temporarily."
deniedstring="HTTP/1.1 404 404 Access Denied !\r\nContent-Length: %d"%len(denied1)+"\r\n\r\n"+denied1
#Class myConnection is used to wrap sockets so we can have some basic
#abstraction over which ssl library we use, for example
#we basically wrap a few socket calls here
class MyConnection:
def __init__(self,conn):
self.doSSL=0
self.mysocket=conn
def recv(self,size):
#if self.doSSL:
# print "Reciving data as ssl!"
#print "Recieving %d bytes" % size
#if self.doSSL:
# print "Reading since we are SSL"
# return self.mysocket.read(size)
result=self.mysocket.recv(size)
#print "Returned from recv()"
return result
#reliable send over socket
def send(self,data):
sizetosend=len(data)
sentsize=0
        while sentsize<sizetosend:
            #send() may not accept all of the data in one call, so keep looping
            sentsize+=self.mysocket.send(data[sentsize:])
        return sentsize
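    #startSSLserver below hands self.verify_cb to ctx.set_verify; since we run
    #with SSL.VERIFY_NONE, a minimal accept-everything callback (using
    #pyOpenSSL's standard verify-callback signature) is enough here
    def verify_cb(self,conn,cert,errnum,depth,ok):
        return ok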
def startSSLserver(self):
debug_ssl=0
dir = os.path.dirname(sys.argv[0])
if dir == '':
dir = os.curdir
self.mysocket.send("HTTP/1.1 200 Connection established\r\n\r\n")
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_timeout(5)
ctx.set_verify(SSL.VERIFY_NONE, self.verify_cb) # Don't demand a certificate
try:
ctx.use_privatekey_file (os.path.join(dir, 'server.pkey'))
except:
print "Couldn't find file %s"%(os.path.join(dir, 'server.pkey'))
ctx.use_certificate_file(os.path.join(dir, 'server.cert'))
ctx.load_verify_locations(os.path.join(dir, 'CA.cert'))
#normally would be SSL.connection, but we want to be threadsafe
self.mysocket = SSL.Connection(ctx, self.mysocket)
#only works with pyOpenSSL 5.0pre or >
self.mysocket.set_accept_state()
if debug_ssl:
print "State="+self.mysocket.state_string()
#done automatically
#self.mysocket.do_handshake()
self.doSSL=1
if debug_ssl:
print "Now using SSL to talk to client"
#wraps socket.close
def close(self):
#print "calling connection.close"
self.mysocket.close()
return
    #probes the socket with a zero-byte send; returns 1 if that raises (peer closed), else 0
def gotclosed(self):
#print "Checking if we got closed"
try:
data=self.mysocket.send("")
except:
#print "CAUGHT EXCEPTION CHECKING IF WE WERE CLOSED"
return 1
return 0
###########################################################################
#class header is what we use to store request and response headers
class header:
def __init__(self):
self.clear()
    #these two functions aren't necessary anymore
#we have to remove any sockets from our header...
#def __getstate__(self):
# odict = self.__dict__.copy() # copy the dict since we change it
# del odict['connection'] # remove filehandle entry
# return odict
#you need to sometimes set the connection's state into SSL
#def setConnection(self,conn):
# self.connection=conn
# return
#clears out the data structure - used for init
def clear(self):
self.data=[]
self.done=0
self.goodHeader=0
self.clientisSSL=0
self.verb=""
#for the first request, we see a CONNECT verb
self.sawCONNECT=0
self.firstline="" #sheesh
#1 if we are reading a response instead of a GET/POST, etc
self.responseHeader=0
self.wasChunked=0
#here is basically what we return from parsing the headers
self.URLargsDict={}
self.headerValuesDict={}
self.useSSL=0
self.connectHost=""
self.URL=""
self.sawsslinit=0
self.connectPort=0
self.mybodysize=0
self.useRawArguments=0
self.allURLargs=""
self.version=""
#set this to not send a content-length
self.doSurpressContentLength=0
#variables for server response headers
self.returncode=""
self.returnmessage=""
self.proxyHeader=""
self.orderlist=[]
return
def getProxyHeader(self):
return self.proxyHeader
#fixes the URL to not have a ? in it if it happens to
def normalize(self):
if self.URL.count("?")>0 and self.URLargsDict=={} and self.useRawArguments==0:
urlbit=self.URL[:]
#if we have a url as well
self.URL=urlbit.split("?")[0]
#if we have arguments too
if len(urlbit.split("?"))>1:
self.allURLargs="?".join(urlbit.split("?")[1:])
#print "SELF.allURLARGS=%s"%self.allURLargs
#print "SELF.URL=%s"%self.URL
self.URLargsDict=daveutil.splitargs(self.allURLargs,orderlist=self.orderlist)
if self.URLargsDict==None:
self.URLargsDict={}
self.useRawArguments=1
return
else:
self.URL+="?"
return
#returns a site tuple (used for VulnXML)
def getSiteTuple(self):
result=(self.connectHost,self.connectPort,self.clientisSSL)
return result
#sets us up from a site tuple
def setSiteTuple(self,site):
self.connectHost=site[0]
self.connectPort=site[1]
self.clientisSSL=site[2]
return
#debug routine
def printme(self):
#print "All my stuff:"
result=""
result+= "Host: "+self.connectHost + "\n"
result+= "Port: "+str(self.connectPort) + "\n"
result+= "SSL : "
if self.clientisSSL:
result+="Yes"
else:
result+="No"
result+="\n\n"
result+=self.verb
for key in self.headerValuesDict.keys():
for value in self.headerValuesDict[key]:
result+=key+": "+value+"\n"
return result
#returns http://www.cnn.com from our header information
def getSite(self):
result=""
if self.useSSL:
result+="https://"
else:
result+="http://"
result+=self.connectHost
if self.connectPort!=80 and self.connectPort!=443:
result+=":"+str(self.connectPort)
return result
#returns 1 if 2 headers (self and other) are basically the same
def issame(self,other):
#we don't compare the header itself. That makes us
#get false negatives with Date: headers and such
#self.headerValuesDict==other.headerValuesDict and \\
if cmp(self.URL,other.URL)==0 and \
self.clientisSSL==other.clientisSSL and \
self.firstline==other.firstline and \
cmp(self.URLargsDict,other.URLargsDict)==0 and \
self.connectPort==other.connectPort and \
self.mybodysize==other.mybodysize and \
daveutil.headerdictcmp(self.headerValuesDict,other.headerValuesDict) and \
self.allURLargs==other.allURLargs:
return 1
return 0
#returns a string that is a "hash"
def genhash(self):
hash=""
hash+=self.verb+self.returncode
hash+=daveutil.hashstring(self.URL+self.allURLargs)
#hash the cookies
if self.headerValuesDict.has_key("Cookie"):
for key in self.headerValuesDict["Cookie"]:
hash+=daveutil.hashstring(key)
if self.headerValuesDict.has_key("Set-Cookie"):
for key in self.headerValuesDict["Set-Cookie"]:
hash+=daveutil.hashstring(key)
#done!
#return it encoded so we get rid of slashes
return daveutil.strencode(hash,"A")
def setSurpressContentLength(self):
self.doSurpressContentLength=1
return
def surpressContentLength(self):
return self.doSurpressContentLength
def setclientSSL(self):
self.useSSL=1
self.clientisSSL=1
return
def addData(self,moredata):
#print "addData "+moredata
self.data.append(moredata)
#print self.data[-4:]
if self.data[-4:]==['\r', '\n', '\r', '\n']:
#print "Got end of header!"
self.done=1
#print "All data="+"".join(self.data)
self.verifyHeader()
#we shouldn't NEED this, but economist.com has a misbehaving
#IIS 5.0 server which does this!!!
if self.data[-2:]==['\n','\n']:
print "Weird \\n\\n in header!"
self.done=1
self.verifyHeader()
return
#keys is a set of values for which we're going to look and
#return an integer associated with them from the headers
#we return the first value in the header list as an int
def getIntValue(self,keys):
#iterate over all the keys in the argument until we have a match
#print "all header keys: "+str(self.headerValuesDict.keys())
for akey in keys:
if self.headerValuesDict.has_key(akey):
#print "Int key: "+akey+" matched "+self.headerValuesDict[akey][0]
#we just return the first one we encounter, sorry
#so multiple headers will just be on a first come
#first serve basis
return int(self.headerValuesDict[akey][0])
return 0
#we return the first value in the header list as a string
#KEYS IS A LIST, NOT A STRING!
def getStrValue(self,keys):
#print "all header keys: "+str(self.headerValuesDict.keys())
for akey in keys:
#print "str: "+akey
if self.headerValuesDict.has_key(akey):
return str(self.headerValuesDict[akey][0])
return "0"
def removeHeaders(self,hstring):
if self.headerValuesDict.has_key(hstring):
del self.headerValuesDict[hstring]
def addHeader(self,newheader,newheadervalue):
#print "Adding header "+newheader+": "+newheadervalue
#now we store it, at last
if not self.headerValuesDict.has_key(newheader):
            #initialize it as a list
self.headerValuesDict[newheader]=[]
else:
#print "Duplicate KEY: "+newheader
pass
#just separating them by commas doesn't work for hotmail.com
self.headerValuesDict[newheader].append(newheadervalue)
def verifyHeader(self):
#this little ditty returns a list of lines, without \r\n's
#the -2 is because there were 2 null \r\n thingies on the end
self.allheaders="".join(self.data).split("\r\n")
#print "Self.allheaders="+str(self.allheaders)
firstline=self.allheaders[0]
self.allheaders=self.allheaders[:-2]
#this will fail if we can't parse the first line
if not self.parseFirstLine(firstline):
print "Couldn't parse first line!"
return 0
#did we see a CONNECT?
if self.sawCONNECT:
#print "Saw SSL CONNECT request!"
self.sawsslinit=1
return 1
for headerLine in self.allheaders[1:]:
#print "Doing header line: "+headerLine
tempvalues=headerLine.split(": ")
if len(tempvalues)<2:
#MS hotmail login is lame - uses this header, notice no space:
#P3P:CP="BUS CUR CONo FIN IVDo ONL OUR PHY SAMo TELo"
#so we handle that condition now
tempvalues=headerLine.split(":")
if len(tempvalues)<2:
print "len(tempvalues)!=2 ="+str(len(tempvalues))+" in "+str(tempvalues)
return 0
self.addHeader(tempvalues[0],":".join(tempvalues[1:]))
#print "About to call massageHeaders"
self.massageHeaders()
#print "Headers="+str(self.headerValuesDict)
#print "Got a good header."
self.goodHeader=1
return
#this function takes in
def massageHeaders(self):
#print "Inside massageHeaders"
#non-IE user Agent, for reference
#User-Agent: Mozilla/5.0 Galeon/1.0.3 (X11; Linux i686; U;) Gecko/0
#IE string
IEstring="Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; Bob)"
nonIEstring="Mozilla/5.0 Galeon/1.0.3 (X11; Linux i686; U;) Gecko/0"
#always massage chunked out of the way
#this will cause problems if someone sends over a gig of data
#I doubt that will happen though
if self.getStrValue(["Transfer-Encoding"])=="chunked":
del self.headerValuesDict["Transfer-Encoding"]
self.wasChunked=1
#massage a response differently from a non-response
#uncomment this for dave's cludgy early morning NTLM pass-through
## if self.responseHeader:
## #print "Did not see connection"
## #print "Auth: "+self.getStrValue(["WWW-Authenticate"])
## authenticate=self.getStrValue(["WWW-Authenticate"])
## if authenticate.count("NTLM")>0 or authenticate.count("Negotiate")>0:
## print "Doing band-aide for NTLM"
## self.addHeader("Proxy-Authenticate",authenticate)
## del self.headerValuesDict["WWW-Authenticate"]
## #must replace return code for some reason as well
## #see http://squid.sourceforge.net/ntlm/client_proxy_protocol.html
## self.firstline=self.firstline.replace("401","407")
## return
## #here we replace Proxy-Authentication with Authentication for NTLM
## if self.headerValuesDict.has_key("Proxy-Authorization"):
## self.headerValuesDict["Authorization"]=self.headerValuesDict["Proxy-Authorization"][:]
## del self.headerValuesDict["Proxy-Authorization"]
#by default, use IE 5.0
replaceUserAgent=1
userAgent=IEstring
#change Proxy-Connection to Connection
if self.headerValuesDict.has_key("Proxy-Connection"):
#DEBUG
#print "MassageHeaders: has key proxy-connection"
self.headerValuesDict["Connection"]=self.headerValuesDict["Proxy-Connection"][:]
#print "Connection is now: "+str(self.headerValuesDict["Connection"])
del self.headerValuesDict["Proxy-Connection"]
#replace the User-Agent
if replaceUserAgent:
#just overwrite the damn thing
if self.headerValuesDict.has_key("User-Agent"):
del self.headerValuesDict["User-Agent"]
#comment out the next line for NO user agent
self.addHeader("User-Agent",userAgent)
pass
#save this off before we delete it
self.mybodysize=self.getIntValue(["Content-length","Content-Length"])
        #get rid of Content-Length or Content-length - this is
        #a requirement since we recalculate it later for fun!
if self.headerValuesDict.has_key("Content-length"):
del self.headerValuesDict["Content-length"]
if self.headerValuesDict.has_key("Content-Length"):
del self.headerValuesDict["Content-Length"]
#no return value for massageHeaders
return
def parseFirstLine(self,firstline):
#print "firstline="+firstline
templist=firstline.split(" ")
if len(templist)<2:
print "First line of header has less than 2 members!"
return 0
self.verb=templist[0]
if self.verb in [ "HTTP/1.1", "HTTP/1.0" ]:
#print "Response header - not verifying the first line of %s!" % (firstline)
self.responseHeader=1
if len(templist)>1:
self.returncode=templist[1]
if len(templist)>2:
self.returnmessage=templist[2]
self.firstline=firstline
return 1
#TODO: remove this code from the header class out into the spkProxy class
#this is the only place we use self.connection!
#SSL proxy check
if self.verb=="CONNECT":
#WE ARE SSL!
#signifies we connect to server with ssl
self.useSSL=1
#signifies we connect to client with ssl
self.clientisSSL=1
self.sawCONNECT=1
self.connectHost=templist[1].split(":")[0]
#no port would be weird, but maybe it'll happen...
            if len(templist[1].split(":"))<2:
                self.connectPort=443
            else:
                #keep the port numeric, the same way processProxyUrl stores it
                self.connectPort=int(templist[1].split(":")[1])
return 1
if not self.processProxyUrl(templist[1]):
return 0
#HTTP/1.1 or HTTP/1.0
self.version=templist[2]
#print "VERB="+self.verb+" URL="+self.URL+" version="+self.version
return 1
def processProxyUrl(self, proxyurl):
#here is basically what we return
self.URLargsDict={}
self.useSSL=0
self.connectHost=""
#this might already be set if we got an SSL proxy request
if not self.connectPort:
self.connectPort=80
self.URL=""
#print "processProxyUrl: "+proxyurl
#just in case we ARE doing ssl...
urlbit=proxyurl
#if we're not doing an SSL proxy
if not self.clientisSSL:
#print "proxyURL is not SSL"
#rip the http:// off
urltype=proxyurl.split("://")[0]
if len(proxyurl.split("://")) < 2:
print "Need something after the http:// - exiting this thread"
return 0
#else we are good to go...we reassign urlbit here
#need to do join because of multiple :// in arguments and stuff
#should fix bbc news error
urlbit="://".join(proxyurl.split("://")[1:])
if urltype=="https":
#this is probably broken: REVISIT
self.useSSL=1
elif urltype!="http":
print "unknown url type "+urltype
return 0
#must have http://something
if len(proxyurl.split("://"))<2:
print "must have http://something"
return 0
self.connectHost=urlbit.split("/")[0]
#get rid of the host from urlbit
if len(urlbit.split("/"))<2:
urlbit="/"
else:
urlbit="/".join(urlbit.split("/")[1:])
if urlbit=="":
urlbit="/"
#lame, but should work
if urlbit[0]!="/":
urlbit="/"+urlbit
#print "connectHost="+self.connectHost
if len(self.connectHost.split(":"))>1:
#print "ConnectHost Split: "+str(self.connectHost.split(":"))
self.connectPort=int(self.connectHost.split(":")[1])
self.connectHost=self.connectHost.split(":")[0]
#print "Set self.connectHost to "+self.connectHost
if self.connectHost=="":
print "Error: empty connect host!"
return 0
#end if self.clientisSSL==0:
#TODO: Fix this to work on blah.ng/asdf=asdf&asdf2=asdf2
#this should work, but there's no way for me, as the client
#to really know
if urlbit.count("?")==0 and urlbit.count("=")>0:
indexequal=urlbit.find("=")
if indexequal!=-1:
indexfirstslash=urlbit.rfind("/",0,indexequal)
if indexfirstslash!=-1:
#print "original = "+urlbit
#print "indexequal="+str(indexequal)
#print "indexfirstslash="+str(indexfirstslash)
urlbit=urlbit[:indexfirstslash]+"?"+urlbit[indexfirstslash+1:]
#print "new="+urlbit
#if we have a url as well
self.URL=urlbit.split("?")[0]
#if we have arguments too
if len(urlbit.split("?"))>1:
self.allURLargs="?".join(urlbit.split("?")[1:])
#print "SELF.allURLARGS=%s"%self.allURLargs
#print "SELF.URL=%s"%self.URL
self.URLargsDict=daveutil.splitargs(self.allURLargs,orderlist=self.orderlist)
if self.URLargsDict==None:
self.URLargsDict={}
self.useRawArguments=1
return 1
#got here! success!
#we now have URLargsDict
return 1
def isdone(self):
#print "self.isdone called "+str(self.done)
if self.done==0:
return 0
return 1
def gotGoodHeader(self):
return self.goodHeader
def bodySize(self):
return self.mybodysize
def grabHeader(self,header):
if self.headerValuesDict.has_key(header):
returnstr=""
#iterate over the list and add a line for each
for value in self.headerValuesDict[header]:
returnstr+=header+": "+value+"\r\n"
return returnstr
else:
return ""
def setProxyHeader(self,newheader):
self.proxyHeader=newheader
return
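#small usage sketch for the header class (illustrative only): parse a proxy
#request URL, then emit multi-valued headers the way grabHeader does
def _example_header():
    h=header()
    h.processProxyUrl("http://www.immunitysec.com:8080/cgi-bin/test.cgi?a=1&b=2")
    assert h.connectHost=="www.immunitysec.com"
    assert h.connectPort==8080
    assert h.URL=="/cgi-bin/test.cgi"
    assert h.URLargsDict=={"a":"1","b":"2"}
    #repeated headers come back as one line each, not comma-joined
    h.addHeader("Set-Cookie","a=1")
    h.addHeader("Set-Cookie","b=2")
    assert h.grabHeader("Set-Cookie")=="Set-Cookie: a=1\r\nSet-Cookie: b=2\r\n"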
class body:
def __init__(self):
self.mysize=0
self.data=[]
def printme(self):
result= "".join(self.data)
result=daveutil.prettyprint(result)
return result
def setSize(self,size):
self.mysize=size
return
#just compare sizes for speed.
def issame(self,other):
#and self.data==other.data:
if self.mysize==other.mysize :
return 1
return 0
def genhash(self):
hash=""
hash+=daveutil.hashstring("".join(self.data))
return hash
def getArgsDict(self):
argsDict=daveutil.splitargs("".join(self.data))
if argsDict==None:
argsDict={}
return argsDict
def readBlock(self,connection,size):
targetsize=size
tempdata=""
while targetsize > len(tempdata):
#read some data
tempdata+=connection.recv(targetsize-len(tempdata))
#print "Targetsize=%d, len(tempdata)=%d" % (targetsize,len(tempdata))
#print "read "+str(len(tempdata))+" bytes of data in readblock, wanted "+str(size)
self.data+=tempdata
self.mysize+=targetsize
return size
#This handles chunked data cleanly - well, handles it anyways
#this is the cruftiest function ever made.
def read(self,connection,size,waschunked,readtillclosed):
if not waschunked:
if readtillclosed and size==0:
#print "reading till closed"
temp=""
while 1:
#this is a lame way to do it, but hopefully it will work
try:
length=len(temp)
#print "len="+str(length)
temp+=connection.recv(1000)
#print "len2="+str(len(temp))
#WAY crufty here...
if (length==len(temp)):
break
if temp.count("