pax_global_header: comment=a30b3be1b398dd69f34950a9c9a43b29bfc0ec23

pytsk-20190507/.gitignore:

# Files to ignore by git

# Back-up files
*~
*.swp

# Generic auto-generated build files
*.pyc
*.pyo

# Specific auto-generated build files
/.tox
/a.out
/__pycache__
/build
/dist
/MANIFEST
/pytsk3.egg-info
/tmp

# Project specific auto-generated files
/pytsk3.c

pytsk-20190507/.gitmodules:

[submodule "sleuthkit"]
	path = sleuthkit
	url = https://github.com/sleuthkit/sleuthkit.git

pytsk-20190507/.travis.yml:

language: python
matrix:
  include:
  - os: linux
    dist: xenial
    sudo: required
    group: edge
    python: 2.7
  - os: linux
    dist: xenial
    sudo: required
    group: edge
    python: 3.5
  - os: osx
    osx_image: xcode8.3
    language: generic
    env: PYTHONPATH=/Library/Python/2.7/site-packages/
before_install:
- if test ${TRAVIS_OS_NAME} = "linux"; then sudo apt-get update -q && sudo apt-get install -y autopoint; fi
- if test ${TRAVIS_OS_NAME} = "osx"; then brew update && brew install gettext && brew link --force gettext; fi
script:
- python setup.py update
- python setup.py build
- PYTHONPATH=`ls -1d build/lib.*` python run_tests.py
- python setup.py sdist
- python setup.py bdist
- if test ${TRAVIS_OS_NAME} = "linux"; then mkdir -p ${PWD}/tmp/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/ && PYTHONPATH=${PWD}/tmp/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/ python setup.py install --prefix=${PWD}/tmp/; fi

pytsk-20190507/LICENSE:

Copyright 2010, Michael Cohen .

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
pytsk-20190507/MANIFEST.in000066400000000000000000000017041346423473500147100ustar00rootroot00000000000000include LICENSE version.txt include *.c include *.h include *.py exclude *.pyc exclude .git .gitignore .gitmodules exclude .travis.yml exclude API-CHANGES.txt config.log ruleset.xml setupDevRepos.py travis_build.sh recursive-include dpkg * recursive-include examples *.py recursive-include patches *.patch recursive-include sleuthkit * recursive-exclude sleuthkit/autom4te.cache * recursive-exclude sleuthkit/bindings * recursive-exclude sleuthkit/debian * recursive-exclude sleuthkit/docs * recursive-exclude sleuthkit/framework * recursive-exclude sleuthkit/man * recursive-exclude sleuthkit/packages * recursive-exclude sleuthkit/rejistry++ * recursive-exclude sleuthkit/release * recursive-exclude sleuthkit/samples * recursive-exclude sleuthkit/tests * recursive-exclude sleuthkit/tools * recursive-exclude sleuthkit/unit_tests * recursive-exclude sleuthkit/win32 * recursive-exclude sleuthkit/xcode * recursive-include talloc * recursive-exclude test_data * pytsk-20190507/README000066400000000000000000000046021346423473500140320ustar00rootroot00000000000000pytsk is a Python binding for the SleuthKit. The SleuthKit is a complete filesystem analysis tool. In the past PyFlag shipped a Python binding for a statically compiled version which was incorporated in the PyFlag source tree (Version 2.78). That version is now very old and does not support HFS+ which SleuthKit 3.1 does. At the time there were some important functions that we needed to link to but the old libtsk (the shared object produced by older SleuthKit binaries) did not export these - which is the reason for incorporating a slightly modified version in the source tree. These days things are much better - libtsk is designed to be a general purpose library with many useful functions linked in. The overall architecture has been tremendously improved and it is now very easy to use it from an external program. This is a Python binding against the libtsk shared object. Our aim is to make the binding reflect the TSK API as much as possible in capabilities, while at the same time having a nice Pythonic OO interface: 4.2: http://www.sleuthkit.org/sleuthkit/docs/api-docs/4.2/ 4.3: http://www.sleuthkit.org/sleuthkit/docs/api-docs/4.3/ The new binding just links to libtsk which should make it easier to maintain against newer versions. We should be able to rewrite all the SleuthKit tools in Python (using the library and bindings) as a demonstration of what is possible with the new bindings. This page documents how to use the binding from a practical point of view - we want to show examples of how to do some common tasks. There are lots of sample programs in the samples directory to demonstrate how these bindings can be used. If downloaded pytsk using git you'll have to first run: python setup.py update If you want to use the latest version of Sleuthkit that is checked into git (also known as HEAD), instead of the currently supported version, you can run: python setup.py update --use-head To build the bindings just use the standard Python distutils method: python setup.py build python setup.py install At the top level of the source tree. The Python binding is autogenerated from the libtsk header files using a small OO C shim. This means that most of the fields in many of the structs are already available. We aim to provide most of the functionality using this shim (e.g. traversing and iterating over lists etc). 
The authoritative source of documentation is the library API linked above. pytsk-20190507/aff4_errors.h000066400000000000000000000047521346423473500155450ustar00rootroot00000000000000/* AFF4 error functions. * * Copyright 2010, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef AFF4_ERRORS_H_ #define AFF4_ERRORS_H_ #include "class.h" // Some helpful little things #define ERROR_BUFFER_SIZE 1024 /** This is used for error reporting. This is similar to the way python does it, i.e. we set the error flag and return NULL. */ #define EZero 0 #define EGeneric 1 #define EOverflow 2 #define EWarning 3 #define EUnderflow 4 #define EIOError 5 #define ENoMemory 6 #define EInvalidParameter 7 #define ERuntimeError 8 #define EKeyError 9 // Reserved for impossible conditions #define EProgrammingError 10 DLL_PUBLIC void *aff4_raise_errors(int t, char *string, ...); /** We only set the error state if its not already set */ #define RaiseError(t, message, ...) \ aff4_raise_errors(t, "%s: (%s:%d) " message, __FUNCTION__, __FILE__, __LINE__, ## __VA_ARGS__); #define LogWarnings(format, ...) \ do { \ RaiseError(EWarning, format, ## __VA_ARGS__); \ PrintError(); \ } while(0); #define ClearError() \ do {*aff4_get_current_error(NULL) = EZero;} while(0); #define PrintError() \ do {char *error_str; if(*aff4_get_current_error(&error_str)) fprintf(stdout, "%s", error_str); fflush(stdout); ClearError(); }while(0); #define CheckError(error) \ (*aff4_get_current_error(NULL) == error) /** The current error state is returned by this function. This is done in a thread safe manner. */ DLL_PUBLIC int *aff4_get_current_error(char **error_str); // These macros are used when we need to do something which might // change the error state on the error path of a function. #define PUSH_ERROR_STATE { int *tmp_error_p = aff4_get_current_error(NULL); int tmp_error = *tmp_error_p; int exception __attribute__((unused)); #define POP_ERROR_STATE *tmp_error_p = tmp_error;}; #endif /* !AFF4_ERRORS_H_ */ pytsk-20190507/appveyor.yml000066400000000000000000000007611346423473500155440ustar00rootroot00000000000000environment: matrix: - TARGET: "Python 2.7" PYTHON: "C:\\Python27" PYTHON_VERSION: "2.7" - TARGET: "Python 3.6" PYTHON: "C:\\Python36" PYTHON_VERSION: "3.6" install: - cmd: '"C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x86 /release' build_script: - "%PYTHON%\\python.exe setup.py update" - "%PYTHON%\\python.exe setup.py build" test_script: - "set PYTHONPATH=build\\lib.win32-%PYTHON_VERSION%" - "%PYTHON%\\python.exe run_tests.py" pytsk-20190507/class.c000066400000000000000000000027741346423473500144330ustar00rootroot00000000000000/* C class and object types functions. * * Copyright 2013, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "misc.h" #include "class.h" #define BUFF_SIZE 1024 // Noone should instantiate Object directly. this should be already // allocated therefore: DLL_PUBLIC void Object_init(Object this) { this->__class__ = &__Object; this->__super__ = NULL; }; struct Object_t __Object = { &__Object, //.__class__ &__Object, //.__super__ "Object", //.__name__ "", //.__doc__ sizeof(struct Object_t), //.__size NULL //.__extension }; int issubclass(Object obj, Object class) { obj = obj->__class__; while(1) { if(obj == class->__class__) return 1; obj=obj->__super__; if(obj == &__Object || obj==NULL) return 0; }; }; void unimplemented(Object self) { printf("%s contains unimplemented functions.. is it an abstract class?\n", NAMEOF(self)); abort(); }; pytsk-20190507/class.h000066400000000000000000000355231346423473500144360ustar00rootroot00000000000000/* C class and object types functions. * * Copyright 2013, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __CLASS_H__ #define __CLASS_H__ /* Classes and objects in C This file makes it easy to implement classes and objects in C. To define a class we need to perform three steps: Define the class prototype. This is suitable to go in a .h file for general use by other code. Note all classes extend Object. Example:: CLASS(Foo, Object) int x; int y; //This declares a method of a class Foo, called Con returning a //Foo object. In other words it is a constructor. Foo METHOD(Foo, Con, int x, int y); int METHOD(Foo, add); END_CLASS Now we need to define some functions for the constructor and methods. Note that the constuctor is using ALLOCATE_CLASS to allocate space for the class structures. Callers may call with self==NULL to force allocation of a new class. Note that we do not call the constructor of our superclass implicitly here. (Calling the sperclass constructor is optional, but ALLOCATE_CLASS is not.). Foo Foo_Con(Foo self,int x,int y) { self->x = x; self->y = y; return self; }; int Foo_add(Foo this) { return (this->x + this->y); }; Now we need to define the Virtual function table - These are those functions and attributes which are defined in this class (over its superclass). Basically these are all those things in the class definition above, with real function names binding them. 
(Note that by convention we preceed the name of the method with the name of the class): VIRTUAL(Foo,Object) VMETHOD(Con) = Foo_Con; VMETHOD(add) = Foo_add; END_VIRTUAL We can use inheritance too: CLASS(Bar, Foo) Bar METHOD(Bar, Con, char *something) END_CLASS Here Bar extends Foo and defines a new constructor with a different prototype: VIRTUAL(Bar,Foo) VMETHOD(Con) = Bar_Con END_VIRTUAL If there is a function which expects a Foo, we will need to over ride the Foo constructor in the Bar, so the function will not see the difference between the Foo and Bar: CLASS(Bar,Foo) int bar_attr; END_CLASS Foo Bar_Con(Foo self, int x, int y) { ... } VIRTUAL(Bar, Foo) VMETHOD(super.Con) = Bar_Con END_VIRTUAL Note that in this case we are over riding the Con method defined in Foo while creating derived Bar classes. The notation in the VIRTUAL table is to use super.Con, because Foo's Con method (the one we are over riding), can be located by using super.Con inside a Bar object. Imagine now that in Bar_Con we wish to use methods and attributes defined in Bar. Since Bar_Con over rides Bar's base class (Foo) it must have the prototype described above. Since self is of type Foo its impossible to use self->bar_attr (There is no bar_attr in Foo - its in Bar). In this case, we need to make a type cast to convice C that self is actually a Bar not a Foo: Foo Bar_Con(Foo self, int x, int y) { Bar this = (Bar)self; this->bar_attr=1 }; This allows us to access bars attributes. This is a general oddity with C style classes, which C++ and Java hide. In C we must always know which class defines which method and attribute and reference the right class's method. So for example if we want to call a Bar's add method: Bar a; a->super.add() because add is defined in Bar's super class (Foo). Constract this with C++ or Java which hide where methods are defined and simply make all methods appear like they were defined inside the derived class. This takes a while to get used to but the compiler will ensure that the references are correct - otherwise things will generally not compile properly. This difference can be used for good and bad. It is possible in C to call the base class's version of the method at any time (despite the fact it was over ridden). For example: CLASS(Derived, Foo) int METHOD(Derived, add); END_CLASS VIRTUAL(Derived, Foo) VMETHOD(add) = Derived_add END_VIRTUAL If d is a Derived object, we can call Foo's version like this: d->super.add() But Derived's version is accessed by: d->add() Sometimes a derived class may want to over ride the base class's methods as well, in this case the VIRTUAL section should over ride super.add as well. */ #ifdef __cplusplus extern "C" { #endif #include "misc.h" #include #define CLASS(class,super_class) \ typedef struct class ## _t *class; \ DLL_PUBLIC class alloc_ ## class(void); /* Allocates object memory */ \ DLL_PUBLIC int class ## _init(Object self); /* Class initializer */ \ DLL_PUBLIC extern struct class ## _t __ ## class; /* Public class template */ \ struct class ## _t { \ struct super_class ## _t super; /* Superclass Fields we inherit */ \ class __class__; /* Pointer to our own class */ \ super_class __super__; /* Pointer to our superclass */ #define METHOD(cls, name, ... ) \ (* name)(cls self, ## __VA_ARGS__ ) // Class methods are attached to the class but are not called with // an instance. This is similar to the python class method or java // static methods. #define CLASS_METHOD(name, ... 
) \ (*name)(__VA_ARGS__) /* This is a convenience macro which may be used if x if really large */ #define CALL(x, method, ... ) \ (x)->method((x), ## __VA_ARGS__) #define END_CLASS }; /* This is used to set the classes up for use: * * class_init = checks the class template (__class) to see if it has * been allocated. otherwise allocates it in the global context. * * class_Alloc = Allocates new memory for an instance of the * class. This is a recursive function calling each super class in * turn and setting the currently over ridden defaults. So for eample * suppose this class (foo) derives from bar, we first fill the * template with bars methods, and attributes. Then we over write * those with foos methods and attributes. */ #define VIRTUAL(class,superclass) \ struct class ## _t __ ## class; \ \ DLL_PUBLIC class alloc_ ## class(void) { \ class result = talloc_memdup(NULL, &__## class, sizeof(__## class)); \ return result; \ }; \ \ DLL_PUBLIC int class ## _init(Object this) { \ class self = (class)this; \ if(self->__super__) return 1; \ superclass ##_init(this); \ this->__class__ = (Object)&__ ## class; \ self->__class__ = (class)&__ ## class; \ this->__super__ = (Object)&__ ## superclass; \ self->__super__ = (superclass)&__ ## superclass; \ this->__size = sizeof(struct class ## _t); \ this->__name__ = #class; #define SET_DOCSTRING(string) \ ((Object)self)->__doc__ = string #define END_VIRTUAL return 1; }; #define VMETHOD(method) \ (self)->method #define VMETHOD_BASE(base, method) \ (((base)self)->method) #define CLASS_ATTR(self, base, method) \ (((base)self)->method) #define VATTR(attribute) \ (self)->attribute #define NAMEOF(obj) \ ((Object)obj)->__name__ #define SIZEOF(obj) \ ((Object)obj)->__size #define DOCSTRING(obj) \ ((Object)obj)->__doc__ #define INIT_CLASS(class) \ class ## _init((Object)&__ ## class) /* This MACRO is used to construct a new Class using a constructor. * * This is done to try and hide the bare (unbound) method names in * order to prevent name space pollution. (Bare methods may be * defined as static within the implementation file). This macro * ensures that class structures are initialised properly before * calling their constructors. * * We require the following args: * class - the type of class to make * virt_class - The class where the method was defined * constructors - The constructor method to use * context - a talloc context to use. * * Note that the class and virt_class do not have to be the same if * the method was not defined in the current class. For example * suppose Foo extends Bar, but method is defined in Bar but * inherited in Foo: * * CONSTRUCT(Foo, Bar, super.method, context) * * virt_class is Bar because thats where method was defined. */ // The following only initialises the class if the __super__ element // is NULL. This is fast as it wont call the initaliser unnecessaily // This requires the class initializers to have been called // previously. Therefore they are not exported. #define CONSTRUCT(class, virt_class, constructor, context, ...) 
\ (class)(((virt_class) (&__ ## class))->constructor( \ (virt_class) _talloc_memdup( \ context, &__ ## class, \ sizeof(struct class ## _t), \ __location__ "(" #class ")"), \ ## __VA_ARGS__) ) /* _talloc_memdup version #define CONSTRUCT_CREATE(class, virt_class, context) \ (virt_class) _talloc_memdup(context, &__ ## class, sizeof(struct class ## _t), __location__ "(" #class ")") */ #define CONSTRUCT_CREATE(class, virt_class, context) \ (virt_class) talloc_memdup(context, &__ ## class, sizeof(struct class ## _t)) #define CONSTRUCT_INITIALIZE(class, virt_class, constructor, object, ...) \ (class)(((virt_class) (&__ ## class))->constructor(object, ## __VA_ARGS__)) /* This variant is useful when all we have is a class reference * (GETCLASS(Foo)) or &__Foo */ #define CONSTRUCT_FROM_REFERENCE(class, constructor, context, ... ) \ ( (class)->constructor( \ (void *)_talloc_memdup(context, ((Object)class), ((Object)class)->__size, __location__ "(" #class "." #constructor ")"), \ ## __VA_ARGS__) ) /* Finds the size of the class in x */ #define CLASS_SIZE(class) \ ((Object)class)->__size typedef struct Object_t *Object; struct Object_t { //A reference to a class instance - this is useful to be able to //tell which class an object really belongs to: Object __class__; //And its super class: Object __super__; char *__name__; /** Objects may have a doc string associated with them. */ char *__doc__; //How large the class is: int __size; /* A pointer to an extension - An extension is some other arbitrary object which may be linked with this one. */ void *extension; }; #define SUPER(base, imp, method, ...) \ ((base)&__ ## imp)->method((base)self, ## __VA_ARGS__) #define GETCLASS(class) \ (Object)&__ ## class // Returns true if the obj belongs to the class #define ISINSTANCE(obj,class) \ (((Object)obj)->__class__ == GETCLASS(class)) // This is a string comparison version of ISINSTANCE which works // across different shared objects. #define ISNAMEINSTANCE(obj, class) \ (obj && !strcmp(class, NAMEOF(obj))) // We need to ensure that class was properly initialised: #define ISSUBCLASS(obj,class) \ issubclass((Object)obj, (Object)&__ ## class) #define CLASSOF(obj) \ ((Object)obj)->__class__ DLL_PUBLIC void Object_init(Object); DLL_PUBLIC extern struct Object_t __Object; /** Find out if obj is an instance of cls or a derived class. Use like this: if(issubclass(obj, (Object)&__FileLikeObject)) { ... }; You can also do this in a faster way if you already know the class hierarchy (but it could break if the hierarchy changes): { Object cls = ((Object)obj)->__class__; if(cls == (Object)&__Image || \ cls == (Object)&__FileLikeObject || \ cls == (Object)&__AFFObject || ....) { ... }; }; */ int issubclass(Object obj, Object class); DLL_PUBLIC extern void unimplemented(Object self); #define UNIMPLEMENTED(class, method) \ ((class)self)->method = (void *)unimplemented; #define ZSTRING_NO_NULL(str) str , (strlen(str)) #define ZSTRING(str) str , (strlen(str)+1) // These dont do anything but are useful to indicate when a function // parameter is used purely to return a value. They are now used to // assist the python binding generator in generating the right sort // of code #define OUT #define IN // This modifier before a class means that the class is abstract and // does not have an implementation - we do not generate bindings for // that class then. #define ABSTRACT // This modifier indicates that the following pointer is pointing to // a borrowed reference - callers must not free the memory after use. 
#define BORROWED // This tells the autobinder to generated bindings to this struct #define BOUND // This tells the autobinder to ignore this class as it should be // private to the implementation - external callers should not // access this. #define PRIVATE // This attribute of a method means that this method is a // desctructor - the object is no longer valid after this method is // run #define DESTRUCTOR // including this after an argument definition will cause the // autogenerator to assign default values to that parameter and make // it optional #define DEFAULT(x) // This explicitely denote that the type is a null terminated char // ptr as opposed to a pointer to char and length. typedef char * ZString; /* The following is a direction for the autogenerator to proxy the given class. This is done in the following way: 1) a new python type is created called Proxy_class_name() with a constructor which takes a surrogate object. 2) The proxy class contains a member "base" of the type of the proxied C class. 3) The returned python object may be passed to any C functions which expect the proxied class, and internal C calls will be converted to python method calls on the proxied object. */ #define PROXY_CLASS(name) /* This signals the autogenerator to bind the named struct */ #define BIND_STRUCT(name) // This means that the memory owned by this pointer is managed // externally (not using talloc). It is dangerous to use this // keyword too much because we are unable to manage its memory // appropriately and it can be free'd from under us. #define FOREIGN #ifdef __cplusplus } /* closing brace for extern "C" */ #endif #endif /* ifndef __CLASS_H__ */ pytsk-20190507/class_parser.py000066400000000000000000004523751346423473500162230ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2010, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Documentation regarding the Python bounded code. This code originally released as part of the AFF4 project (http://code.google.com/p/aff4/). Memory Management ================= AFF4 uses a reference count system for memory management similar in many ways to the native Python system. The basic idea is that memory returned by the library always carries a new reference. When the caller is done with the memory, they must call aff4_free() on the memory, afterwhich the memory is considered invalid. The memory may still not be freed at this point depending on its total reference count. New references may be taken to the same memory at any time using the aff4_incref() function. This increases the reference count of the object, and prevents it from being really freed until the correct number of aff4_free() calls are made to it. This idea is important for example in the following sequence: FileLikeObject fd = resolver->create(resolver, "w"); RDFURN uri = fd->urn; Now uri hold a reference to the urn attribute of fd, but that attribute is actually owned by fd. If fd is freed in future, e.g. 
(the close method actually frees the fd implicitely): fd->close(fd); Now the uri object is dangling. To prevent fd->urn from disappearing when fd is freed, we need to take another reference to it: FileLikeObject fd = resolver->create(resolver, "w"); RDFURN uri = fd->urn; aff4_incref(uri); fd->close(fd); Now uri is valid (but fd is no longer valid). When we are finished with uri we just call: aff4_free(uri); Python Integration ------------------ For every AFF4 object, we create a Python wrapper object of the corresponding type. The wrapper object contains Python wrapper methods to allow access to the AFF4 object methods, as well as getattr methods for attributes. It is very important to allow Python to inherit from C classes directly - this requires every internal C method call to be diverted to the Python object. The C object looks like this normally: struct obj { __class__ pointer to static struct initialised with C method pointers ... Some private members ... Attributes; /* Following are the methods */ int (*method)(struct obj *self, ....); }; I.e. when the method is called the struct.method member is dereferenced to find the location of the function handling it, the object is stuffed into the first arg, and the parameters are stuffed into following args. Directing Python calls ---------------------- The Python object which is created is a proxy for the c object. When Python methods are called in the Python object, they need to be directed into the C structure and a C call must be made, then the return value must be reconverted into Python objects and returned into Python. This occurs automatically by the wrapper: struct PythonWrapper { PyObject_HEAD void *base; }; When a Python method is called on this new Python type this is what happens: 1) The method name is looked up in the PyMethodDef struct as per normal. 2) If the method is recognised as a valid method the Python wrapper function is called (pyCLASSNAME_method) 3) This method is broken into the general steps: PyObject *pyCLASSNAME_method(PythonWrapper self, PyObject *args, PyObject *kwds) { set up c declerations for all args - call .definition() on all the args and return type parse argument using PyArg_ParseTupleAndKeywords Precall preparations Make the C call Post call processing of the returned value (check for errors etc) Convert the return value to a Python object using: return_type.to_Python_object() return the Python object or raise an exception }; So the aim of the wrapper function is to convert Python args to C args, find the C method corresponding to the method name by dereferencing the c object and then call it. The problem now is what happens when a C method internally calls another method. This is a problem because the C method has no idea its running within Python and so will just call the regular C method that was there already. This makes it impossible to subclass the class and update the C method with a Python method. What we really want is when a C method is called internally, we want to end up calling the Python object instead to allow a purely Python implementation to override the C method. This happens by way of a ProxiedMethod - A proxied method is in a sense the reverse of the wrapper method: return_type ProxyCLASSNAME_method(CLASSNAME self, ....) { Take all C args and create Python objects from them Dereference the object extension ((Object) self)->extension to obtain the Python object which wraps this class. 
If an extension does not exist, just call the method as normal, otherwise make a Python call on the wrapper object. Convert the returned Python object to a C type and return it. }; To make all this work we have the following structures: struct PythonWrapper { PyObject_HEAD struct CLASSNAME *base - This is a copy of the item, with all function pointer pointing at proxy functions. We can always get the original C function pointers through base->__class__ - We also set the base object extension to be the Python object: ((Object) base)->extension = PythonWrapper. This allows us to get back the Python object from base. }; When a Python method is invoked, we use cbase to find the C method pointer, but we pass to it base: self->base->__class__->method(self->base, ....) base is a proper C object which had its methods dynamically replaced with proxies. Now if an internal C method is called, the method will dereference base and retrieve the proxied method. Calling the proxied method will retreive the original Python object from the object extension and make a Python call. In the case where a method is not overridden by Python, internal C method calls will generate an unnecessary conversion from C to Python and then back to C. Memory management in Python extension ------------------------------------- When calling a method which returns a new reference, we just store the reference in the "base" member of the Python object. When Python garbage collects our Python object, we call aff4_free() on it. The getattr method creates a new Python wrapper object of the correct type, and sets its base attribute to point at the target AFF4 object. We then aff4_incref() the target to ensure that it does not get freed until we are finished with it. Python Object ----- | P1 | C Object | Base|-->+------+ | | | C1 | | | | | ----- |Member|--------------+-->+----+ +------+ | | C2 | | | | Getattr ------- | | | Member | P2 | | +----+ | Base |--+ New reference ------- Python Object Figure 1: Python object 1 owns C1's memory (when P1 is GC'ed C1 is freed). A reference to a member of C1 is made via P1's getattr method. The getattr method creates P2 to provide access to C2 by setting base to C2's address. We need to guarantee however, that C2 will not be freed suddenly (e.g. if C1 is freed). We therefore increase C2's reference count using aff4_incref(); """ import io import os import pdb import re import sys import lexer DEBUG = 0 # The pytsk3 version. VERSION = "20190507" # These functions are used to manage library memory. FREE = "aff4_free" INCREF = "aff4_incref" CURRENT_ERROR_FUNCTION = "aff4_get_current_error" CONSTANTS_BLACKLIST = ["TSK3_H_"] # Some constants. DOCSTRING_RE = re.compile("[ ]*\n[ \t]+[*][ ]?") def dispatch(name, type, *args, **kwargs): if not type: return PVoid(name) m = re.match("struct ([a-zA-Z0-9]+)_t *", type) if m: type = m.group(1) type_components = type.split() attributes = set() if type_components[0] in method_attributes: attributes.add(type_components.pop(0)) type = " ".join(type_components) result = type_dispatcher[type](name, type, *args, **kwargs) result.attributes = attributes return result def log(msg): if DEBUG > 0: sys.stderr.write("{0:s}\n".format(msg)) def format_as_docstring(string): # Remove C/C++ comment code statements. string = DOCSTRING_RE.sub("\n", string) byte_string = string.encode("unicode-escape") # Escapes double quoted string. We need to run this after unicode-escape to # prevent this operation to escape the escape character (\). 
In Python 3 # the replace method requires the arguments to be byte strings. byte_string = byte_string.replace(b"\"", b"\\\"") # Make sure to return the string a Unicode otherwise in Python 3 the string # is prefixed with b when written or printed. return byte_string.decode("utf-8") class Module(object): public_api = None public_header = None def __init__(self, name): self.name = name self.constants = set() self.constants_blacklist = CONSTANTS_BLACKLIST self.classes = {} self.headers = "#include \n" self.files = [] self.active_structs = set() self.function_definitions = set() init_string = "" def initialization(self): result = self.init_string + ( "\n" "talloc_set_log_fn((void (*)(const char *)) printf);\n" "// DEBUG: talloc_enable_leak_report();\n" "// DEBUG: talloc_enable_leak_report_full();\n") for cls in self.classes.values(): if cls.is_active(): result += cls.initialise() return result def add_constant(self, constant, type="numeric"): """This will be called to add #define constant macros.""" self.constants.add((constant, type)) def add_class(self, cls, handler): self.classes[cls.class_name] = cls # Make a wrapper in the type dispatcher so we can handle # passing this class from/to Python type_dispatcher[cls.class_name] = handler def get_string(self): """Retrieves a string representation.""" result = "Module {0:s}\n".format(self.name) classes_list = list(self.classes.values()) classes_list.sort(key=lambda cls: cls.class_name) for cls in classes_list: if cls.is_active(): result += " {0:s}\n".format(cls.get_string()) constants_list = list(self.constants) constants_list.sort() result += "Constants:\n" for name, _ in constants_list: result += " {0:s}\n".format(name) return result def private_functions(self): """Emits hard coded private functions for doing various things""" values_dict = { "classes_length": len(self.classes) + 1, "get_current_error": CURRENT_ERROR_FUNCTION} return """ /* The following is a static array mapping CLASS() pointers to their * Python wrappers. This is used to allow the correct wrapper to be * chosen depending on the object type found - regardless of the * prototype. * * This is basically a safer way for us to cast the correct Python type * depending on context rather than assuming a type based on the .h * definition. For example consider the function * * AFFObject Resolver.open(uri, mode) * * The .h file implies that an AFFObject object is returned, but this is * not true as most of the time an object of a derived class will be * returned. In C we cast the returned value to the correct type. In the * Python wrapper we just instantiate the correct Python object wrapper * at runtime depending on the actual returned type. We use this lookup * table to do so. */ static int TOTAL_CLASSES=0; /* This is a global reference to this module so classes can call each * other. */ static PyObject *g_module = NULL; #define CONSTRUCT_INITIALIZE(class, virt_class, constructor, object, ...) 
\\ (class)(((virt_class) (&__ ## class))->constructor(object, ## __VA_ARGS__)) #undef BUFF_SIZE #define BUFF_SIZE 10240 /* Python compatibility macros */ #if !defined( PyMODINIT_FUNC ) #if PY_MAJOR_VERSION >= 3 #define PyMODINIT_FUNC PyObject * #else #define PyMODINIT_FUNC void #endif #endif /* !defined( PyMODINIT_FUNC ) */ #if !defined( PyVarObject_HEAD_INIT ) #define PyVarObject_HEAD_INIT( type, size ) \\ PyObject_HEAD_INIT( type ) \\ size, #endif /* !defined( PyVarObject_HEAD_INIT ) */ #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_HAVE_ITER 0 #endif #if !defined( Py_TYPE ) #define Py_TYPE( object ) \\ ( ( (PyObject *) object )->ob_type ) #endif /* !defined( Py_TYPE ) */ /* Generic wrapper type */ typedef struct Gen_wrapper_t *Gen_wrapper; struct Gen_wrapper_t {{ PyObject_HEAD void *base; /* Value to indicate the base is a Python object. */ int base_is_python_object; /* Value to indicate the base is managed internal. */ int base_is_internal; PyObject *python_object1; PyObject *python_object2; }}; static struct python_wrapper_map_t {{ Object class_ref; PyTypeObject *python_type; void (*initialize_proxies)(Gen_wrapper self, void *item); }} python_wrappers[{classes_length:d}]; /* Create the relevant wrapper from the item based on the lookup table. */ Gen_wrapper new_class_wrapper(Object item, int item_is_python_object) {{ Gen_wrapper result = NULL; Object cls = NULL; struct python_wrapper_map_t *python_wrapper = NULL; int cls_index = 0; // Return a Py_None object for a NULL pointer if(item == NULL) {{ Py_IncRef((PyObject *) Py_None); return (Gen_wrapper) Py_None; }} // Search for subclasses for(cls = (Object) item->__class__; cls != cls->__super__; cls = cls->__super__) {{ for(cls_index = 0; cls_index < TOTAL_CLASSES; cls_index++) {{ python_wrapper = &(python_wrappers[cls_index]); if(python_wrapper->class_ref == cls) {{ PyErr_Clear(); result = (Gen_wrapper) _PyObject_New(python_wrapper->python_type); result->base = item; result->base_is_python_object = item_is_python_object; result->base_is_internal = 1; result->python_object1 = NULL; result->python_object2 = NULL; python_wrapper->initialize_proxies(result, (void *) item); return result; }} }} }} PyErr_Format(PyExc_RuntimeError, "Unable to find a wrapper for object %s", NAMEOF(item)); return NULL; }} static PyObject *resolve_exception(char **error_buff) {{ int *type = (int *){get_current_error:s}(error_buff); switch(*type) {{ case EProgrammingError: return PyExc_SystemError; case EKeyError: return PyExc_KeyError; case ERuntimeError: return PyExc_RuntimeError; case EInvalidParameter: return PyExc_TypeError; case EWarning: return PyExc_AssertionError; case EIOError: return PyExc_IOError; default: return PyExc_RuntimeError; }} }} static int type_check(PyObject *obj, PyTypeObject *type) {{ PyTypeObject *tmp = NULL; // Recurse through the inheritance tree and check if the types are expected if(obj) {{ for(tmp = Py_TYPE(obj); tmp && tmp != &PyBaseObject_Type; tmp = tmp->tp_base) {{ if(tmp == type) return 1; }} }} return 0; }} static int check_error() {{ char *buffer = NULL; int *error_type = (int *)aff4_get_current_error(&buffer); if(*error_type != EZero) {{ PyObject *exception = resolve_exception(&buffer); if(buffer != NULL) {{ PyErr_Format(exception, "%s", buffer); }} else {{ PyErr_Format(exception, "Unable to retrieve exception reason."); }} ClearError(); return 1; }} return 0; }} /* This function checks if a method was overridden in self over a * method defined in type. This is used to determine if a Python class is * extending this C type. 
If not, a proxy function is not written and C * calls are made directly. * * This is an optimization to eliminate the need for a call into Python * in the case where Python objects do not actually extend any methods. * * We basically just iterate over the MRO and determine if a method is * defined in each level until we reach the base class. */ static int check_method_override(PyObject *self, PyTypeObject *type, char *method) {{ struct _typeobject *ob_type = NULL; PyObject *mro = NULL; PyObject *py_method = NULL; PyObject *item_object = NULL; PyObject *dict = NULL; Py_ssize_t item_index = 0; Py_ssize_t number_of_items = 0; int found = 0; ob_type = Py_TYPE(self); if(ob_type == NULL ) {{ return 0; }} mro = ob_type->tp_mro; #if PY_MAJOR_VERSION >= 3 py_method = PyUnicode_FromString(method); #else py_method = PyString_FromString(method); #endif number_of_items = PySequence_Size(mro); for(item_index = 0; item_index < number_of_items; item_index++) {{ item_object = PySequence_GetItem(mro, item_index); // Ok - we got to the base class - finish up if(item_object == (PyObject *) type) {{ Py_DecRef(item_object); break; }} /* Extract the dict and check if it contains the method (the * dict is not a real dictionary so we can not use * PyDict_Contains). */ dict = PyObject_GetAttrString(item_object, "__dict__"); if(dict != NULL && PySequence_Contains(dict, py_method)) {{ found = 1; }} Py_DecRef(dict); Py_DecRef(item_object); if(found != 0) {{ break; }} }} Py_DecRef(py_method); PyErr_Clear(); return found; }} /* Fetches the Python error (exception) */ void pytsk_fetch_error(void) {{ PyObject *exception_traceback = NULL; PyObject *exception_type = NULL; PyObject *exception_value = NULL; PyObject *string_object = NULL; char *str_c = NULL; char *error_str = NULL; int *error_type = (int *) {get_current_error:s}(&error_str); #if PY_MAJOR_VERSION >= 3 PyObject *utf8_string_object = NULL; #endif // Fetch the exception state and convert it to a string: PyErr_Fetch(&exception_type, &exception_value, &exception_traceback); string_object = PyObject_Repr(exception_value); #if PY_MAJOR_VERSION >= 3 utf8_string_object = PyUnicode_AsUTF8String(string_object); if(utf8_string_object != NULL) {{ str_c = PyBytes_AsString(utf8_string_object); }} #else str_c = PyString_AsString(string_object); #endif if(str_c != NULL) {{ strncpy(error_str, str_c, BUFF_SIZE-1); error_str[BUFF_SIZE - 1] = 0; *error_type = ERuntimeError; }} PyErr_Restore(exception_type, exception_value, exception_traceback); #if PY_MAJOR_VERSION >= 3 if( utf8_string_object != NULL ) {{ Py_DecRef(utf8_string_object); }} #endif Py_DecRef(string_object); return; }} /* Copies a Python int or long object to an unsigned 64-bit value */ uint64_t integer_object_copy_to_uint64(PyObject *integer_object) {{ #if defined( HAVE_LONG_LONG ) PY_LONG_LONG long_value = 0; #else long long_value = 0; #endif int result = 0; if(integer_object == NULL) {{ PyErr_Format(PyExc_ValueError, "Missing integer object"); return (uint64_t) -1; }} PyErr_Clear(); result = PyObject_IsInstance(integer_object, (PyObject *) &PyLong_Type); if(result == -1) {{ pytsk_fetch_error(); return (uint64_t) -1; }} else if(result != 0) {{ PyErr_Clear(); #if defined( HAVE_LONG_LONG ) long_value = PyLong_AsUnsignedLongLong(integer_object); #else long_value = PyLong_AsUnsignedLong(integer_object); #endif }} #if PY_MAJOR_VERSION < 3 if(result == 0) {{ PyErr_Clear(); result = PyObject_IsInstance(integer_object, (PyObject *) &PyInt_Type); if(result == -1) {{ pytsk_fetch_error(); return (uint64_t) -1; }} else 
if(result != 0) {{ PyErr_Clear(); #if defined( HAVE_LONG_LONG ) long_value = PyInt_AsUnsignedLongLongMask(integer_object); #else long_value = PyInt_AsUnsignedLongMask(integer_object); #endif }} }} #endif /* PY_MAJOR_VERSION < 3 */ if(result == 0) {{ if(PyErr_Occurred()) {{ pytsk_fetch_error(); return (uint64_t) -1; }} }} #if defined( HAVE_LONG_LONG ) #if ( SIZEOF_LONG_LONG > 8 ) if((long_value < (PY_LONG_LONG) 0) || (long_value > (PY_LONG_LONG) UINT64_MAX)) {{ #else if(long_value < (PY_LONG_LONG) 0) {{ #endif PyErr_Format(PyExc_ValueError, "Integer object value out of bounds"); return (uint64_t) -1; }} #else #if ( SIZEOF_LONG > 8 ) if((long_value < (long) 0) || (long_value > (long) UINT64_MAX)) {{ #else if(long_value < (PY_LONG_LONG) 0) {{ #endif PyErr_Format(PyExc_ValueError, "Integer object value out of bounds"); return (uint64_t) -1; }} #endif return (uint64_t) long_value; }} """.format(**values_dict) def initialise_class(self, class_name, out, done=None): if done and class_name in done: return done.add(class_name) cls = self.classes[class_name] """Write out class initialisation code into the main init function.""" if cls.is_active(): base_class = self.classes.get(cls.base_class_name) if base_class and base_class.is_active(): # We have a base class - ensure it gets written out # first: self.initialise_class(cls.base_class_name, out, done) # Now assign ourselves as derived from them out.write( " {0:s}_Type.tp_base = &{1:s}_Type;".format( cls.class_name, cls.base_class_name)) values_dict = { "name": cls.class_name} out.write(( " {name:s}_Type.tp_new = PyType_GenericNew;\n" " if (PyType_Ready(&{name:s}_Type) < 0) {{\n" " goto on_error;\n" " }}\n" " Py_IncRef((PyObject *)&{name:s}_Type);\n" " PyModule_AddObject(module, \"{name:s}\", (PyObject *)&{name:s}_Type);\n").format( **values_dict)) def write(self, out): # Write the headers if self.public_api: self.public_api.write( "#ifdef BUILDING_DLL\n" "#include \"misc.h\"\n" "#else\n" "#include \"aff4_public.h\"\n" "#endif\n") # Prepare all classes for cls in self.classes.values(): cls.prepare() out.write(( "/*************************************************************\n" " * Autogenerated module {0:s}\n" " *\n" " * This module was autogenerated from the following files:\n").format( self.name)) for filename in self.files: out.write(" * {0:s}\n".format(filename)) out.write( " *\n" " * This module implements the following classes:\n") out.write(self.get_string()) out.write( " ************************************************************/\n") out.write(self.headers) out.write(self.private_functions()) for cls in self.classes.values(): if cls.is_active(): out.write( "/******************** {0:s} ***********************/".format( cls.class_name)) cls.struct(out) cls.prototypes(out) out.write( "/*****************************************************\n" " * Implementation\n" " ****************************************************/\n" "\n") for cls in self.classes.values(): if cls.is_active(): cls.PyMethodDef(out) cls.PyGetSetDef(out) cls.code(out) cls.PyTypeObject(out) # Write the module initializer values_dict = { "module": self.name, "version": VERSION, "version_length": len(VERSION)} out.write(( "/* Retrieves the {module:s} version\n" " * Returns a Python object if successful or NULL on error\n" " */\n" "PyObject *{module:s}_get_version(PyObject *self, PyObject *arguments) {{\n" " const char *errors = NULL;\n" " return(PyUnicode_DecodeUTF8(\"{version:s}\", (Py_ssize_t) {version_length:d}, errors));\n" "}}\n" "\n" "static PyMethodDef 
{module:s}_module_methods[] = {{\n" " {{ \"get_version\",\n" " (PyCFunction) {module:s}_get_version,\n" " METH_NOARGS,\n" " \"get_version() -> String\\n\"\n" " \"\\n\"\n" " \"Retrieves the version.\" }},\n" "\n" " {{NULL, NULL, 0, NULL}} /* Sentinel */\n" "}};\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" "\n" "/* The {module:s} module definition\n" " */\n" "PyModuleDef {module:s}_module_definition = {{\n" " PyModuleDef_HEAD_INIT,\n" "\n" " /* m_name */\n" " \"{module:s}\",\n" " /* m_doc */\n" " \"Python {module:s} module.\",\n" " /* m_size */\n" " -1,\n" " /* m_methods */\n" " {module:s}_module_methods,\n" " /* m_reload */\n" " NULL,\n" " /* m_traverse */\n" " NULL,\n" " /* m_clear */\n" " NULL,\n" " /* m_free */\n" " NULL,\n" "}};\n" "\n" "#endif /* PY_MAJOR_VERSION >= 3 */\n" "\n" "/* Initializes the {module:s} module\n" " */\n" "#if PY_MAJOR_VERSION >= 3\n" "PyMODINIT_FUNC PyInit_{module:s}(void) {{\n" "#else\n" "PyMODINIT_FUNC init{module:s}(void) {{\n" "#endif\n" " PyGILState_STATE gil_state;\n" "\n" " PyObject *module = NULL;\n" " PyObject *d = NULL;\n" " PyObject *tmp = NULL;\n" "\n" " /* Create the module\n" " * This function must be called before grabbing the GIL\n" " * otherwise the module will segfault on a version mismatch\n" " */\n" "#if PY_MAJOR_VERSION >= 3\n" " module = PyModule_Create(\n" " &{module:s}_module_definition );\n" "#else\n" " module = Py_InitModule3(\n" " \"{module:s}\",\n" " {module:s}_module_methods,\n" " \"Python {module:s} module.\" );\n" "#endif\n" " if (module == NULL) {{\n" "#if PY_MAJOR_VERSION >= 3\n" " return(NULL);\n" "#else\n" " return;\n" "#endif\n" " }}\n" " d = PyModule_GetDict(module);\n" "\n" " /* Make sure threads are enabled */\n" " PyEval_InitThreads();\n" " gil_state = PyGILState_Ensure();\n" "\n" " g_module = module;\n").format(**values_dict)) # The trick is to initialise the classes in order of their # inheritance. 
The following code will order initializations # according to their inheritance tree done = set() for class_name in self.classes.keys(): self.initialise_class(class_name, out, done) # Add the constants in here for constant, type in self.constants: if type == "integer": out.write( " tmp = PyLong_FromUnsignedLongLong((uint64_t) {0:s});\n".format(constant)) elif type == "string": if constant == "TSK_VERSION_STR": out.write(( "#if PY_MAJOR_VERSION >= 3\n" " tmp = PyUnicode_FromString((char *){0:s});\n" "#else\n" " tmp = PyString_FromString((char *){0:s});\n" "#endif\n").format(constant)) else: out.write(( "#if PY_MAJOR_VERSION >= 3\n" " tmp = PyBytes_FromString((char *){0:s});\n" "#else\n" " tmp = PyString_FromString((char *){0:s});\n" "#endif\n").format(constant)) else: out.write( " /* I dont know how to convert {0:s} type {1:s} */\n".format( constant, type)) continue out.write(( " PyDict_SetItemString(d, \"{0:s}\", tmp);\n" " Py_DecRef(tmp);\n").format(constant)) out.write(self.initialization()) out.write( " PyGILState_Release(gil_state);\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " return module;\n" "#else\n" " return;\n" "#endif\n" "\n" "on_error:\n" " PyGILState_Release(gil_state);\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " return NULL;\n" "#else\n" " return;\n" "#endif\n" "}\n") class Type(object): interface = None buildstr = "O" sense = "IN" error_value = "return 0;" active = True def __init__(self, name, type, *args, **kwargs): super(Type, self).__init__() self.name = name self.type = type self.attributes = set() self.additional_args = kwargs def comment(self): return "{0:s} {1:s} ".format(self.type, self.name) def get_string(self): """Retrieves a string representation.""" if self.name == "func_return": return self.type if "void" in self.type: return "" return "{0:s} : {1:s}".format(self.type, self.name) def python_name(self): return self.name def python_proxy_post_call(self): """This is called after a proxy call""" return "" def returned_python_definition(self, *arg, **kwargs): return self.definition(*arg, **kwargs) def definition(self, default=None, **kwargs): if default: return "{0:s} {1:s}={2:s};\n".format( self.type, self.name, default) elif "array_size" in self.additional_args: return ( "int array_index = 0;\n" "{0:s} UNUSED *{1:s};\n").format( self.type, self.name) else: return "{0:s} UNUSED {1:s};\n".format( self.type, self.name) def local_definition(self, default=None, **kwargs): return "" def byref(self): return "&{0:s}".format(self.name) def call_arg(self): return self.name def passthru_call(self): """Returns how we should call the function when simply passing args directly""" return self.call_arg() def pre_call(self, method, **kwargs): return "" def assign(self, call, method, target=None, **kwargs): return ( "Py_BEGIN_ALLOW_THREADS\n" "{0:s} = {1:s};\n" "Py_END_ALLOW_THREADS\n").format( target or self.name, call) def post_call(self, method): # Check for errors result = ( "if(check_error()) {\n" " goto on_error;\n" "}\n") if "DESTRUCTOR" in self.attributes: result += "self->base = NULL; //DESTRUCTOR - C object no longer valid\n" return result def from_python_object(self, source, destination, method, **kwargs): return "" def return_value(self, value): return "return {0!s};".format(value) class String(Type): interface = "string" buildstr = "s" error_value = "return NULL;" def __init__(self, name, type, *args, **kwargs): super(String, self).__init__(name, type, *args, **kwargs) self.length = "strlen({0:s})".format(name) def byref(self): return "&{0:s}".format(self.name) def 
to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "length": self.length, "name": name or self.name, "result": result} result = ( " PyErr_Clear();\n" "\n" " if(!{name:s}) {{\n" " Py_IncRef(Py_None);\n" " {result:s} = Py_None;\n" " }} else {{\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize((char *){name:s}, {length:s});\n" "#else\n" " {result:s} = PyString_FromStringAndSize((char *){name:s}, {length:s});\n" "#endif\n" " if(!{result:s}) {{\n" " goto on_error;\n" " }}\n" " }}\n").format(**values_dict) if "BORROWED" not in self.attributes and "BORROWED" not in kwargs: result += "talloc_unlink(NULL, {0:s});\n".format(name) return result def from_python_object(self, source, destination, method, context="NULL"): method.error_set = True values_dict = { "context": context, "destination": destination, "source": source} return ( "{{\n" " char *buff = NULL;\n" " Py_ssize_t length = 0;\n" "\n" " PyErr_Clear();\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " if(PyBytes_AsStringAndSize({source:s}, &buff, &length) == -1) {{\n" "#else\n" " if(PyString_AsStringAndSize({source:s}, &buff, &length) == -1) {{\n" "#endif\n" " goto on_error;\n" " }}\n" " {destination:s} = talloc_size({context:s}, length + 1);\n" " memcpy({destination:s}, buff, length);\n" " {destination:s}[length] = 0;\n" "}};\n").format(**values_dict) class ZString(String): interface = "null_terminated_string" class BorrowedString(String): def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "length": self.length, "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize((char *){name:s}, {length:s});\n" "#else\n" " {result:s} = PyString_FromStringAndSize((char *){name:s}, {length:s});\n" "#endif\n").format(**values_dict) class Char_and_Length(Type): interface = "char_and_length" buildstr = "s#" error_value = "return NULL;" def __init__(self, data, data_type, length, length_type, *args, **kwargs): super(Char_and_Length, self).__init__(data, data_type, *args, **kwargs) self.name = data self.data_type = data_type self.length = length self.length_type = length_type def comment(self): return "{0:s} {1:s}, {2:s} {3:s}".format( self.data_type, self.name, self.length_type, self.length) def definition(self, default="\"\"", **kwargs): return ( "char *{0:s}={1:s};\n" "Py_ssize_t {2:s}=strlen({3:s});\n").format( self.name, default, self.length, default) def byref(self): return "&{0:s}, &{1:s}".format(self.name, self.length) def call_arg(self): return "({0:s}){1:s}, ({2:s}){3:s}".format( self.data_type, self.name, self.length_type, self.length) def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "length": self.length, "name": self.name, "result": result} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize((char *){name:s}, {length:s});\n" "#else\n" " {result:s} = PyString_FromStringAndSize((char *){name:s}, {length:s});\n" "#endif\n" "\n" " if(!{result:s}) {{\n" " goto on_error;\n" " }}\n").format(**values_dict) class Integer(Type): interface = "integer" buildstr = "i" int_type = "int" def __init__(self, name, type, *args, **kwargs): super(Integer, self).__init__(name, type, *args, **kwargs) self.type = self.int_type self.original_type = type def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if 
PY_MAJOR_VERSION >= 3\n" " {result:s} = PyLong_FromLong({name:s});\n" "#else\n" " {result:s} = PyInt_FromLong({name:s});\n" "#endif\n").format(**values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {destination:s} = PyLong_AsLongMask({source:s});\n" "#else\n" " {destination:s} = PyInt_AsLongMask({source:s});\n" "#endif\n").format(**values_dict) def comment(self): return "{0:s} {1:s} ".format(self.original_type, self.name) class IntegerUnsigned(Integer): buildstr = "I" int_type = "unsigned int" def to_python_object(self, name=None, result="Py_result", **kwargs): if "array_size" in self.additional_args: values_dict = { "name": name or self.name, "result": result, "array_size": self.additional_args["array_size"] } return ( " PyErr_Clear();\n" " {result:s} = PyList_New(0);\n" " for(array_index = 0; array_index < {array_size:s}; array_index++) {{\n" "#if PY_MAJOR_VERSION >= 3\n" " PyList_Append({result:s}, PyLong_FromLong((long) {name:s}[array_index]));\n" "#else\n" " PyList_Append({result:s}, PyInt_FromLong((long) {name:s}[array_index]));\n" "#endif\n" " }}\n" ).format(**values_dict) else: values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyLong_FromLong((long) {name:s});\n" "#else\n" " {result:s} = PyInt_FromLong((long) {name:s});\n" "#endif\n").format(**values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {destination:s} = PyLong_AsUnsignedLongMask({source:s});\n" "#else\n" " {destination:s} = PyInt_AsUnsignedLongMask({source:s});\n" "#endif\n").format(**values_dict) class Integer8(Integer): int_type = "int8_t" class Integer8Unsigned(IntegerUnsigned): int_type = "uint8_t" class Integer16(Integer): int_type = "int16_t" class Integer16Unsigned(IntegerUnsigned): int_type = "uint16_t" class Integer32(Integer): int_type = "int32_t" class Integer32Unsigned(IntegerUnsigned): int_type = "uint32_t" class Integer64(Integer): buildstr = "L" int_type = "int64_t" def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if defined( HAVE_LONG_LONG )\n" " {result:s} = PyLong_FromLongLong({name:s});\n" "#else\n" " {result:s} = PyLong_FromLong({name:s});\n" "#endif\n").format(**values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" "#if defined( HAVE_LONG_LONG )\n" " {destination:s} = PyLong_AsLongLongMask({source:s});\n" "#else\n" " {destination:s} = PyLong_AsLongMask({source:s});\n" "#endif\n" "#else\n" "#if defined( HAVE_LONG_LONG )\n" " {destination:s} = PyInt_AsLongLongMask({source:s});\n" "#else\n" " {destination:s} = PyInt_AsLongMask({source:s});\n" "#endif\n" "#endif /* PY_MAJOR_VERSION >= 3 */\n").format(**values_dict) class Integer64Unsigned(Integer): buildstr = "K" int_type = "uint64_t" def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if defined( HAVE_LONG_LONG )\n" " {result:s} = PyLong_FromUnsignedLongLong({name:s});\n" "#else\n" " {result:s} = 
PyLong_FromUnsignedLong({name:s});\n" "#endif\n").format(**values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} # TODO: use integer_object_copy_to_uint64 instead to support both # long and int objects. return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" "#if defined( HAVE_LONG_LONG )\n" " {destination:s} = PyLong_AsUnsignedLongLongMask({source:s});\n" "#else\n" " {destination:s} = PyLong_AsUnsignedLongMask({source:s});\n" "#endif\n" "#else\n" "#if defined( HAVE_LONG_LONG )\n" " {destination:s} = PyInt_AsUnsignedLongLongMask({source:s});\n" "#else\n" " {destination:s} = PyInt_AsUnsignedLongMask({source:s});\n" "#endif\n" "#endif /* PY_MAJOR_VERSION >= 3 */\n").format(**values_dict) class Long(Integer): buildstr = "l" int_type = "long" def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( "PyErr_Clear();\n" "{result:s} = PyLong_FromLongLong({name:s});\n").format( **values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} return ( "PyErr_Clear();\n" "{destination:s} = PyLong_AsLongMask({source:s});\n").format( **values_dict) class LongUnsigned(Integer): buildstr = "k" int_type = "unsigned long" def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( "PyErr_Clear();\n" "{result:s} = PyLong_FromUnsignedLong({name:s});\n").format( **values_dict) def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source} return ( "PyErr_Clear();\n" "{destination:s} = PyLong_AsUnsignedLongMask({source:s});\n").format( **values_dict) class Char(Integer): buildstr = "s" interface = "small_integer" def to_python_object(self, name=None, result="Py_result", **kwargs): # We really want to return a string here values_dict = { "name": name or self.name, "result": result} return ( "{{\n" " char *str_{name:s} = &{name:s};\n" "\n" " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize(str_{name:s}, 1);\n" "#else\n" " {result:s} = PyString_FromStringAndSize(str_{name:s}, 1);\n" "#endif\n" "\n" " if(!{result:s}) {{\n" " goto on_error;\n" "}}\n").format(**values_dict) def definition(self, default="\"\\x0\"", **kwargs): # Shut up unused warnings return ( "char {0:s} UNUSED=0;\n" "char *str_{0:s} UNUSED = {1:s};\n").format( self.name, default) def byref(self): return "&str_{0:s}".format(self.name) def pre_call(self, method, **kwargs): method.error_set = True values_dict = { "name": self.name} return ( " if(strlen(str_{name:s}) != 1) {\n" " PyErr_Format(PyExc_RuntimeError, \"You must only provide a single character for arg {name:s}\");\n" " goto on_error;\n" " }\n" "\n" " {name:s} = str_{name:s}[0];\n").format( **values_dict) class StringOut(String): sense = "OUT" class IntegerOut(Integer): """Handle Integers pushed out through OUT int *result.""" sense = "OUT_DONE" buildstr = "" int_type = "int *" def definition(self, default=0, **kwargs): # We need to make static storage for the pointers storage = "storage_{0:s}".format(self.name) bare_type = self.type.split()[0] type_definition = Type.definition( self, "&{0:s}".format(storage)) return ( "{0:s} {1:s} = 0;\n" "{2:s}\n").format( bare_type, storage, type_definition) def to_python_object(self, name=None, result="Py_result", 
**kwargs): values_dict = { "name": name or self.name, "result": result} return ( "PyErr_Clear();\n" "{result:s} = PyLong_FromLongLong(*{name:s});\n").format( **values_dict) def python_name(self): return None def byref(self): return self.name def call_arg(self): return "{0:s}".format(self.name) def passthru_call(self): return self.name class PInteger32UnsignedOut(IntegerOut): buildstr = "" int_type = "uint32_t *" class PInteger64UnsignedOut(IntegerOut): buildstr = "" int_type = "uint64_t *" class Char_and_Length_OUT(Char_and_Length): sense = "OUT_DONE" buildstr = "l" def definition(self, default=0, **kwargs): values_dict = { "default": default, "length": self.length, "name": self.name} return ( " char *{name:s} = NULL;\n" " Py_ssize_t {length:s} = {default:d};\n" " PyObject *tmp_{name:s} = NULL;\n").format( **values_dict) def error_cleanup(self): values_dict = { "name": self.name} return ( " if(tmp_{name:s} != NULL) {{\n" " Py_DecRef(tmp_{name:s});\n" " }}\n").format(**values_dict) def python_name(self): return self.length def byref(self): return "&{0:s}".format(self.length) def pre_call(self, method, **kwargs): values_dict = { "length": self.length, "name": self.name} return ( " PyErr_Clear();\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " tmp_{name:s} = PyBytes_FromStringAndSize(NULL, {length:s});\n" "#else\n" " tmp_{name:s} = PyString_FromStringAndSize(NULL, {length:s});\n" "#endif\n" " if(!tmp_{name:s}) {{\n" " goto on_error;\n" " }}\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " PyBytes_AsStringAndSize(tmp_{name:s}, &{name:s}, (Py_ssize_t *)&{length:s});\n" "#else\n" " PyString_AsStringAndSize(tmp_{name:s}, &{name:s}, (Py_ssize_t *)&{length:s});\n" "#endif\n").format(**values_dict) def to_python_object(self, name=None, result="Py_result", sense="in", **kwargs): if "results" in kwargs: kwargs["results"].pop(0) if sense == "proxied": return "py_{0:s} = PyLong_FromLong({1:s});\n".format( self.name, self.length) values_dict = { "length": self.length, "name": name or self.name, "result": result} return ( " /* NOTE - this should never happen\n" " * it might indicate an overflow condition.\n" " */\n" " if(func_return > {length:s}) {{\n" " printf(\"Programming Error - possible overflow!!\\n\");\n" " abort();\n" "\n" " // Do we need to truncate the buffer for a short read?\n" " }} else if(func_return < {length:s}) {{\n" "#if PY_MAJOR_VERSION >= 3\n" " _PyBytes_Resize(&tmp_{name:s}, (Py_ssize_t)func_return);\n" "#else\n" " _PyString_Resize(&tmp_{name:s}, (Py_ssize_t)func_return);\n" "#endif\n" " }}\n" "\n" " {result:s} = tmp_{name:s};\n").format(**values_dict) def python_proxy_post_call(self, result="Py_result"): values_dict = { "name": self.name, "result": result} return ( "{{\n" " char *tmp_buff = NULL;\n" " Py_ssize_t tmp_len = 0;\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " if(PyBytes_AsStringAndSize({result:s}, &tmp_buff, &tmp_len) == -1) {{\n" "#else\n" " if(PyString_AsStringAndSize({result:s}, &tmp_buff, &tmp_len) == -1) {{\n" "#endif\n" " goto on_error;\n" " }}\n" " memcpy({name:s}, tmp_buff, tmp_len);\n" " Py_DecRef({result:s});\n" " {result:s} = PyLong_FromLong(tmp_len);\n" "}}\n").format(**values_dict) class TDB_DATA_P(Char_and_Length_OUT): bare_type = "TDB_DATA" def __init__(self, name, type, *args, **kwargs): super(TDB_DATA_P, self).__init__(name, type, *args, **kwargs) def definition(self, default=None, **kwargs): return Type.definition(self) def byref(self): return "{0:s}.dptr, &{0:s}.dsize".format(self.name) def pre_call(self, method, **kwargs): return "" def call_arg(self): return 
Type.call_arg(self) def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize((char *){name:s}->dptr, {name:s}->dsize);\n" "#else\n" " {result:s} = PyString_FromStringAndSize((char *){name:s}->dptr, {name:s}->dsize);\n" "#endif\n" " talloc_free({name:s});\n").format(**values_dict) def from_python_object(self, source, destination, method, **kwargs): method.error_set = True values_dict = { "bare_type": self.bare_type, "destination": destination, "source": source} return ( "{destination:s} = talloc_zero(self, {bare_type:s});\n" "{{\n" " char *buf = NULL;\n" " Py_ssize_t tmp = 0;\n" "\n" " PyErr_Clear();\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " if(PyBytes_AsStringAndSize({source:s}, &buf, &tmp) == -1) {{\n" "#else\n" " if(PyString_AsStringAndSize({source:s}, &buf, &tmp) == -1) {{\n" "#endif\n" " goto on_error;\n" " }}\n" "\n" " // Take a copy of the Python string\n" " {destination:s}->dptr = talloc_memdup({destination:s}, buf, tmp);\n" " {destination:s}->dsize = tmp;\n" "}}\n" "// We no longer need the Python object\n" "Py_DecRef({source:s});\n").format(**values_dict) class TDB_DATA(TDB_DATA_P): error_value = ( "{result:s}.dptr = NULL;\n" "return {result:s};") def from_python_object(self, source, destination, method, **kwargs): method.error_set = True values_dict = { "destination": destination, "source": source} return ( "{{\n" " char *buf = NULL;\n" " Py_ssize_t tmp = 0;\n" "\n" " PyErr_Clear();\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " if(PyBytes_AsStringAndSize({source:s}, &buf, &tmp) == -1) {{\n" "#else\n" " if(PyString_AsStringAndSize({source:s}, &buf, &tmp) == -1) {{\n" "#endif\n" " goto on_error;\n" " }}\n" " // Take a copy of the Python string - This leaks - how to fix it?\n" " {destination:s}.dptr = talloc_memdup(NULL, buf, tmp);\n" " {destination:s}.dsize = tmp;\n" "}}\n" "// We no longer need the Python object\n" "Py_DecRef({source:s});\n").format(**values_dict) def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( " PyErr_Clear();\n" "#if PY_MAJOR_VERSION >= 3\n" " {result:s} = PyBytes_FromStringAndSize((char *){name:s}.dptr, {name:s}.dsize);\n" "#else\n" " {result:s} = PyString_FromStringAndSize((char *){name:s}.dptr, {name:s}.dsize);\n" "#endif\n").format(**values_dict) class Void(Type): buildstr = "" error_value = "return;" original_type = "" def __init__(self, name, type="void", *args, **kwargs): super(Void, self).__init__(name, type, *args, **kwargs) def comment(self): return "void *ctx" def definition(self, default=None, **kwargs): return "" def to_python_object(self, name=None, result="Py_result", **kwargs): return ( "Py_IncRef(Py_None);\n" "Py_result = Py_None;\n") def call_arg(self): return "NULL" def byref(self): return None def assign(self, call, method, target=None, **kwargs): # We don't assign the result to anything. 
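# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generator): how the Type classes above
# emit the C fragments that get pasted into the autogenerated binding.  The
# argument names "offset" and "flag" are hypothetical.
def _example_type_emission():
  offset_arg = Integer64Unsigned("offset", "uint64_t")
  flag_arg = Char("flag", "char")

  # Each call returns a C fragment containing both the Python 2 (PyInt_* /
  # PyString_*) and Python 3 (PyLong_* / PyBytes_*) code paths, selected at
  # compile time via PY_MAJOR_VERSION.
  return offset_arg.to_python_object(), flag_arg.to_python_object()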
return ( " Py_BEGIN_ALLOW_THREADS\n" " (void) {0:s};\n" " Py_END_ALLOW_THREADS\n").format(call) def return_value(self, value): return "return;" class PVoid(Void): def __init__(self, name, type="void *", *args, **kwargs): super(PVoid, self).__init__(name, type, *args, **kwargs) class StringArray(String): interface = "array" buildstr = "O" def definition(self, default="\"\"", **kwargs): return ( "char **{0:s} = NULL;\n" "PyObject *py_{0:s} = NULL;\n").format(self.name) def byref(self): return "&py_{0:s}".format(self.name) def from_python_object(self, source, destination, method, context="NULL"): method.error_set = True values_dict = { "destination": destination, "source": source} return ( "{{\n" " Py_ssize_t i = 0;\n" " Py_ssize_t size = 0;\n" "\n" " if({source:s}) {{\n" " if(!PySequence_Check({source:s})) {{\n" " PyErr_Format(PyExc_ValueError, \"{destination:s} must be a sequence\");\n" " goto on_error;\n" " }}\n" " size = PySequence_Size({source:s});\n" " }}\n" " {destination:s} = talloc_zero_array(NULL, char *, size + 1);\n" "\n" " for(i = 0; i < size; i++) {{\n" " PyObject *tmp = PySequence_GetItem({source:s}, i);\n" " if(!tmp) {{\n" " goto on_error;\n" " }}\n" "#if PY_MAJOR_VERSION >= 3\n" " {destination:s}[i] = PyBytes_AsString(tmp);\n" "#else\n" " {destination:s}[i] = PyString_AsString(tmp);\n" "#endif\n" "\n" " if(!{destination:s}[i]) {{\n" " Py_DecRef(tmp);\n" " goto on_error;\n" " }}\n" " Py_DecRef(tmp);\n" " }}\n" "}}\n").format(**values_dict) def pre_call(self, method, **kwargs): return self.from_python_object( "py_{0:s}".format(self.name), self.name, method) def error_condition(self): return ( " if({0:s}) {{\n" " talloc_free({0:s});\n" " }}\n").format(self.name) class Wrapper(Type): """This class represents a wrapped C type """ sense = "IN" error_value = "return NULL;" def from_python_object(self, source, destination, method, **kwargs): values_dict = { "destination": destination, "source": source, "type": self.type} return ( " /* First check that the returned value is in fact a Wrapper */\n" " if(!type_check({source:s}, &{type:s}_Type)) {{\n" " PyErr_Format(PyExc_RuntimeError, \"function must return an {type:s} instance\");\n" " goto on_error;\n" " }}\n" "\n" " {destination:s} = ((Gen_wrapper) {source:s})->base;\n" "\n" " if(!{destination:s}) {{\n" " PyErr_Format(PyExc_RuntimeError, \"{type:s} instance is no longer valid (was it gc'ed?)\");\n" " goto on_error;\n" "}}\n" "\n").format(**values_dict) def to_python_object(self, **kwargs): return "" def returned_python_definition(self, default="NULL", sense="in", **kwargs): return "{0:s} {1:s} = {2:s};\n".format( self.type, self.name, default) def byref(self): return "&wrapped_{0:s}".format(self.name) def definition(self, default="NULL", sense="in", **kwargs): result = " Gen_wrapper wrapped_{0:s} UNUSED = {1:s};\n".format( self.name, default) if sense == "in" and not "OUT" in self.attributes: result += " {0:s} UNUSED {1:s};\n".format( self.type, self.name) return result def call_arg(self): return "{0:s}".format(self.name) def pre_call(self, method, python_object_index=1, **kwargs): if "OUT" in self.attributes or self.sense == "OUT": return "" self.original_type = self.type.split()[0] values_dict = { "name": self.name, "original_type": self.original_type, "python_object_index": python_object_index} return ( " if(wrapped_{name:s} == NULL || (PyObject *)wrapped_{name:s} == Py_None) {{\n" " {name:s} = NULL;\n" " }} else if(!type_check((PyObject *)wrapped_{name:s},&{original_type:s}_Type)) {{\n" " PyErr_Format(PyExc_RuntimeError, 
\"{name:s} must be derived from type {original_type:s}\");\n" " goto on_error;\n" " }} else if(wrapped_{name:s}->base == NULL) {{\n" " PyErr_Format(PyExc_RuntimeError, \"{original_type:s} instance is no longer valid (was it gc'ed?)\");\n" " goto on_error;\n" " }} else {{\n" " {name:s} = wrapped_{name:s}->base;\n" " if(self->python_object{python_object_index:d} == NULL) {{\n" " self->python_object{python_object_index:d} = (PyObject *) wrapped_{name:s};\n" " Py_IncRef(self->python_object{python_object_index:d});\n" " }}\n" " }}\n").format(**values_dict) def assign(self, call, method, target=None, **kwargs): method.error_set = True; values_dict = { "call": call.strip(), "incref": INCREF, "name": target or self.name, "type": self.type} result = ( " {{\n" " Object returned_object = NULL;\n" "\n" " ClearError();\n" "\n" " Py_BEGIN_ALLOW_THREADS\n" " // This call will return a Python object if the base is a proxied Python object\n" " // or a talloc managed object otherwise.\n" " returned_object = (Object) {call:s};\n" " Py_END_ALLOW_THREADS\n" "\n" " if(check_error()) {{\n" " if(returned_object != NULL) {{\n" " if(self->base_is_python_object != 0) {{\n" " Py_DecRef((PyObject *) returned_object);\n" " }} else if(self->base_is_internal != 0) {{\n" " talloc_free(returned_object);\n" " }}\n" " }}\n" " goto on_error;\n" " }}\n").format(**values_dict) # Is NULL an acceptable return type? In some Python code NULL # can be returned (e.g. in iterators) but usually it should # be converted to Py_None. if "NULL_OK" in self.attributes: result += ( " if(returned_object == NULL) {\n" " goto on_error;\n" " }\n") result += ( " wrapped_{name:s} = new_class_wrapper(returned_object, self->base_is_python_object);\n" "\n" " if(wrapped_{name:s} == NULL) {{\n" " if(returned_object != NULL) {{\n" " if(self->base_is_python_object != 0) {{\n" " Py_DecRef((PyObject *) returned_object);\n" " }} else if(self->base_is_internal != 0) {{\n" " talloc_free(returned_object);\n" " }}\n" " }}\n" " goto on_error;\n" " }}\n").format(**values_dict) if "BORROWED" in self.attributes: result += ( " #error unchecked BORROWED code segment\n" " {incref:s}(wrapped_{name:s}->base);\n" " if(((Object) wrapped_{name:s}->base)->extension) {{\n" " Py_IncRef((PyObject *) ((Object) wrapped_{name:s}->base)->extension);\n" " }}\n").format(**values_dict) result += ( " }\n") return result def to_python_object( self, name=None, result="Py_result", sense="in", **kwargs): values_dict = { "name": name or self.name, "result": result} if sense == "proxied": return ( "{result:s} = (PyObject *) new_class_wrapper((Object){name:s}, 0);\n").format( **values_dict) return "{result:s} = (PyObject *) wrapped_{name:s};\n".format( **values_dict) class PointerWrapper(Wrapper): """ A pointer to a wrapped class """ def __init__(self, name, type, *args, **kwargs): type = type.split()[0] super(PointerWrapper, self).__init__(name, type, *args, **kwargs) def comment(self): return "{0:s} *{1:s}".format(self.type, self.name) def definition(self, default="NULL", sense="in", **kwargs): result = "Gen_wrapper wrapped_{0:s} = {1:s};".format( self.name, default) if sense == "in" and not "OUT" in self.attributes: result += " {0:s} *{1:s};\n".format(self.type, self.name) return result def byref(self): return "&wrapped_{0:s}".format(self.name) def pre_call(self, method, **kwargs): if "OUT" in self.attributes or self.sense == "OUT": return "" self.original_type = self.type.split()[0] values_dict = { "name": self.name, "original_type": self.original_type} return ( "if(!wrapped_{name:s} || 
(PyObject *)wrapped_{name:s}==Py_None) {{\n" " {name:s} = NULL;\n" "}} else if(!type_check((PyObject *)wrapped_{name:s},&{original_type:s}_Type)) {{\n" " PyErr_Format(PyExc_RuntimeError, \"{name:s} must be derived from type {original_type:s}\");\n" " goto on_error;\n" "}} else {{\n" " {name:s} = ({original_type:s} *)&wrapped_{name:s}->base;\n" "}};\n").format(**values_dict) class StructWrapper(Wrapper): """ A wrapper for struct classes """ active = False def __init__(self, name, type, *args, **kwargs): super(StructWrapper, self).__init__(name, type, *args, **kwargs) self.original_type = type.split()[0] def assign(self, call, method, target=None, borrowed=True, **kwargs): self.original_type = self.type.split()[0] values_dict = { "call": call.strip(), "name": target or self.name, "type": self.original_type} result = ( "\n" " PyErr_Clear();\n" "\n" " wrapped_{name:s} = (Gen_wrapper) PyObject_New(py{type:s}, &{type:s}_Type);\n" "\n").format(**values_dict) if borrowed: result += ( " // Base is borrowed from another object.\n" " wrapped_{name:s}->base = {call:s};\n" " wrapped_{name:s}->base_is_python_object = 0;\n" " wrapped_{name:s}->base_is_internal = 0;\n" " wrapped_{name:s}->python_object1 = NULL;\n" " wrapped_{name:s}->python_object2 = NULL;\n" "\n").format(**values_dict) else: result += ( " wrapped_{name:s}->base = {call:s};\n" " wrapped_{name:s}->base_is_python_object = 0;\n" " wrapped_{name:s}->base_is_internal = 1;\n" " wrapped_{name:s}->python_object1 = NULL;\n" " wrapped_{name:s}->python_object2 = NULL;\n" "\n").format(**values_dict) if "NULL_OK" in self.attributes: result += ( " if(wrapped_{name:s}->base == NULL) {{\n" " Py_DecRef((PyObject *) wrapped_{name:s});\n" " return NULL;\n" " }}\n").format(**values_dict) result += ( " // A NULL object gets translated to a None\n" " if(wrapped_{name:s}->base == NULL) {{\n" " Py_DecRef((PyObject *) wrapped_{name:s});\n" " Py_IncRef(Py_None);\n" " wrapped_{name:s} = (Gen_wrapper) Py_None;\n" " }}\n").format(**values_dict) # TODO: with the following code commented out is makes no sense to have the else clause here. 
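# Illustrative sketch: what a wrapped-class argument declares in the generated
# C.  "fs_info" and "TSK_FS_INFO *" are hypothetical names; definition() emits
# both the Gen_wrapper seen from Python and the raw pointer handed to the
# wrapped call, while assign() later records whether that base pointer is
# borrowed or owned by the wrapper.
def _example_wrapper_declaration():
  fs_arg = StructWrapper("fs_info", "TSK_FS_INFO *")
  return fs_arg.definition(), fs_arg.byref()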
# " }} else {{\n").format(**values_dict) # if "FOREIGN" in self.attributes: # result += "// Not taking references to foreign memory\n" # elif "BORROWED" in self.attributes: # result += "talloc_reference({name:s}->ctx, {name:s}->base);\n".format(**values_dict) # else: # result += "talloc_steal({name:s}->ctx, {name:s}->base);\n".format(**values_dict) # result += "}}\n" return result def byref(self): return "&{0:s}".format(self.name) def definition(self, default="NULL", sense="in", **kwargs): result = "Gen_wrapper wrapped_{0:s} = {1:s};".format( self.name, default) if sense == "in" and not "OUT" in self.attributes: result += " {0:s} *{1:s} = NULL;\n".format( self.original_type, self.name) return result; class PointerStructWrapper(StructWrapper): def from_python_object(self, source, destination, method, **kwargs): return "{0:s} = ((Gen_wrapper) {1:s})->base;\n".format( destination, source) def byref(self): return "&wrapped_{0:s}".format(self.name) class Timeval(Type): """Handle struct timeval values.""" interface = "numeric" buildstr = "f" def definition(self, default=None, **kwargs): return ( "struct timeval {0:s};\n".format(self.name) + self.local_definition(default, **kwargs)) def local_definition(self, default=None, **kwargs): return "float {0:s}_flt;\n".format(self.name) def byref(self): return "&{0:s}_flt".format(self.name) def pre_call(self, method, **kwargs): return ( "{0:s}.tv_sec = (int){0:s}_flt;\n" "{0:s}.tv_usec = ({0:s}_flt - {0:s}.tv_sec) * 1e6;\n").format( self.name) def to_python_object(self, name=None, result="Py_result", **kwargs): values_dict = { "name": name or self.name, "result": result} return ( "{name:s}_flt = (double)({name:s}.tv_sec) + {name:s}.tv_usec;\n" "{result:s} = PyFloat_FromDouble({name:s}_flt);\n").format( **values_dict) class PyObject(Type): """Accept an opaque Python object.""" interface = "opaque" buildstr = "O" def definition(self, default="NULL", **kwargs): self.default = default values_dict = { "default": self.default, "name": self.name} return ( "PyObject *{name:s} = {default:s};\n").format( **values_dict) def byref(self): return "&{0:s}".format(self.name) type_dispatcher = { "IN unsigned char *": String, "IN char *": String, "unsigned char *": String, "char *": String, "ZString": ZString, "OUT unsigned char *": StringOut, "OUT char *": StringOut, "OUT uint64_t *": PInteger64UnsignedOut, "OUT uint32_t *": PInteger32UnsignedOut, "void *": PVoid, "void": Void, "TDB_DATA *": TDB_DATA_P, "TDB_DATA": TDB_DATA, "TSK_INUM_T": Integer, "off_t": Integer64, "size_t": Integer64Unsigned, "ssize_t": Integer64, "time_t": Integer64, "unsigned long": LongUnsigned, "long": Long, "unsigned long int": LongUnsigned, "long int": Integer, "unsigned int": Integer, "int": Integer, "uint64_t": Integer64Unsigned, "uint32_t": Integer32Unsigned, "uint16_t": Integer16Unsigned, "uint8_t": Integer8Unsigned, "int64_t": Integer64, "int32_t": Integer32, "int16_t": Integer16, "int8_t": Integer8, "char": Char, "struct timeval": Timeval, "char **": StringArray, "PyObject *": PyObject, } method_attributes = ["BORROWED", "DESTRUCTOR", "IGNORE"] class ResultException(object): value = 0 exception = "PyExc_IOError" def __init__(self, check, exception, message): self.check = check self.exception = exception self.message = message def write(self, out): out.write(( "\n" "/* Handle exceptions */\n" "if({0:s}) {{\n" " PyErr_Format(PyExc_{1:s}, {2:s});\n" " goto on_error;\n" "}}\n" "\n").format(self.check, self.exception, self.message)) class Method(object): default_re = 
re.compile("DEFAULT\(([A-Z_a-z0-9]+)\) =(.+);") exception_re = re.compile("RAISES\(([^,]+),\s*([^\)]+)\) =(.+);") typedefed_re = re.compile(r"struct (.+)_t \*") def __init__( self, class_name, base_class_name, name, args, return_type, myclass=None): if not isinstance(myclass, ClassGenerator): raise RuntimeError("myclass must be a class generator") self.args = [] self.base_class_name = base_class_name self.class_name = class_name self.defaults = {} self.definition_class_name = class_name self.docstring = "" self.error_set = False self.exception = None self.name = name self.myclass = myclass for type, name in args: self.add_arg(type, name) try: self.return_type = dispatch("func_return", return_type) self.return_type.attributes.add("OUT") self.return_type.original_type = return_type except KeyError: # Is it a wrapped type? if return_type: log("Unable to handle return type {0:s}.{1:s} {2:s}".format( self.class_name, self.name, return_type)) # pdb.set_trace() self.return_type = PVoid("func_return") def get_string(self): """Retrieves a string representation.""" return "def {0:s} {1:s}({2:s}):".format( self.return_type.get_string(), self.name, " , ".join([a.get_string() for a in self.args])) def clone(self, new_class_name): self.find_optional_vars() result = self.__class__( new_class_name, self.base_class_name, self.name, [], "void *", myclass=self.myclass) result.args = self.args result.return_type = self.return_type result.definition_class_name = self.definition_class_name result.defaults = self.defaults result.exception = self.exception return result def find_optional_vars(self): for line in self.docstring.splitlines(): m = self.default_re.search(line) if m: name = m.group(1) value = m.group(2) log("Setting default value for {0:s} of {1:s}".format( m.group(1), m.group(2))) self.defaults[name] = value m = self.exception_re.search(line) if m: self.exception = ResultException( m.group(1), m.group(2), m.group(3)) def write_local_vars(self, out): self.find_optional_vars() # We do it in two passes - first mandatory then optional kwlist = " static char *kwlist[] = {" # Mandatory for type in self.args: python_name = type.python_name() if python_name and python_name not in self.defaults: kwlist += "\"{0:s}\",".format(python_name) for type in self.args: python_name = type.python_name() if python_name and python_name in self.defaults: kwlist += "\"{0:s}\",".format(python_name) kwlist += " NULL};\n" for type in self.args: out.write( " // DEBUG: local arg type: {0:s}\n".format( type.__class__.__name__)) python_name = type.python_name() try: out.write(type.definition(default=self.defaults[python_name])) except KeyError: out.write(type.definition()) # Make up the format string for the parse args in two pases parse_line = "" for type in self.args: python_name = type.python_name() if type.buildstr and python_name not in self.defaults: parse_line += type.buildstr optional_args = "" for type in self.args: python_name = type.python_name() if type.buildstr and python_name in self.defaults: optional_args += type.buildstr if optional_args: parse_line += "|" + optional_args # Iterators have a different prototype and do not need to # unpack any args if not "iternext" in self.name: # Now parse the args from Python objects out.write("\n") out.write(kwlist) out.write(( "\n" " if(!PyArg_ParseTupleAndKeywords(args, kwds, \"{0:s}\", ").format( parse_line)) tmp = ["kwlist"] for type in self.args: ref = type.byref() if ref: tmp.append(ref) out.write(",".join(tmp)) self.error_set = True out.write( ")) {\n" " goto on_error;\n" 
" }\n") def error_condition(self): result = "" if "DESTRUCTOR" in self.return_type.attributes: result += "self->base = NULL;\n" if hasattr(self, "args"): for type in self.args: if hasattr(type, "error_cleanup"): result += type.error_cleanup() result += " return NULL;\n"; return result def write_definition(self, out): out.write( "\n" "/********************************************************\n" "Autogenerated wrapper for function:\n") out.write(self.comment()) out.write("********************************************************/\n") self._prototype(out) out.write(( "{{\n" " PyObject *returned_result = NULL;\n" " PyObject *Py_result = NULL;\n" "\n" " // DEBUG: return type: {0:s}\n" " ").format( self.return_type.__class__.__name__)) out.write(self.return_type.definition()) self.write_local_vars(out) values_dict = { "class_name": self.class_name, "method": self.name} out.write(( "\n" " // Make sure that we have something valid to wrap\n" " if(self->base == NULL) {{\n" " return PyErr_Format(PyExc_RuntimeError, \"{class_name:s} object no longer valid\");\n" " }}\n" "\n").format(**values_dict)) # Precall preparations out.write(" // Precall preparations\n") out.write(self.return_type.pre_call(self)) for type in self.args: out.write(type.pre_call(self)) values_dict = { "class_name": self.class_name, "def_class_name": self.definition_class_name, "method": self.name} out.write(( " // Check the function is implemented\n" " {{\n" " void *method = (({def_class_name:s}) self->base)->{method:s};\n" "\n" " if(method == NULL || (void *) unimplemented == (void *) method) {{\n" " PyErr_Format(PyExc_RuntimeError, \"{class_name:s}.{method:s} is not implemented\");\n" " goto on_error;\n" " }}\n" "\n" " // Make the call\n" " ClearError();\n").format(**values_dict)) base = "(({0:s}) self->base)".format(self.definition_class_name) call = " {0:s}->{1:s}({2:s}".format(base, self.name, base) tmp = "" for type in self.args: tmp += ", " + type.call_arg() call += "{0:s})".format(tmp) # Now call the wrapped function out.write(self.return_type.assign(call, self, borrowed=False)) if self.exception: self.exception.write(out) self.error_set = True out.write( " };\n" "\n" " // Postcall preparations\n") # Postcall preparations post_calls = [] post_call = self.return_type.post_call(self) post_calls.append(post_call) out.write(" {0:s}".format(post_call)) for type in self.args: post_call = type.post_call(self) if post_call not in post_calls: post_calls.append(post_call) out.write(" {0:s}".format(post_call)) # Now assemble the results results = [self.return_type.to_python_object()] for type in self.args: if type.sense == "OUT_DONE": results.append(type.to_python_object(results=results)) # If all the results are returned by reference we dont need # to prepend the void return value at all. 
    if isinstance(self.return_type, Void) and len(results) > 1:
      results.pop(0)

    out.write(
        "\n"
        " // prepare results\n")

    # Make a tuple of results and pass them back
    if len(results) > 1:
      out.write("returned_result = PyList_New(0);\n")

      for result in results:
        out.write(result)
        out.write(
            "PyList_Append(returned_result, Py_result);\n"
            "Py_DecRef(Py_result);\n")

      out.write("return returned_result;\n")

    else:
      out.write(results[0])

      # This useless code removes compiler warnings
      out.write(
          " returned_result = Py_result;\n"
          " return returned_result;\n")

    # Write the error part of the function
    if self.error_set:
      out.write((
          "\n"
          "on_error:\n"
          "{0:s}").format(self.error_condition()))

    out.write("};\n\n")

  def add_arg(self, type, name):
    try:
      t = type_dispatcher[type](name, type)
    except KeyError:
      # Sometimes types must be typedefed in advance
      try:
        m = self.typedefed_re.match(type)
        type = m.group(1)
        log("Trying {0:s} for {1:s}".format(type, m.group(0)))
        t = type_dispatcher[type](name, type)
      except (KeyError, AttributeError):
        log("Unable to handle type {0:s}.{1:s} {2:s}".format(
            self.class_name, self.name, type))
        return

    # Here we collapse char * + int type interfaces into a
    # coherent string-like interface.
    try:
      previous = self.args[-1]

      if t.interface == "integer" and previous.interface == "string":
        # We make a distinction between IN variables and OUT variables
        if previous.sense == "OUT":
          cls = Char_and_Length_OUT
        else:
          cls = Char_and_Length

        cls = cls(
            previous.name, previous.type, name, type)

        self.args[-1] = cls
        return

    except IndexError:
      pass

    self.args.append(t)

  def comment(self):
    args = []
    for type in self.args:
      args.append(type.comment())

    return "{0:s} {1:s}.{2:s}({3:s});\n".format(
        self.return_type.original_type, self.class_name, self.name,
        ",".join(args))

  def prototype(self, out):
    self._prototype(out)
    out.write(";\n")

  def _prototype(self, out):
    values_dict = {
        "class_name": self.class_name,
        "method": self.name}

    out.write(
        "static PyObject *py{class_name:s}_{method:s}(py{class_name:s} *self, PyObject *args, PyObject *kwds)".format(
            **values_dict))

  def PyMethodDef(self, out):
    docstring = self.comment() + "\n\n" + self.docstring.strip()

    values_dict = {
        "class_name": self.class_name,
        "docstring": format_as_docstring(docstring),
        "name": self.name}

    out.write((
        " {{ \"{name:s}\",\n"
        " (PyCFunction) py{class_name:s}_{name:s},\n"
        " METH_VARARGS|METH_KEYWORDS,\n"
        " \"{docstring:s}\" }},\n"
        "\n").format(**values_dict))


class IteratorMethod(Method):
  """A method which implements an iterator."""

  def __init__(self, *args, **kwargs):
    super(IteratorMethod, self).__init__(*args, **kwargs)

    # Tell the return type that a NULL Python return is ok
    self.return_type.attributes.add("NULL_OK")

  def get_string(self):
    """Retrieves a string representation."""
    return "Iterator returning {0:s}.".format(self.return_type.get_string())

  def _prototype(self, out):
    values_dict = {
        "class_name": self.class_name,
        "method": self.name}

    out.write(
        "static PyObject *py{class_name:s}_{method:s}(py{class_name:s} *self)".format(
            **values_dict))

  def PyMethodDef(self, out):
    # This method should not go in the method table as it's linked
    # in directly.
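# Illustrative sketch: how add_arg() collapses an (OUT char *, size_t) pair
# into a single Char_and_Length_OUT argument.  "Img_Info" mirrors the real
# binding but the method and argument names here are hypothetical, and a None
# stub stands in for the parsed module.
def _example_argument_collapsing():
  generator = ClassGenerator("Img_Info", "Object", module=None)
  method = Method(
      "Img_Info", "Object", "read",
      [("uint64_t", "offset"), ("OUT char *", "buf"), ("size_t", "len")],
      "ssize_t", myclass=generator)

  # The buffer and its length are now presented to Python as one argument.
  return method.comment()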
pass class SelfIteratorMethod(IteratorMethod): def write_definition(self, out): out.write( "\n" "/********************************************************\n" " * Autogenerated wrapper for function:\n") out.write(self.comment()) out.write( "********************************************************/\n") self._prototype(out) values_dict = { "class_name": self.class_name, "method": self.name} out.write(( "{{\n" " (({class_name:s}) self->base)->{method:s}(({class_name:s}) self->base);\n" " return PyObject_SelfIter((PyObject *) self);\n" "}}\n").format(**values_dict)) class ConstructorMethod(Method): # Python constructors are a bit different than regular methods def _prototype(self, out): values_dict = { "class_name": self.class_name, "method": self.name} out.write( "static int py{class_name:s}_init(py{class_name:s} *self, PyObject *args, PyObject *kwds)\n".format( **values_dict)) def prototype(self, out): self._prototype(out) values_dict = { "class_name": self.class_name} out.write(( ";\n" "static void py{class_name:s}_initialize_proxies(py{class_name:s} *self, void *item);\n").format( **values_dict)) def write_destructor(self, out): values_dict = { "class_name": self.class_name, "free": FREE} out.write(( "static void {class_name:s}_dealloc(py{class_name:s} *self) {{\n" " struct _typeobject *ob_type = NULL;\n" "\n" " if(self != NULL) {{\n" " if(self->base != NULL) {{\n" " if(self->base_is_python_object != 0) {{\n" " Py_DecRef((PyObject*) self->base);\n" " }} else if(self->base_is_internal != 0) {{\n" " {free:s}(self->base);\n" " }}\n" " self->base = NULL;\n" " }}\n" " if(self->python_object2 != NULL) {{\n" " Py_DecRef(self->python_object2);\n" " self->python_object2 = NULL;\n" " }}\n" " if(self->python_object1 != NULL) {{\n" " Py_DecRef(self->python_object1);\n" " self->python_object1 = NULL;\n" " }}\n" " ob_type = Py_TYPE(self);\n" " if(ob_type != NULL && ob_type->tp_free != NULL) {{\n" " ob_type->tp_free((PyObject*) self);\n" " }}\n" " }}\n" "}}\n" "\n").format(**values_dict)) def error_condition(self): return " return -1;"; def initialise_proxies(self, out): self.myclass.module.function_definitions.add( "py{0:s}_initialize_proxies".format(self.class_name)) values_dict = { "class_name": self.class_name} out.write(( "static void py{class_name:s}_initialize_proxies(py{class_name:s} *self, void *item) {{\n" " {class_name:s} target = ({class_name:s}) item;\n" "\n" " /* Maintain a reference to the Python object\n" " * in the C object extension\n" " */\n" " ((Object) item)->extension = self;\n" "\n").format(**values_dict)) # Install proxies for all the method in the current class. for method in self.myclass.module.classes[self.class_name].methods: if method.name.startswith("_"): continue # Since the SleuthKit uses close method also for freeing it needs # to be handled separately to prevent the C/C++ code calling back # into a garbage collected Python object. For close we keep the # default implementation and have its destructor deal with # correctly closing the SleuthKit object. 
if method.name != "close": values_dict = { "class_name": method.class_name, "definition_class_name": method.definition_class_name, "name": method.name, "proxied_name": method.proxied.get_name()} out.write(( " if(check_method_override((PyObject *) self, &{class_name:s}_Type, \"{name:s}\")) {{\n" " // Proxy the {name:s} method\n" " (({definition_class_name:s}) target)->{name:s} = {proxied_name:s};\n" " }}\n").format(**values_dict)) out.write("}\n\n") def write_definition(self, out): self.initialise_proxies(out) self._prototype(out) values_dict = { "class_name": self.class_name, "definition_class_name": self.definition_class_name} out.write(( "{{\n" " {class_name:s} result_constructor = NULL;\n").format( **values_dict)) # pdb.set_trace() self.write_local_vars(out) # Assign the initialise_proxies handler out.write(( " self->python_object1 = NULL;\n" " self->python_object2 = NULL;\n" "\n" " /* Initialise is used to keep a reference on the object?\n" " * If not called no longer valid warnings have been seen\n" " * on Windows.\n" " */\n" " self->initialise = (void *) py{class_name:s}_initialize_proxies;\n" "\n").format(**values_dict)) # Precall preparations python_object_index = 1 for type in self.args: out.write(type.pre_call( self, python_object_index=python_object_index)) python_object_index += 1 # Now call the wrapped function out.write(( " ClearError();\n" "\n" " /* Allocate a new instance */\n" " self->base = ({class_name:s}) alloc_{class_name:s}();\n" " self->base_is_python_object = 0;\n" " self->base_is_internal = 1;\n" " self->object_is_proxied = 0;\n" "\n" " /* Update the target by replacing its methods with proxies\n" " * to call back into Python\n" " */\n" " py{class_name:s}_initialize_proxies(self, self->base);\n" "\n" " /* Now call the constructor */\n" " Py_BEGIN_ALLOW_THREADS\n" " result_constructor = CONSTRUCT_INITIALIZE({class_name:s}, {definition_class_name:s}, Con, self->base").format( **values_dict)) tmp = "" for type in self.args: tmp += ", " + type.call_arg() self.error_set = True out.write(tmp) out.write(( ");\n" " Py_END_ALLOW_THREADS\n" "\n" " if(!CheckError(EZero)) {{\n" " char *buffer = NULL;\n" " PyObject *exception = resolve_exception(&buffer);\n" "\n" " PyErr_Format(exception, \"%s\", buffer);\n" " ClearError();\n" " goto on_error;\n" " }}\n" " if(result_constructor == NULL) {{\n" " PyErr_Format(PyExc_IOError, \"Unable to construct class {class_name:s}\");\n" " goto on_error;\n" " }}\n" "\n" " return 0;\n").format(**values_dict)) # Write the error part of the function. if self.error_set: out.write(( "\n" "on_error:\n" " if(self->python_object2 != NULL) {{\n" " Py_DecRef(self->python_object2);\n" " self->python_object2 = NULL;\n" " }}\n" " if(self->python_object1 != NULL) {{\n" " Py_DecRef(self->python_object1);\n" " self->python_object1 = NULL;\n" " }}\n" " if(self->base != NULL) {{\n" " talloc_free(self->base);\n" " self->base = NULL;\n" " }}\n" "{0:s}\n").format(self.error_condition())) out.write("}\n\n") class GetattrMethod(Method): def __init__(self, class_name, base_class_name, myclass): # Cannot use super here due to certain logic in Method.__init__(). 
self._attributes = [] self.base_class_name = base_class_name self.class_name = class_name self.error_set = True self.myclass = myclass self.name = "" self.return_type = Void("") self.rename_class_name(class_name) def get_string(self): """Retrieves a string representation.""" result = "" for class_name, attr in self.get_attributes(): result += " {0:s}\n".format(attr.get_string()) return result def add_attribute(self, attr): if attr.name: self._attributes.append([self.class_name, attr]) def rename_class_name(self, new_name): """This allows us to rename the class_name at a later stage. Required for late initialization of Structs whose name is not know until much later on. """ # TODO fix this behavior, new_name can be None but it is unclear what # the behavious should be. Python 3 requires the values to be set to # string types. if not new_name: self.class_name = "" self.name = "" else: self.class_name = new_name self.name = "py{0:s}_getattr".format(new_name) for attribure in self._attributes: attribure[0] = new_name def get_attributes(self): for class_name, attr in self._attributes: try: # If its not an active struct, skip it if (not type_dispatcher[attr.type].active and not attr.type in self.myclass.module.active_structs): continue except KeyError: pass yield class_name, attr def clone(self, class_name): result = self.__class__(class_name, self.base_class_name, self.myclass) result._attributes = self._attributes[:] return result def prototype(self, out): if not self.name: return values_dict = { "class_name": self.class_name, "name": self.name} # Define getattr. out.write( "static PyObject *{name:s}(py{class_name:s} *self, PyObject *name);\n".format( **values_dict)) # Define getters. for _, attr in self.get_attributes(): values_dict = { "class_name": self.class_name, "name": attr.name} out.write( "PyObject *py{class_name:s}_{name:s}_getter(py{class_name:s} *self, PyObject *arguments);\n".format( **values_dict)) def built_ins(self, out): """Check for some built in attributes we need to support.""" out.write( " if(strcmp(name, \"__members__\") == 0) {\n" " PyMethodDef *i = NULL;\n" " PyObject *list_object = NULL;\n" " PyObject *string_object = NULL;\n" "\n" " list_object = PyList_New(0);\n" " if(list_object == NULL) {\n" " goto on_error;\n" " }\n" "\n") # Add attributes for class_name, attr in self.get_attributes(): values_dict = { "name": attr.name} out.write(( "#if PY_MAJOR_VERSION >= 3\n" " string_object = PyUnicode_FromString(\"{name:s}\");\n" "#else\n" " string_object = PyString_FromString(\"{name:s}\");\n" "#endif\n" " PyList_Append(list_object, string_object);\n" " Py_DecRef(string_object);\n" "\n").format(**values_dict)) # Add methods out.write(( "\n" " for(i = {0:s}_methods; i->ml_name; i++) {{\n" "#if PY_MAJOR_VERSION >= 3\n" " string_object = PyUnicode_FromString(i->ml_name);\n" "#else\n" " string_object = PyString_FromString(i->ml_name);\n" "#endif\n" " PyList_Append(list_object, string_object);\n" " Py_DecRef(string_object);\n" " }}\n" "#if PY_MAJOR_VERSION >= 3\n" " if( utf8_string_object != NULL ) {{\n" " Py_DecRef(utf8_string_object);\n" " }}\n" "#endif\n" " return list_object;\n" " }}\n").format(self.class_name)) def write_definition(self, out): if not self.name: return values_dict = { "class_name": self.class_name, "name": self.name} out.write(( "static PyObject *py{class_name:s}_getattr(py{class_name:s} *self, PyObject *pyname) {{\n" " PyObject *result = NULL;\n" " char *name = NULL;\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " PyObject *utf8_string_object = NULL;\n" "#endif\n" 
"\n" " // Try to hand it off to the Python native handler first\n" " result = PyObject_GenericGetAttr((PyObject*) self, pyname);\n" "\n" " if(result) {{\n" " return result;\n" " }}\n" "\n" " PyErr_Clear();\n" " // No - nothing interesting was found by python\n" "#if PY_MAJOR_VERSION >= 3\n" " utf8_string_object = PyUnicode_AsUTF8String(pyname);\n" "\n" " if(utf8_string_object != NULL) {{\n" " name = PyBytes_AsString(utf8_string_object);\n" " }}\n" "#else\n" " name = PyString_AsString(pyname);\n" "#endif\n" "\n" " if(!self->base) {{\n" "#if PY_MAJOR_VERSION >= 3\n" " if( utf8_string_object != NULL ) {{\n" " Py_DecRef(utf8_string_object);\n" " }}\n" "#endif\n" " return PyErr_Format(PyExc_RuntimeError, \"Wrapped object ({class_name:s}.{name:s}) no longer valid\");\n" " }}\n" " if(!name) {{\n" " goto on_error;\n" " }}\n").format(**values_dict)) self.built_ins(out) out.write( "\n" "#if PY_MAJOR_VERSION >= 3\n" " if( utf8_string_object != NULL ) {{\n" " Py_DecRef(utf8_string_object);\n" " }}\n" "#endif\n" " return PyObject_GenericGetAttr((PyObject *) self, pyname);\n") # Write the error part of the function. if self.error_set: out.write( "on_error:\n" "#if PY_MAJOR_VERSION >= 3\n" " if( utf8_string_object != NULL ) {{\n" " Py_DecRef(utf8_string_object);\n" " }}\n" "#endif\n" + self.error_condition()) out.write("}\n\n") self.write_definition_getters(out) def write_definition_getters(self, out): for _, attr in self.get_attributes(): if self.base_class_name: call = "((({0:s}) self->base)->{1:s})".format( self.class_name, attr.name) else: call = "(self->base->{0:s})".format(attr.name) values_dict = { "class_name": self.class_name, "name": attr.name, "python_obj": attr.to_python_object(), "python_assign": attr.assign(call, self, borrowed=True), "python_def": attr.definition(sense="out")} out.write(( "PyObject *py{class_name:s}_{name:s}_getter(py{class_name:s} *self, PyObject *arguments) {{\n" " PyObject *Py_result = NULL;\n" "{python_def:s}\n" "\n" "{python_assign:s}\n" "{python_obj:s}\n" "\n" " return Py_result;\n" "\n").format(**values_dict)) # Work-around for the String class that generates code that contains "goto on_error". if isinstance(attr, String): out.write(( "on_error:\n" " {0:s}\n").format(attr.error_value)) out.write("}\n\n") def PyGetSetDef(self, out): for _, attr in self.get_attributes(): # TODO: improve docstring. docstring = "{0:s}.".format(attr.name) values_dict = { "class_name": self.class_name, "docstring": format_as_docstring(docstring), "name": attr.name} out.write(( " {{ \"{name:s}\",\n" " (getter) py{class_name:s}_{name:s}_getter,\n" " (setter) 0,\n" " \"{docstring:s}\",\n" " NULL }},\n" "\n").format(**values_dict)) class ProxiedMethod(Method): def __init__(self, method, myclass): # Cannot use super here due to certain logic in Method.__init__(). 
self.args = method.args self.base_class_name = method.base_class_name self.class_name = method.class_name self.defaults = {} self.definition_class_name = method.definition_class_name self.docstring = "Proxy for {0:s}".format(method.name) self.error_set = False self.exception = None self.method = method self.myclass = myclass self.name = method.name self.return_type = method.return_type def get_name(self): return "Proxied{0:s}_{1:s}".format( self.myclass.class_name, self.name) def _prototype(self, out): out.write("static {0:s} {1:s}({2:s} self".format( self.return_type.type.strip(), self.get_name(), self.definition_class_name)) for arg in self.args: tmp = arg.comment().strip() if tmp: out.write(", {0:s}".format(tmp)) out.write(")") def prototype(self, out): self._prototype(out) out.write(";\n") def write_definition(self, out): name = self.get_name() if name in self.myclass.module.function_definitions: return else: self.myclass.module.function_definitions.add(name) self._prototype(out) self._write_definition(out) def _write_definition(self, out): out.write( " {\n" " PyGILState_STATE gil_state;\n" " PyObject *Py_result = NULL;\n" " PyObject *method_name = NULL;\n") out.write(self.return_type.returned_python_definition()) for arg in self.args: out.write(arg.local_definition()) out.write("PyObject *py_{0:s} = NULL;\n".format(arg.name)) out.write(( "\n" " // Grab the GIL so we can do Python stuff\n" " gil_state = PyGILState_Ensure();\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " method_name = PyUnicode_FromString(\"{0:s}\");\n" "#else\n" " method_name = PyString_FromString(\"{0:s}\");\n" "#endif\n").format(self.name)) out.write("\n// Obtain Python objects for all the args:\n") for arg in self.args: out.write(arg.to_python_object( result=("py_{0:s}".format(arg.name)), sense="proxied", BORROWED=True)) out.write(( " if(((Object) self)->extension == NULL) {{\n" " RaiseError(ERuntimeError, \"No proxied object in {0:s}\");\n" " goto on_error;\n" " }}\n").format(self.myclass.class_name)) out.write( "\n" " // Now call the method\n" " PyErr_Clear();\n" " Py_result = PyObject_CallMethodObjArgs(((Object) self)->extension, method_name, ") for arg in self.args: out.write("py_{0:s},".format(arg.name)) # Sentinal out.write( "NULL);\n" "\n") self.error_set = True out.write(( " /* Check for Python errors */\n" " if(PyErr_Occurred()) {{\n" " pytsk_fetch_error();\n" "\n" " goto on_error;\n" " }}\n" "\n").format(CURRENT_ERROR_FUNCTION)) for arg in self.args: out.write(arg.python_proxy_post_call()) # Now convert the Python value back to a value out.write(self.return_type.from_python_object( "Py_result", self.return_type.name, self, context="self")) out.write( " if(Py_result != NULL) {\n" " Py_DecRef(Py_result);\n" " }\n" " Py_DecRef(method_name);\n" "\n") # Decref all our Python objects: for arg in self.args: out.write(( " if(py_{0:s} != NULL) {{\n" " Py_DecRef(py_{0:s});\n" " }}\n").format(arg.name)) out.write(( " PyGILState_Release(gil_state);\n" "\n" " {0:s}\n").format( self.return_type.return_value("func_return"))) if self.error_set: out.write( "\n" "on_error:\n" " if(Py_result != NULL) {\n" " Py_DecRef(Py_result);\n" " }\n" " Py_DecRef(method_name);\n" "\n") # Decref all our Python objects: for arg in self.args: out.write(( " if(py_{0:s} != NULL) {{\n" " Py_DecRef(py_{0:s});\n" " }}\n").format(arg.name)) out.write(( " PyGILState_Release(gil_state);\n" "\n" " {0:s}\n").format( self.error_condition())) out.write( "}\n" "\n") def error_condition(self): values_dict = { "result": "func_return"} return 
self.return_type.error_value.format(**values_dict) class StructConstructor(ConstructorMethod): """ A constructor for struct wrappers - basically just allocate memory for the struct. """ def prototype(self, out): return Method.prototype(self, out) def write_destructor(self, out): """We do not deallocate memory from structs. This is a real problem since struct memory is usually allocated in some proprietary way and we cant just call free on it when done. """ values_dict = { "class_name": self.class_name} out.write(( "static void {class_name:s}_dealloc(py{class_name:s} *self) {{\n" " struct _typeobject *ob_type = NULL;\n" "\n" " if(self != NULL) {{\n" " if(self->base != NULL) {{\n" " self->base = NULL;\n" " }}\n" " ob_type = Py_TYPE(self);\n" " if(ob_type != NULL && ob_type->tp_free != NULL) {{\n" " ob_type->tp_free((PyObject*) self);\n" " }}\n" " }}\n" "}}\n" "\n").format(**values_dict)) def write_definition(self, out): values_dict = { "class_name": self.class_name} out.write(( "static int py{class_name:s}_init(py{class_name:s} *self, PyObject *args, PyObject *kwds) {{\n" " // Base is borrowed from another object.\n" " self->base = NULL;\n" " return 0;\n" "}}\n" "\n").format(**values_dict)) class EmptyConstructor(ConstructorMethod): def prototype(self, out): return Method.prototype(self, out) def write_definition(self, out): values_dict = { "class_name": self.class_name} out.write( "static int py{class_name:s}_init(py{class_name:s} *self, PyObject *args, PyObject *kwds) {{\n" " return 0;\n" "}}\n" "\n".format(**values_dict)) class ClassGenerator(object): docstring = "" def __init__(self, class_name, base_class_name, module): self.class_name = class_name self.methods = [] # self.methods = [DefinitionMethod( # class_name, base_class_name, "_definition", [], "", # myclass=self)] self.module = module self.constructor = EmptyConstructor( class_name, base_class_name, "Con", [], "", myclass=self) self.base_class_name = base_class_name self.attributes = GetattrMethod( self.class_name, self.base_class_name, self) self.modifier = set() self.active = True self.iterator = None def get_string(self): """Retrieves a string representation.""" result = ( "#{0:s}\n" "Class {1:s}({2:s}):\n" " Constructor:{3:s}\n" " Attributes:\n{4:s}\n" " Methods:\n").format( self.docstring, self.class_name, self.base_class_name, self.constructor.get_string(), self.attributes.get_string()) for method in self.methods: result += " {0:s}\n".format(method.get_string()) return result def prepare(self): """ This method is called just before we need to write the output and allows us to do any last minute fixups. """ pass def is_active(self): """Returns true if this class is active and should be generated""" if self.class_name in self.module.active_structs: return True if (not self.active or self.modifier and ("PRIVATE" in self.modifier or "ABSTRACT" in self.modifier)): log("{0:s} is not active {1!s}".format( self.class_name, self.modifier)) return False return True def clone(self, new_class_name): """Creates a clone of this class - usefull when implementing class extensions. 
""" result = ClassGenerator(new_class_name, self.class_name, self.module) result.constructor = self.constructor.clone(new_class_name) result.methods = [ method.clone(new_class_name) for method in self.methods] result.attributes = self.attributes.clone(new_class_name) return result def add_attribute(self, attr_name, attr_type, modifier, *args, **kwargs): try: if not self.module.classes[attr_type].is_active(): return except KeyError: pass try: # All attribute references are always borrowed - that # means we dont want to free them after accessing them type_class = dispatch( attr_name, "BORROWED {0:s}".format(attr_type), *args, **kwargs) except KeyError: # TODO: fix that self.class_name is None. log("Unknown attribute type {0:s} for {1!s}.{2:s}".format( attr_type, self.class_name, attr_name)) return type_class.attributes.add(modifier) self.attributes.add_attribute(type_class) def add_constructor(self, method_name, args, return_type, docstring): if method_name.startswith("Con"): self.constructor = ConstructorMethod( self.class_name, self.base_class_name, method_name, args, return_type, myclass=self) self.constructor.docstring = docstring def struct(self, out): values_dict = { "class_name": self.class_name} out.write(( "\n" "typedef struct {{\n" " PyObject_HEAD\n" " {class_name:s} base;\n" " int base_is_python_object;\n" " int base_is_internal;\n" " PyObject *python_object1;\n" " PyObject *python_object2;\n" " int object_is_proxied;\n" "\n" " void (*initialise)(Gen_wrapper self, void *item);\n" "}} py{class_name:s};\n").format(**values_dict)) def code(self, out): if not self.constructor: raise RuntimeError( "No constructor found for class {0:s}".format(self.class_name)) self.constructor.write_destructor(out) self.constructor.write_definition(out) if self.attributes: self.attributes.write_definition(out) for method in self.methods: method.write_definition(out) if hasattr(method, "proxied"): method.proxied.write_definition(out) def initialise(self): values_dict = { "class_name": self.class_name} result = ( "python_wrappers[TOTAL_CLASSES].class_ref = (Object)&__{class_name:s};\n" "python_wrappers[TOTAL_CLASSES].python_type = &{class_name:s}_Type;\n").format(**values_dict) func_name = "py{class_name:s}_initialize_proxies".format(**values_dict) if func_name in self.module.function_definitions: result += ( "python_wrappers[TOTAL_CLASSES].initialize_proxies = (void (*)(Gen_wrapper, void *)) &{0:s};\n").format( func_name) result += "TOTAL_CLASSES++;\n" return result def PyGetSetDef(self, out): out.write( "static PyGetSetDef {0:s}_get_set_definitions[] = {{\n".format( self.class_name)) if self.attributes: self.attributes.PyGetSetDef(out) out.write( " {NULL, NULL, NULL, NULL, NULL} /* Sentinel */\n" "};\n" "\n") def PyMethodDef(self, out): out.write("static PyMethodDef {0:s}_methods[] = {{\n".format( self.class_name)) for method in self.methods: method.PyMethodDef(out) out.write( " {NULL, NULL, 0, NULL} /* Sentinel */\n" "};\n" "\n") def prototypes(self, out): """Write prototype suitable for .h file""" out.write("static PyTypeObject {0:s}_Type;\n".format(self.class_name)) self.constructor.prototype(out) if self.attributes: self.attributes.prototype(out) for method in self.methods: method.prototype(out) # Each method, except for close, needs a proxy method that # is called when the object is sub typed. 
if method.name == "close": continue method.proxied = ProxiedMethod(method, method.myclass) method.proxied.prototype(out) def numeric_protocol_int(self): pass def numeric_protocol_nonzero(self): values_dict = { "class_name": self.class_name} return ( "static int {class_name:s}_nonzero(py{class_name:s} *v) {{\n" " return v->base != 0;\n" "}}\n").format(**values_dict) def numeric_protocol(self, out): args = { "class": self.class_name} for type, func in [ ("nonzero", self.numeric_protocol_nonzero), ("int", self.numeric_protocol_int)]: definition = func() if definition: out.write(definition) args[type] = "{0:s}_{1:s}".format(self.class_name, type) else: args[type] = "0" out.write(( "#if PY_MAJOR_VERSION >= 3\n" "static PyNumberMethods {class:s}_as_number = {{\n" " (binaryfunc) 0, /* nb_add */\n" " (binaryfunc) 0, /* nb_subtract */\n" " (binaryfunc) 0, /* nb_multiply */\n" " (binaryfunc) 0, /* nb_remainder */\n" " (binaryfunc) 0, /* nb_divmod */\n" " (ternaryfunc) 0, /* nb_power */\n" " (unaryfunc) 0, /* nb_negative */\n" " (unaryfunc) 0, /* nb_positive */\n" " (unaryfunc) 0, /* nb_absolute */\n" " (inquiry) {nonzero:s}, /* nb_bool */\n" " (unaryfunc) 0, /* nb_invert */\n" " (binaryfunc) 0, /* nb_lshift */\n" " (binaryfunc) 0, /* nb_rshift */\n" " (binaryfunc) 0, /* nb_and */\n" " (binaryfunc) 0, /* nb_xor */\n" " (binaryfunc) 0, /* nb_or */\n" " (unaryfunc) {int:s}, /* nb_int */\n" " (void *) NULL, /* nb_reserved */\n" " (unaryfunc) 0, /* nb_float */\n" "\n" " (binaryfunc) 0, /* nb_inplace_add */\n" " (binaryfunc) 0, /* nb_inplace_subtract */\n" " (binaryfunc) 0, /* nb_inplace_multiply */\n" " (binaryfunc) 0, /* nb_inplace_remainder */\n" " (ternaryfunc) 0, /* nb_inplace_power */\n" " (binaryfunc) 0, /* nb_inplace_lshift */\n" " (binaryfunc) 0, /* nb_inplace_rshift */\n" " (binaryfunc) 0, /* nb_inplace_and */\n" " (binaryfunc) 0, /* nb_inplace_xor */\n" " (binaryfunc) 0, /* nb_inplace_or */\n" "\n" " (binaryfunc) 0, /* nb_floor_divide */\n" " (binaryfunc) 0, /* nb_true_divide */\n" " (binaryfunc) 0, /* nb_inplace_floor_divide */\n" " (binaryfunc) 0, /* nb_inplace_true_divide */\n" "\n" " (unaryfunc) 0, /* nb_index */\n" "}};\n" "#else\n" "static PyNumberMethods {class:s}_as_number = {{\n" " (binaryfunc) 0, /* nb_add */\n" " (binaryfunc) 0, /* nb_subtract */\n" " (binaryfunc) 0, /* nb_multiply */\n" " (binaryfunc) 0, /* nb_divide */\n" " (binaryfunc) 0, /* nb_remainder */\n" " (binaryfunc) 0, /* nb_divmod */\n" " (ternaryfunc) 0, /* nb_power */\n" " (unaryfunc) 0, /* nb_negative */\n" " (unaryfunc) 0, /* nb_positive */\n" " (unaryfunc) 0, /* nb_absolute */\n" " (inquiry) {nonzero:s}, /* nb_nonzero */\n" " (unaryfunc) 0, /* nb_invert */\n" " (binaryfunc) 0, /* nb_lshift */\n" " (binaryfunc) 0, /* nb_rshift */\n" " (binaryfunc) 0, /* nb_and */\n" " (binaryfunc) 0, /* nb_xor */\n" " (binaryfunc) 0, /* nb_or */\n" " (coercion) 0, /* nb_coerce */\n" " (unaryfunc) {int:s}, /* nb_int */\n" " (unaryfunc) 0, /* nb_long */\n" " (unaryfunc) 0, /* nb_float */\n" " (unaryfunc) 0, /* nb_oct */\n" " (unaryfunc) 0, /* nb_hex */\n" "\n" " (binaryfunc) 0, /* nb_inplace_add */\n" " (binaryfunc) 0, /* nb_inplace_subtract */\n" " (binaryfunc) 0, /* nb_inplace_multiply */\n" " (binaryfunc) 0, /* nb_inplace_divide */\n" " (binaryfunc) 0, /* nb_inplace_remainder */\n" " (ternaryfunc) 0, /* nb_inplace_power */\n" " (binaryfunc) 0, /* nb_inplace_lshift */\n" " (binaryfunc) 0, /* nb_inplace_rshift */\n" " (binaryfunc) 0, /* nb_inplace_and */\n" " (binaryfunc) 0, /* nb_inplace_xor */\n" " (binaryfunc) 0, /* nb_inplace_or 
*/\n" "\n" " (binaryfunc) 0, /* nb_floor_divide */\n" " (binaryfunc) 0, /* nb_true_divide */\n" " (binaryfunc) 0, /* nb_inplace_floor_divide */\n" " (binaryfunc) 0, /* nb_inplace_true_divide */\n" "\n" " (unaryfunc) 0, /* nb_index */\n" "}};\n" "#endif /* PY_MAJOR_VERSION >= 3 */\n" "\n").format(**args)) return "&{class:s}_as_number".format(**args) def PyTypeObject(self, out): docstring = "{0:s}: {1:s}".format( self.class_name, format_as_docstring(self.docstring)) args = { "class": self.class_name, "module": self.module.name, "iterator": 0, "iternext": 0, "tp_str": 0, "tp_eq": 0, "getattr_func": 0, "docstring": docstring} if self.attributes: args["getattr_func"] = self.attributes.name args["numeric_protocol"] = self.numeric_protocol(out) if "ITERATOR" in self.modifier: args["iterator"] = "PyObject_SelfIter" args["iternext"] = "py{0:s}_iternext".format(self.class_name) if "SELF_ITER" in self.modifier: args["iterator"] = "py{0:s}___iter__".format(self.class_name) if "TP_STR" in self.modifier: args["tp_str"] = "py{0:s}___str__".format(self.class_name) if "TP_EQUAL" in self.modifier: args["tp_eq"] = "{0:s}_eq".format(self.class_name) out.write(( "static PyTypeObject {class:s}_Type = {{\n" " PyVarObject_HEAD_INIT(NULL, 0)\n" " /* tp_name */\n" " \"{module:s}.{class:s}\",\n" " /* tp_basicsize */\n" " sizeof(py{class:s}),\n" " /* tp_itemsize */\n" " 0,\n" " /* tp_dealloc */\n" " (destructor) {class:s}_dealloc,\n" " /* tp_print */\n" " 0,\n" " /* tp_getattr */\n" " 0,\n" " /* tp_setattr */\n" " 0,\n" " /* tp_compare */\n" " 0,\n" " /* tp_repr */\n" " 0,\n" " /* tp_as_number */\n" " {numeric_protocol:s},\n" " /* tp_as_sequence */\n" " 0,\n" " /* tp_as_mapping */\n" " 0,\n" " /* tp_hash */\n" " 0,\n" " /* tp_call */\n" " 0,\n" " /* tp_str */\n" " (reprfunc) {tp_str!s},\n" " /* tp_getattro */\n" " (getattrofunc) {getattr_func!s},\n" " /* tp_setattro */\n" " 0,\n" " /* tp_as_buffer */\n" " 0,\n" " /* tp_flags */\n" " Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,\n" " /* tp_doc */\n" " \"{docstring:s}\",\n" " /* tp_traverse */\n" " 0,\n" " /* tp_clear */\n" " 0,\n" " /* tp_richcompare */\n" " {tp_eq!s},\n" " /* tp_weaklistoffset */\n" " 0,\n" " /* tp_iter */\n" " (getiterfunc) {iterator!s},\n" " /* tp_iternext */\n" " (iternextfunc) {iternext!s},\n" " /* tp_methods */\n" " {class:s}_methods,\n" " /* tp_members */\n" " 0,\n" " /* tp_getset */\n" " {class:s}_get_set_definitions,\n" " /* tp_base */\n" " 0,\n" " /* tp_dict */\n" " 0,\n" " /* tp_descr_get */\n" " 0,\n" " /* tp_descr_set */\n" " 0,\n" " /* tp_dictoffset */\n" " 0,\n" " /* tp_init */\n" " (initproc) py{class:s}_init,\n" " /* tp_alloc */\n" " 0,\n" " /* tp_new */\n" " 0,\n" "}};\n" "\n").format(**args)) class StructGenerator(ClassGenerator): """A wrapper generator for structs.""" def __init__(self, class_name, module): self.class_name = class_name self.methods = [] self.module = module self.base_class_name = None self.active = False self.modifier = set() self.constructor = None self.attributes = GetattrMethod( self.class_name, self.base_class_name, self) def get_string(self): """Retrieves a string representation.""" return ( "# {0:s}\n" "Struct {1:s}:\n" "{2:s}\n").format( self.docstring, self.class_name, self.attributes.get_string()) def prepare(self): # This is needed for late stage initialization - sometimes # our class_name is not know until now. 
if not self.constructor: self.constructor = StructConstructor( self.class_name, self.base_class_name, "Con", [], "void", myclass=self) self.attributes.rename_class_name(self.class_name) for x in self.attributes._attributes: x[1].attributes.add("FOREIGN") def struct(self, out): values_dict = { "class_name": self.class_name} out.write(( "\n" "typedef struct {{\n" " PyObject_HEAD\n" " {class_name:s} *base;\n" " int base_is_python_object;\n" " int base_is_internal;\n" " PyObject *python_object1;\n" " PyObject *python_object2;\n" " int object_is_proxied;\n" " {class_name:s} *cbase;\n" "}} py{class_name:s};\n").format( **values_dict)) def initialise(self): return "" class EnumConstructor(ConstructorMethod): def prototype(self, out): return Method.prototype(self, out) def write_destructor(self, out): values_dict = { "class_name": self.class_name} out.write(( "static void {class_name:s}_dealloc(py{class_name:s} *self) {{\n" " struct _typeobject *ob_type = NULL;\n" "\n" " if(self != NULL) {{\n" " Py_DecRef(self->value);\n" " ob_type = Py_TYPE(self);\n" " if(ob_type != NULL && ob_type->tp_free != NULL) {{\n" " ob_type->tp_free((PyObject*) self);\n" " }}\n" " }}\n" "}}\n").format(**values_dict)) def write_definition(self, out): self.myclass.modifier.add("TP_STR") self.myclass.modifier.add("TP_EQUAL") self._prototype(out) values_dict = { "class_name": self.class_name} out.write(( "{{\n" " static char *kwlist[] = {{\"value\", NULL}};\n" "\n" " if(!PyArg_ParseTupleAndKeywords(args, kwds, \"O\", kwlist, &self->value)) {{\n" " goto on_error;\n" " }}\n" "\n" " Py_IncRef(self->value);\n" "\n" " return 0;\n" "\n" "on_error:\n" " return -1;\n" "}}\n" "\n" "static PyObject *py{class_name:s}___str__(py{class_name:s} *self) {{\n" " PyObject *result = PyDict_GetItem({class_name:s}_rev_lookup, self->value);\n" "\n" " if(result) {{\n" " Py_IncRef(result);\n" " }} else {{\n" " result = PyObject_Str(self->value);\n" " }}\n" "\n" " return result;\n" "}}\n" "\n" "static PyObject * {class_name:s}_eq(PyObject *me, PyObject *other, int op) {{\n" " py{class_name:s} *self = (py{class_name:s} *)me;\n" " int other_int = PyLong_AsLong(other);\n" " int my_int = 0;\n" " PyObject *result = Py_False;\n" "\n" " if(CheckError(EZero)) {{\n" " my_int = PyLong_AsLong(self->value);\n" " switch(op) {{\n" " case Py_EQ:\n" " result = my_int == other_int? Py_True: Py_False;\n" " break;\n" " case Py_NE:\n" " result = my_int != other_int? 
Py_True: Py_False;\n" " break;\n" " default:\n" " return Py_NotImplemented;\n" " }}\n" " }} else {{\n" " return NULL;\n" " }}\n" "\n" " ClearError();\n" "\n" " Py_IncRef(result);\n" " return result;\n" "}}\n" "\n").format(**values_dict)) class Enum(StructGenerator): def __init__(self, name, module): super(Enum, self).__init__(name, module) self.values = [] self.name = name self.attributes = None self.active = True def get_string(self): """Retrieves a string representation.""" result = "Enum {0:s}:\n".format(self.name) for attr in self.values: result += " {0:s}\n".format(attr) return result def prepare(self): self.constructor = EnumConstructor( self.class_name, self.base_class_name, "Con", [], "void", myclass=self) StructGenerator.prepare(self) def struct(self, out): values_dict = { "class_name": self.class_name} out.write(( "\n" "typedef struct {{\n" " PyObject_HEAD\n" " PyObject *value;\n" "}} py{class_name:s};\n" "\n" "PyObject *{class_name:s}_Dict_lookup;\n" "PyObject *{class_name:s}_rev_lookup;\n").format( **values_dict)) def PyGetSetDef(self, out): out.write(( "static PyGetSetDef {0:s}_get_set_definitions[] = {{\n" " {{NULL, NULL, NULL, NULL, NULL}} /* Sentinel */\n" "}};\n" "\n").format(self.class_name)) def PyMethodDef(self, out): out.write(( "static PyMethodDef {0:s}_methods[] = {{\n" " {{NULL, NULL, 0, NULL}} /* Sentinel */\n" "}};\n" "\n").format(self.class_name)) def numeric_protocol_nonzero(self): pass def numeric_protocol_int(self): values_dict = { "class_name": self.class_name} return ( "static PyObject *{class_name:s}_int(py{class_name:s} *self) {{\n" " Py_IncRef(self->value);\n" " return self->value;\n" "}}\n").format(**values_dict) def initialise(self): values_dict = { "class_name": self.class_name} result = ( "{class_name:s}_Dict_lookup = PyDict_New();\n" "{class_name:s}_rev_lookup = PyDict_New();\n").format( **values_dict) if self.values: result += ( "{\n" " PyObject *integer_object = NULL;\n" " PyObject *string_object = NULL;\n") for attr in self.values: values_dict = { "class_name": self.class_name, "value": attr} result += ( " integer_object = PyLong_FromLong({value:s});\n" "\n" "#if PY_MAJOR_VERSION >= 3\n" " string_object = PyUnicode_FromString(\"{value:s}\");\n" "#else\n" " string_object = PyString_FromString(\"{value:s}\");\n" "#endif\n" " PyDict_SetItem({class_name:s}_Dict_lookup, string_object, integer_object);\n" " PyDict_SetItem({class_name:s}_rev_lookup, integer_object, string_object);\n" " Py_DecRef(integer_object);\n" " Py_DecRef(string_object);\n" "\n").format(**values_dict) result += "}\n" return result class EnumType(Integer): buildstr = "i" def __init__(self, name, type, *args, **kwargs): super(EnumType, self).__init__(name, type, *args, **kwargs) self.type = type def definition(self, default=None, **kwargs): # Force the enum to be an int just in case the compiler chooses # a random size. if default: return " int {0:s} = {1:s};\n".format(self.name, default) else: return " int UNUSED {0:s} = 0;\n".format(self.name) def to_python_object(self, name=None, result="Py_result", **kwargs): name = name or self.name return ( "PyErr_Clear();\n" "{0:s} = PyObject_CallMethod(g_module, \"{1:s}\", \"K\", (uint64_t){2:s});\n").format( result, self.type, name) def pre_call(self, method, **kwargs): method.error_set = True values_dict = { "name": self.name, "type": self.type} return ( "/* Check if the integer passed is actually a valid member\n" " * of the enum. 
Enum value of 0 is always allowed.\n" " */\n" "if({name:s}) {{\n" " PyObject *py_{name:s} = NULL;\n" " PyObject *tmp = NULL;\n" "\n" " py_{name:s} = PyLong_FromLong({name:s});\n" " tmp = PyDict_GetItem({type:s}_rev_lookup, py_{name:s});\n" "\n" " Py_DecRef(py_{name:s});\n" " if(!tmp) {{\n" " PyErr_Format(PyExc_RuntimeError, \"value %lu is not valid for Enum {type:s} of arg '{name:s}'\", (unsigned long){name:s});\n" " goto on_error;\n" " }}\n" "}}\n").format(**values_dict) class HeaderParser(lexer.SelfFeederMixIn): tokens = [ ["INITIAL", r"#define\s+", "PUSH_STATE", "DEFINE"], ["DEFINE", r"([A-Za-z_0-9]+)\s+[^\n]+", "DEFINE,POP_STATE", None], ["DEFINE", r"\n", "POP_STATE", None], # Ignore macros with args ["DEFINE", r"\([^\n]+", "POP_STATE", None], # Recognize ansi c comments [".", r"/\*(.)", "PUSH_STATE", "COMMENT"], ["COMMENT", r"(.+?)\*/\s+", "COMMENT_END,POP_STATE", None], ["COMMENT", r"(.+)", "COMMENT", None], # And c++ comments [".", r"//([^\n]+)", "COMMENT", None], # An empty line clears the current comment [".", r"\r?\n\r?\n", "CLEAR_COMMENT", None], # Ignore whitespace [".", r"\s+", "SPACE", None], [".", r"\\\n", "SPACE", None], # Recognize CLASS() definitions ["INITIAL", r"^([A-Z]+)?\s*CLASS\(([A-Z_a-z0-9]+)\s*,\s*([A-Z_a-z0-9]+)\)", "PUSH_STATE,CLASS_START", "CLASS"], ["CLASS", r"^\s*(FOREIGN|ABSTRACT|PRIVATE)?([0-9A-Z_a-z ]+( |\*))METHOD\(([A-Z_a-z0-9]+),\s*([A-Z_a-z0-9]+),?", "PUSH_STATE,METHOD_START", "METHOD"], ["METHOD", r"\s*([0-9A-Z a-z_]+\s+\*?\*?)([0-9A-Za-z_]+),?", "METHOD_ARG", None], ["METHOD", r"\);", "POP_STATE,METHOD_END", None], ["CLASS", r"^\s*(FOREIGN|ABSTRACT)?([0-9A-Z_a-z ]+\s+\*?)\s*([A-Z_a-z0-9]+)\s*;", "CLASS_ATTRIBUTE", None], ["CLASS", "END_CLASS", "END_CLASS,POP_STATE", None], # Recognize struct definitions (With name) ["INITIAL", "([A-Z_a-z0-9 ]+)?struct\s+([A-Z_a-z0-9]+)\s+{", "PUSH_STATE,STRUCT_START", "STRUCT"], # Without name (using typedef) ["INITIAL", "typedef\s+struct\s+{", "PUSH_STATE,TYPEDEF_STRUCT_START", "STRUCT"], ["STRUCT", r"^\s*([0-9A-Z_a-z ]+\s+\*?)\s*([A-Z_a-z0-9]+)(?:\[([A-Z_a-z0-9]+)\])?\s*;", "STRUCT_ATTRIBUTE", None], ["STRUCT", r"^\s*([0-9A-Z_a-z ]+)\*\s+([A-Z_a-z0-9]+)\s*;", "STRUCT_ATTRIBUTE_PTR", None], # Struct ended with typedef ["STRUCT", "}\s+([0-9A-Za-z_]+);", "POP_STATE,TYPEDEF_STRUCT_END", None], ["STRUCT", "}", "POP_STATE,STRUCT_END", None], # Handle recursive struct or union definition (At the moment # we cant handle them at all) ["(RECURSIVE_)?STRUCT", "(struct|union)\s+([_A-Za-z0-9]+)?\s*{", "PUSH_STATE", "RECURSIVE_STRUCT"], ["RECURSIVE_STRUCT", "}\s+[0-9A-Za-z]+", "POP_STATE", None], # Process enums (2 forms - named and typedefed) ["INITIAL", r"enum\s+([0-9A-Za-z_]+)\s+{", "PUSH_STATE,ENUM_START", "ENUM"], # Unnamed ["INITIAL", r"typedef\s+enum\s+{", "PUSH_STATE,TYPEDEF_ENUM_START", "ENUM"], ["ENUM", r"([0-9A-Za-z_]+)\s+=[^\n]+", "ENUM_VALUE", None], # Typedefed ending ["ENUM", r"}\s+([0-9A-Za-z_]+);", "POP_STATE,TYPEDEFED_ENUM_END", None], ["ENUM", r"}", "POP_STATE,ENUM_END", None], ["INITIAL", r"BIND_STRUCT\(([0-9A-Za-z_ \*]+)\)", "BIND_STRUCT", None], # A simple typedef of one type for another type: ["INITIAL", r"typedef ([A-Za-z_0-9]+) +([^;]+);", "SIMPLE_TYPEDEF", None], # Handle proxied directives ["INITIAL", r"PXXROXY_CLASS\(([A-Za-z0-9_]+)\)", "PROXY_CLASS", None], ] def __init__(self, name, verbose=1, base=""): self.module = Module(name) self.base = base super(HeaderParser, self).__init__(verbose=0) file_object = io.BytesIO( b"// Base object\n" b"CLASS(Object, Obj)\n" b"END_CLASS\n") 
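        # Feed a synthetic header first so that a base "Object" class exists
        # in self.module.classes; CLASS() definitions parsed later clone
        # their methods and attributes from their base class entry.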
self.parse_fd(file_object) current_comment = "" def COMMENT(self, t, m): self.current_comment += m.group(1) + "\n" def COMMENT_END(self, t, m): self.current_comment += m.group(1) def CLEAR_COMMENT(self, t, m): self.current_comment = "" def DEFINE(self, t, m): line = m.group(0) line = line.split("/*")[0] if "\"" in line: type = "string" else: type = "integer" name = m.group(1).strip() if (len(name) > 3 and name[0] != "_" and name == name.upper() and name not in self.module.constants_blacklist): self.module.add_constant(name, type) current_class = None def CLASS_START(self, t, m): class_name = m.group(2).strip() base_class_name = m.group(3).strip() try: self.current_class = self.module.classes[base_class_name].clone(class_name) except (KeyError, AttributeError): log("Base class {0:s} is not defined !!!!".format(base_class_name)) self.current_class = ClassGenerator(class_name, base_class_name, self.module) self.current_class.docstring = self.current_comment self.current_class.modifier.add(m.group(1)) self.module.add_class(self.current_class, Wrapper) identifier = "{0:s} *".format(class_name) type_dispatcher[identifier] = PointerWrapper current_method = None def METHOD_START(self, t, m): return_type = m.group(2).strip() method_name = m.group(5).strip() modifier = m.group(1) or "" if "PRIVATE" in modifier: return # Is it a regular method or a constructor? self.current_method = Method if (return_type == self.current_class.class_name and method_name.startswith("Con")): self.current_method = ConstructorMethod elif method_name == "iternext": self.current_method = IteratorMethod self.current_class.modifier.add("ITERATOR") elif method_name == "__iter__": self.current_method = SelfIteratorMethod self.current_class.modifier.add("SELF_ITER") elif method_name == "__str__": self.current_class.modifier.add("TP_STR") self.current_method = self.current_method( self.current_class.class_name, self.current_class.base_class_name, method_name, [], return_type, myclass=self.current_class) self.current_method.docstring = self.current_comment self.current_method.modifier = modifier def METHOD_ARG(self, t, m): name = m.group(2).strip() type = m.group(1).strip() if self.current_method: self.current_method.add_arg(type, name) def METHOD_END(self, t, m): if not self.current_method: return if isinstance(self.current_method, ConstructorMethod): self.current_class.constructor = self.current_method else: found = False for i in range(len(self.current_class.methods)): # Try to replace existing methods with this new method method = self.current_class.methods[i] if method.name == self.current_method.name: self.current_class.methods[i] = self.current_method self.current_method = None return # Method does not exist, just add to the end self.current_class.methods.append(self.current_method) self.current_method = None def CLASS_ATTRIBUTE(self, t, m): modifier = m.group(1) or "" type = m.group(2).strip() name = m.group(3).strip() self.current_class.add_attribute(name, type, modifier) def END_CLASS(self, t, m): self.current_class = None current_struct = None def STRUCT_START(self, t, m): self.current_struct = StructGenerator(m.group(2).strip(), self.module) self.current_struct.docstring = self.current_comment self.current_struct.modifier.add(m.group(1)) def TYPEDEF_STRUCT_START(self, t, m): self.current_struct = StructGenerator(None, self.module) self.current_struct.docstring = self.current_comment def STRUCT_ATTRIBUTE(self, t, m): name = m.group(2).strip() type = m.group(1).strip() array_size = m.group(3) if array_size is not None: 
array_size = array_size.strip() self.current_struct.add_attribute(name, type, "", array_size=array_size) else: self.current_struct.add_attribute(name, type, "") def STRUCT_ATTRIBUTE_PTR(self, t, m): type = "{0:s} *".format(m.group(1).strip()) name = m.group(2).strip() self.current_struct.add_attribute(name, type, "") def STRUCT_END(self, t, m): self.module.add_class(self.current_struct, StructWrapper) identifier = "{0:s} *".format(self.current_struct.class_name) type_dispatcher[identifier] = PointerStructWrapper self.current_struct = None def TYPEDEF_STRUCT_END(self, t, m): self.current_struct.class_name = m.group(1).strip() self.STRUCT_END(t, m) current_enum = None def ENUM_START(self, t, m): self.current_enum = Enum(m.group(1).strip(), self.module) def TYPEDEF_ENUM_START(self, t, m): self.current_enum = Enum(None, self.module) def ENUM_VALUE(self, t, m): self.current_enum.values.append(m.group(1).strip()) def ENUM_END(self, t, m): self.module.classes[self.current_enum.name] = self.current_enum # For now we just treat enums as an integer, and also add # them to the constant table. In future it would be nice to # have them as a proper Python object so we can override # __unicode__, __str__ and __int__. for attr in self.current_enum.values: self.module.add_constant(attr, "integer") # type_dispatcher[self.current_enum.name] = Integer type_dispatcher[self.current_enum.name] = EnumType self.current_enum = None def TYPEDEFED_ENUM_END(self, t, m): self.current_enum.name = self.current_enum.class_name = m.group(1) self.ENUM_END(t, m) def BIND_STRUCT(self, t, m): self.module.active_structs.add(m.group(1)) self.module.active_structs.add("{0:s} *".format(m.group(1))) def SIMPLE_TYPEDEF(self, t, m): # We basically add a new type as a copy of the old # type old, new = m.group(1).strip(), m.group(2).strip() if old in type_dispatcher: type_dispatcher[new] = type_dispatcher[old] def PROXY_CLASS(self, t, m): base_class_name = m.group(1).strip() class_name = "Proxied{0:s}".format(base_class_name) try: proxied_class = self.module.classes[base_class_name] except KeyError: raise RuntimeError(( "Need to create a proxy for {0:s} but it has not been " "defined (yet). 
You must place the PROXIED_CLASS() " "instruction after the class definition").format( base_class_name)) current_class = ProxyClassGenerator(class_name, base_class_name, self.module) # self.current_class.constructor.args += proxied_class.constructor.args current_class.docstring = self.current_comment # Create proxies for all these methods for method in proxied_class.methods: if method.name[0] != "_": current_class.methods.append(ProxiedMethod(method, current_class)) self.module.add_class(current_class, Wrapper) def parse_filenames(self, filenames): for f in filenames: self._parse(f) # Second pass for f in filenames: self._parse(f) def _parse(self, filename): file_object = open(filename, "rb") self.parse_fd(file_object) file_object.close() if filename not in self.module.files: if filename.startswith(self.base): filename = filename[len(self.base):] self.module.headers += "#include \"{0:s}\"\n".format(filename) self.module.files.append(filename) def write(self, out): try: self.module.write(out) except: # pdb.post_mortem() raise def write_headers(self): pass # pdb.set_trace() if __name__ == "__main__": p = HeaderParser("pytsk3", verbose=1) for arg in sys.argv[1:]: p.parse_fd(open(arg, "rb")) log("second parse") for arg in sys.argv[1:]: p.parse_fd(open(arg, "rb")) p.write(sys.stdout) p.write_headers() pytsk-20190507/dpkg/000077500000000000000000000000001346423473500140755ustar00rootroot00000000000000pytsk-20190507/dpkg/changelog000066400000000000000000000002121346423473500157420ustar00rootroot00000000000000pytsk3 (20190507-1) unstable; urgency=low * Auto-generated -- Joachim Metz Tue, 07 May 2019 09:52:48 -0100 pytsk-20190507/dpkg/compat000066400000000000000000000000021346423473500152730ustar00rootroot000000000000007 pytsk-20190507/dpkg/control000066400000000000000000000024061346423473500155020ustar00rootroot00000000000000Source: pytsk3 Section: python Priority: extra Maintainer: Joachim Metz Build-Depends: debhelper (>= 9), dh-autoreconf, dh-python, python-all (>= 2.7~), python-all-dev, python-setuptools, python3-all (>= 3.2~), python3-all-dev, python3-setuptools Standards-Version: 3.9.5 X-Python-Version: >= 2.7 X-Python3-Version: >= 3.2 Homepage: https://github.com/py4n6/pytsk/ Package: python-pytsk3 Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Conflicts: python-tsk Replaces: python-tsk Description: Python 2 bindings for the SleuthKit (libtsk) Python 2 bindings for the SleuthKit (libtsk). Package: python-pytsk3-dbg Architecture: any Section: debug Depends: python-pytsk3 (= ${binary:Version}), ${misc:Depends} Description: Debugging symbols for python-pytsk3 Debugging symbols for python-pytsk3. Package: python3-pytsk3 Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Conflicts: python3-tsk Replaces: python3-tsk Description: Python 3 bindings for the SleuthKit (libtsk) Python 3 bindings for the SleuthKit (libtsk). Package: python3-pytsk3-dbg Architecture: any Section: debug Depends: python3-pytsk3 (= ${binary:Version}), ${misc:Depends} Description: Debugging symbols for python3-pytsk3 Debugging symbols for python3-pytsk3. 
pytsk-20190507/dpkg/copyright000066400000000000000000000031401346423473500160260ustar00rootroot00000000000000This work was packaged for Debian by: Joachim Metz on Wed, 26 Sep 2012 17:00:00 +0200 It was downloaded from: https://github.com/py4n6/pytsk/ Upstream Author(s): Michael Cohen Copyright: Copyright 2010 Michael Cohen License: Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. On Debian systems, the complete text of the Apache-2.0 License can be found in `/usr/share/common-licenses/Apache-2.0'. The Debian packaging is: Copyright (C) 2012 Joachim Metz License: Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pytsk-20190507/dpkg/python-pytsk3.docs000066400000000000000000000000171346423473500175210ustar00rootroot00000000000000LICENSE README pytsk-20190507/dpkg/python3-pytsk3.docs000066400000000000000000000000171346423473500176040ustar00rootroot00000000000000LICENSE README pytsk-20190507/dpkg/rules000077500000000000000000000035071346423473500151620ustar00rootroot00000000000000#!/usr/bin/make -f # debian/rules that uses debhelper >= 7. # Uncomment this to turn on verbose mode. #export DH_VERBOSE=1 # This has to be exported to make some magic below work. 
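# Note: dh_auto_build/dh_auto_install below handle the Python 2 package,
# while the explicit py3versions loops in the override targets build and
# install python3-pytsk3.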
export DH_OPTIONS %: dh $@ --with python2,python3 -- CFLAGS="-g" .PHONY: override_dh_auto_clean override_dh_auto_clean: dh_auto_clean rm -rf build pytsk3.egg-info/SOURCES.txt pytsk3.egg-info/PKG-INFO .PHONY: override_dh_auto_build override_dh_auto_build: (cd sleuthkit && autoreconf -fiv) dh_auto_build set -ex; for python in $(shell py3versions -r); do \ $$python setup.py build; \ done; .PHONY: override_dh_auto_install override_dh_auto_install: dh_auto_install --destdir $(CURDIR)/debian/python-pytsk3 set -ex; for python in $(shell py3versions -r); do \ $$python setup.py install --root=$(CURDIR)/debian/python3-pytsk3 --install-layout=deb; \ done; .PHONY: override_dh_auto_test override_dh_auto_test: .PHONY: override_dh_installmenu override_dh_installmenu: .PHONY: override_dh_installmime override_dh_installmime: .PHONY: override_dh_installmodules override_dh_installmodules: .PHONY: override_dh_installlogcheck override_dh_installlogcheck: .PHONY: override_dh_installlogrotate override_dh_installlogrotate: .PHONY: override_dh_installpam override_dh_installpam: .PHONY: override_dh_installppp override_dh_installppp: .PHONY: override_dh_installudev override_dh_installudev: .PHONY: override_dh_installwm override_dh_installwm: .PHONY: override_dh_installxfonts override_dh_installxfonts: .PHONY: override_dh_gconf override_dh_gconf: .PHONY: override_dh_icons override_dh_icons: .PHONY: override_dh_perl override_dh_perl: .PHONY: override_dh_strip override_dh_strip: ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS))) dh_strip -ppython-pytsk3 --dbg-package=python-pytsk3-dbg dh_strip -ppython3-pytsk3 --dbg-package=python3-pytsk3-dbg endif pytsk-20190507/dpkg/source/000077500000000000000000000000001346423473500153755ustar00rootroot00000000000000pytsk-20190507/dpkg/source/format000066400000000000000000000000041346423473500166020ustar00rootroot000000000000001.0 pytsk-20190507/dpkg/source/options000066400000000000000000000000521346423473500170100ustar00rootroot00000000000000tar-ignore = "a.out" tar-ignore = "tmp/*" pytsk-20190507/error.c000066400000000000000000000060051346423473500144460ustar00rootroot00000000000000/* Error functions. * * Copyright 2010, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #if !defined( WIN32 ) #include #endif #include "aff4_errors.h" #include "class.h" #define ERROR_BUFF_SIZE 10240 // Windows version not truely threadsafe for now #if defined( WIN32 ) static char global_error_buffer[ERROR_BUFF_SIZE]; static int global_error_type = 0; #else /** These slots carry the TLS error keys */ static pthread_key_t error_str_slot; static pthread_once_t error_once = PTHREAD_ONCE_INIT; static pthread_key_t error_value_slot; #endif #if defined( WIN32 ) static void error_init(void) { memset(global_error_buffer, 0, sizeof(global_error_buffer)); }; #else static void error_init(void); void error_dest(void *slot) { if(slot) talloc_free(slot); }; void error_init(void) { // We create the error buffer slots if(pthread_key_create(&error_str_slot, error_dest) || pthread_key_create(&error_value_slot, error_dest)) { printf("Unable to set up TLS variables\n"); abort(); }; }; #endif DLL_PUBLIC void *aff4_raise_errors(int t, char *reason, ...) { char *error_buffer; char tmp[ERROR_BUFF_SIZE]; // This has to succeed: int *type = aff4_get_current_error(&error_buffer); if(reason) { va_list ap; va_start(ap, reason); vsnprintf(tmp, ERROR_BUFF_SIZE-1, reason,ap); tmp[ERROR_BUFF_SIZE-1]=0; va_end(ap); }; if(*type == EZero) { *error_buffer = 0; //update the error type *type = t; } else { strncat(error_buffer, "\n", ERROR_BUFF_SIZE -1 ); }; strncat(error_buffer, tmp, ERROR_BUFF_SIZE-1); return NULL; }; #if defined( WIN32 ) DLL_PUBLIC int *aff4_get_current_error(char **error_buffer) { if(error_buffer != NULL) { *error_buffer = global_error_buffer; }; return &global_error_type; }; #else DLL_PUBLIC int *aff4_get_current_error(char **error_buffer) { int *type; (void) pthread_once(&error_once, error_init); type = pthread_getspecific(error_value_slot); // This is optional if(error_buffer != NULL) { *error_buffer = pthread_getspecific(error_str_slot); // If TLS buffers are not set we need to create them // TODO: the TLS buffers need to be freed on exit. if(*error_buffer == NULL) { *error_buffer = talloc_size(NULL, ERROR_BUFF_SIZE); pthread_setspecific(error_str_slot, *error_buffer); }; }; if(!type) { type = talloc_size(NULL, ERROR_BUFF_SIZE); pthread_setspecific(error_value_slot, type); }; return type; }; #endif pytsk-20190507/examples/000077500000000000000000000000001346423473500147665ustar00rootroot00000000000000pytsk-20190507/examples/ewf.py000066400000000000000000000060441346423473500161250ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is a module to interface to libewf. This needs to be tested with the windows port. 
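A minimal usage sketch (the segment name below is only an example, and
libewf must be installed where ctypes.util.find_library() can locate it):

    fd = ewffile("image.E01")
    print(fd.get_headers())
    fd.seek(0)
    data = fd.read(512)
    fd.close()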
""" from ctypes import * import ctypes.util possible_names = ["libewf-1", "ewf",] for name in possible_names: resolved = ctypes.util.find_library(name) if resolved: break try: if resolved is None: raise ImportError("libewf not found") libewf = CDLL(resolved) if not libewf._name: raise OSError() except OSError: raise ImportError("libewf not found") class ewffile: """A file like object to provide access to the ewf file.""" def __init__(self, *volumes): volume_array = c_char_p * len(volumes) self.handle = libewf.libewf_open(volume_array(*volumes), c_int(len(volumes)), c_int(1)) if self.handle == 0: raise RuntimeError("Unable to open ewf file") self.readptr = 0 size_p = pointer(c_ulonglong(0)) libewf.libewf_get_media_size(self.handle, size_p) self.size = size_p.contents.value def seek(self, offset, whence=0): if whence == 0: self.readptr = offset elif whence == 1: self.readptr += offset elif whence == 2: self.readptr = self.size + offset self.readptr = min(self.readptr, self.size) def tell(self): return self.readptr def read(self, length): buf = create_string_buffer(length) length = libewf.libewf_read_random( self.handle, buf, c_ulong(length), c_ulonglong(self.readptr)) return buf.raw[:length] def close(self): libewf.libewf_close(self.handle) def get_headers(self): properties = ["case_number", "description", "examinier_name", "evidence_number", "notes", "acquiry_date", "system_date", "acquiry_operating_system", "acquiry_software_version", "password", "compression_type", "model", "serial_number"] ## Make sure we parsed all headers libewf.libewf_parse_header_values(self.handle, c_int(4)) result = {"size": self.size} buf = create_string_buffer(1024) for p in properties: libewf.libewf_get_header_value(self.handle, p, buf, 1024) result[p] = buf.value ## Get the hash if libewf.libewf_get_md5_hash(self.handle, buf, 16) == 1: result["md5"] = buf.raw[:16] return result def ewf_open(volumes): return ewffile(volumes) if __name__== "__main__": fd = ewffile("pyflag_stdimage_0.5.e01") print fd.get_headers() fd.seek(0x8E4B88) print "%r" % fd.read(100) pytsk-20190507/examples/fls.py000066400000000000000000000164731346423473500161370ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import gc import pdb import sys import time import images import pytsk3 class Fls(object): FILE_TYPE_LOOKUP = { pytsk3.TSK_FS_NAME_TYPE_UNDEF: "-", pytsk3.TSK_FS_NAME_TYPE_FIFO: "p", pytsk3.TSK_FS_NAME_TYPE_CHR: "c", pytsk3.TSK_FS_NAME_TYPE_DIR: "d", pytsk3.TSK_FS_NAME_TYPE_BLK: "b", pytsk3.TSK_FS_NAME_TYPE_REG: "r", pytsk3.TSK_FS_NAME_TYPE_LNK: "l", pytsk3.TSK_FS_NAME_TYPE_SOCK: "h", pytsk3.TSK_FS_NAME_TYPE_SHAD: "s", pytsk3.TSK_FS_NAME_TYPE_WHT: "w", pytsk3.TSK_FS_NAME_TYPE_VIRT: "v"} META_TYPE_LOOKUP = { pytsk3.TSK_FS_META_TYPE_REG: "r", pytsk3.TSK_FS_META_TYPE_DIR: "d", pytsk3.TSK_FS_META_TYPE_FIFO: "p", pytsk3.TSK_FS_META_TYPE_CHR: "c", pytsk3.TSK_FS_META_TYPE_BLK: "b", pytsk3.TSK_FS_META_TYPE_LNK: "h", pytsk3.TSK_FS_META_TYPE_SHAD: "s", pytsk3.TSK_FS_META_TYPE_SOCK: "s", pytsk3.TSK_FS_META_TYPE_WHT: "w", pytsk3.TSK_FS_META_TYPE_VIRT: "v"} ATTRIBUTE_TYPES_TO_PRINT = [ pytsk3.TSK_FS_ATTR_TYPE_NTFS_IDXROOT, pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA, pytsk3.TSK_FS_ATTR_TYPE_DEFAULT] def __init__(self): super(Fls, self).__init__() self._fs_info = None self._img_info = None self._long_listing = False self._recursive = False def list_directory(self, directory, stack=None): stack.append(directory.info.fs_file.meta.addr) for directory_entry in directory: prefix = "+" * (len(stack) - 1) if prefix: prefix += " " # Skip ".", ".." or directory entries without a name. if (not hasattr(directory_entry, "info") or not hasattr(directory_entry.info, "name") or not hasattr(directory_entry.info.name, "name") or directory_entry.info.name.name in [".", ".."]): continue self.print_directory_entry(directory_entry, prefix=prefix) if self._recursive: try: sub_directory = directory_entry.as_directory() inode = directory_entry.info.meta.addr # This ensures that we don't recurse into a directory # above the current level and thus avoid circular loops. if inode not in stack: self.list_directory(sub_directory, stack) except IOError: pass stack.pop(-1) def open_directory(self, inode_or_path): inode = None path = None if inode_or_path is None: path = "/" elif inode_or_path.startswith("/"): path = inode_or_path else: inode = inode_or_path # Note that we cannot pass inode=None to fs_info.opendir(). if inode: directory = self._fs_info.open_dir(inode=inode) else: directory = self._fs_info.open_dir(path=path) return directory def open_file_system(self, offset): self._fs_info = pytsk3.FS_Info(self._img_info, offset=offset) def open_image(self, image_type, filenames): # List the actual files (any of these can raise for any reason). 
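    # images.SelectImage() returns a pytsk3.Img_Info compatible object:
    # a plain or split raw image, an EWF image via the ctypes libewf wrapper,
    # or a QCOW image via pyqcow, depending on image_type.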
self._img_info = images.SelectImage(image_type, filenames) def parse_options(self, options): self._long_listing = getattr(options, "long_listing", False) self._recursive = getattr(options, "recursive", False) def print_directory_entry(self, directory_entry, prefix=""): meta = directory_entry.info.meta name = directory_entry.info.name name_type = "-" if name: name_type = self.FILE_TYPE_LOOKUP.get(int(name.type), "-") meta_type = "-" if meta: meta_type = self.META_TYPE_LOOKUP.get(int(meta.type), "-") directory_entry_type = "{0:s}/{1:s}".format(name_type, meta_type) for attribute in directory_entry: inode_type = int(attribute.info.type) if inode_type in self.ATTRIBUTE_TYPES_TO_PRINT: if self._fs_info.info.ftype in [ pytsk3.TSK_FS_TYPE_NTFS, pytsk3.TSK_FS_TYPE_NTFS_DETECT]: inode = "{0:d}-{1:d}-{2:d}".format( meta.addr, int(attribute.info.type), attribute.info.id) else: inode = "{0:d}".format(meta.addr) attribute_name = attribute.info.name if attribute_name and attribute_name not in ["$Data", "$I30"]: filename = "{0:s}:{1:s}".format(name.name, attribute.info.name) else: filename = name.name if meta and name: print("{0:s}{1:s} {2:s}:\t{3:s}".format( prefix, directory_entry_type, inode, filename)) def Main(): """The main program function. Returns: A boolean containing True if successful or False if not. """ args_parser = argparse.ArgumentParser(description=( "Lists a file system in a storage media image or device.")) args_parser.add_argument( "images", nargs="+", metavar="IMAGE", action="store", type=str, default=None, help=("Storage media images or devices.")) args_parser.add_argument( "inode", nargs="?", metavar="INODE", action="store", type=str, default=None, help=( "The inode or path to list. If [inode] is not given, the root " "directory is used")) # TODO: not implemented. # args_parser.add_argument( # "-f", "--fstype", metavar="TYPE", dest="file_system_type", # action="store", type=str, default=None, help=( # "The file system type (use \"-f list\" for supported types)")) args_parser.add_argument( "-i", "--imgtype", metavar="TYPE", dest="image_type", type=str, choices=["ewf", "qcow", "raw"], default="raw", help=( "Set the storage media image type.")) # TODO: not implemented. # args_parser.add_argument( # "-l", dest="long_listing", action="store_true", default=False, # help="Display long version (like ls -l)") args_parser.add_argument( "-o", "--offset", metavar="OFFSET", dest="offset", action="store", type=int, default=0, help="The offset into image file (in bytes)") args_parser.add_argument( "-r", "--recursive", dest="recursive", action="store_true", default=False, help="List subdirectories recursively.") options = args_parser.parse_args() if not options.images: print('No storage media image or device was provided.') print('') args_parser.print_help() print('') return False fls = Fls() fls.parse_options(options) fls.open_image(options.image_type, options.images) fls.open_file_system(options.offset) directory = fls.open_directory(options.inode) # Iterate over all files in the directory and print their name. # What you get in each iteration is a proxy object for the TSK_FS_FILE # struct - you can further dereference this struct into a TSK_FS_NAME # and TSK_FS_META structs. fls.list_directory(directory, []) return True if __name__ == '__main__': if not Main(): sys.exit(1) else: sys.exit(0) pytsk-20190507/examples/icat.py000066400000000000000000000033171346423473500162640ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2010, Michael Cohen . 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytsk3 from optparse import OptionParser import sys import pdb parser = OptionParser() parser.add_option("-f", "--fstype", default=None, help="File system type (use '-f list' for supported types)") (options, args) = parser.parse_args() def error(string): print string sys.exit(1) try: url = args[0] except IndexError: error("You must specify an image (try '%s -h' for help)" % sys.argv[0]) if len(args)==2: inode = int(args[1]) else: error("You must have exactly two arguements provided") ## Now open and read the file specified ## Step 1: get an IMG_INFO object (url can be any URL that AFF4 can ## handle) img = pytsk3.Img_Info(url) ## Step 2: Open the filesystem fs = pytsk3.FS_Info(img) ## Step 3: Open the file using the inode f = fs.open_meta(inode = inode) ## Step 4: Read all the data and print to stdout offset = 0 size = f.info.meta.size BUFF_SIZE = 1024 * 1024 while offset < size: available_to_read = min(BUFF_SIZE, size - offset) data = f.read_random(offset, available_to_read) if not data: break offset += len(data) print data pytsk-20190507/examples/images.py000066400000000000000000000074411346423473500166130ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module selects a suitable image info object based on the type.""" import bisect import sys import pyqcow import ewf import pytsk3 class EWFImgInfo(pytsk3.Img_Info): """An image info class which uses ewf as a backing reader. All we really need to do to provide TSK with the ability to read image formats is override the methods below. """ def __init__(self, *paths_to_ewf_files): self.fd = ewf.ewffile(*paths_to_ewf_files) # Make sure to call the original base constructor. 
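        # Subclasses of pytsk3.Img_Info that override read(), get_size() and
        # close() act as Python level image sources: TSK calls back into these
        # methods for all I/O instead of opening a file itself.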
pytsk3.Img_Info.__init__(self, "") def get_size(self): """This should return the size of the image.""" return self.fd.size def read(self, off, length): """This method simply returns data from a particular offset.""" self.fd.seek(off) return self.fd.read(length) def close(self): """Dispose of the underlying file like object.""" self.fd.close() class QcowImgInfo(pytsk3.Img_Info): def __init__(self, filename): self._qcow_file = pyqcow.file() self._qcow_file.open(filename) super(QcowImgInfo, self).__init__( url='', type=pytsk3.TSK_IMG_TYPE_EXTERNAL) def close(self): self._qcow_file.close() def read(self, offset, size): self._qcow_file.seek(offset) return self._qcow_file.read(size) def get_size(self): return self._qcow_file.get_media_size() class SplitImage(pytsk3.Img_Info): """Virtualize access to split images. Note that unlike other tools (e.g. affuse) we do not assume that the images are the same size. """ def __init__(self, *files): self.fds = [] self.offsets = [0] offset = 0 for fd in files: # Support either a filename or file like objects if not hasattr(fd, "read"): fd = open(fd, "rb") fd.seek(0,2) offset += fd.tell() self.offsets.append(offset) self.fds.append(fd) self.size = offset # Make sure to call the original base constructor. pytsk3.Img_Info.__init__(self, "") def get_size(self): return self.size def read(self, offset, length): """Read a buffer from the split image set. Handles the buffer straddling images. """ result = "" # The total available size in the file length = int(length) length = min(length, long(self.size) - offset) while length > 0: data = self._ReadPartial(offset, length) if not data: break length -= len(data) result += data offset += len(data) return result def _ReadPartial(self, offset, length): """Read as much as we can from the current image.""" # The part we need to read from. idx = bisect.bisect_right(self.offsets, offset + 1) - 1 fd = self.fds[idx] # The offset this part is in the overall image img_offset = self.offsets[idx] fd.seek(offset - img_offset) # This can return less than length return fd.read(length) def SelectImage(img_type, files): if img_type == "raw": if len(files) == 1: # For a single file this is faster. return pytsk3.Img_Info(files[0]) else: return SplitImage(*files) elif img_type == "ewf": # Instantiate our special image object return EWFImgInfo(*files) elif img_type == "qcow": return QcowImgInfo(files[0]) pytsk-20190507/examples/imgfuse.py000066400000000000000000000126351346423473500170060ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is a fuse driver that makes an image mountable through the standard linux loopback driver. This allows users to mount say an encase image directly through the loopback driver. 
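Hypothetical usage sketch (paths are examples only):

    python imgfuse.py -t ewf image.E01 /mnt/fuse

The mounted directory then exposes a single image.raw file which can be
loopback mounted read-only, for example:

    mount -o ro,loop /mnt/fuse/image.raw /mnt/target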
""" import images import os,sys from errno import * import stat import thread # pull in some spaghetti to make this stuff work without fuse-py being installed try: import _find_fuse_parts except ImportError: pass import fuse from fuse import Fuse if not hasattr(fuse, '__version__'): raise RuntimeError, \ "your fuse-py doesn't know of fuse.__version__, probably it's too old." fuse.fuse_python_api = (0, 2) fuse.feature_assert('stateful_files', 'has_init') class Xmp(Fuse): def main(self): options, args = self.parser.parse_args() self.fd = images.SelectImage(options.type, args) return Fuse.main(self) def getattr(self, path): s = fuse.Stat() s.st_ino = 1 s.st_dev = 0 s.st_nlink = 1 s.st_uid = 0 s.st_gid = 0 s.st_size = self.fd.get_size() s.st_atime = 0 s.st_mtime = 0 s.st_ctime = 0 s.st_blocks = 20000 s.st_rdev = 0 s.st_mode = 33188 if path.endswith('/'): s.st_mode = 16877 return s def readlink(self, path): raise IOError("No symbolic links supported on forensic filesystem at %s" % path) def readdir(self, path, offset): # We make it look like there is a single image.raw file in this directory if path == "/": result = fuse.Direntry("image.raw") result.type = stat.S_IFREG yield result def unlink(self, path): raise IOError("Unable to modify Virtual Filesystem") def rmdir(self, path): raise IOError("Unable to modify Virtual Filesystem") def symlink(self, path, path1): raise IOError("Unable to modify Virtual Filesystem") def rename(self, path, path1): raise IOError("Unable to modify Virtual Filesystem") def link(self, path, path1): raise IOError("Unable to modify Virtual Filesystem") def chmod(self, path, mode): raise IOError("Unable to modify Virtual Filesystem") def chown(self, path, user, group): raise IOError("Unable to modify Virtual Filesystem") def truncate(self, path, size): raise IOError("Unable to modify Virtual Filesystem") def mknod(self, path, mode, dev): raise IOError("Unable to modify Virtual Filesystem") def mkdir(self, path, mode): raise IOError("Unable to modify Virtual Filesystem") def utime(self, path, times): raise IOError("Unable to modify Virtual Filesystem") def open(self, path, flags): """ For now we only support a single image in the same filesystem, so any open will simply open this one image """ if path == "/image.raw": ## Image is already open return 0 else: return EBADF def read(self, path, length, offset): result = self.fd.read(offset, length) return result def write(self, path, buf, off): ## We do not modify the data, but we need to pretend that we ## are so callers dont panic - this is handy when mounting ## ext3 filesystems over loopback, where the kernel really ## wants to update the journal and would freak if it can't. return len(buf) def release(self, path, flags): return 0 def statfs(self): """ Should return a tuple with the following 6 elements: - blocksize - size of file blocks, in bytes - totalblocks - total number of blocks in the filesystem - freeblocks - number of free blocks - totalfiles - total number of file inodes - freefiles - nunber of free file inodes Feel free to set any of the above values to 0, which tells the kernel that the info is not available. 
""" blocks_size = 1024 blocks = 100000 blocks_free = 25000 files = 100000 files_free = 60000 namelen = 80 return (blocks_size, blocks, blocks_free, files, files_free, namelen) def fsync(self, path, isfsyncfile): return 0 if __name__ == '__main__': #Now we create a fuse object with that IO subsystem: server = Xmp() server.flags = 0 server.multithreaded = False; server.parser.add_option("-t", "--type", default="raw", help="Type of image. Currently supported options 'raw', " "'ewf'") server.parse(values = server, errex=1) ## Try to fix up the mount point if it was given relative to the ## CWD if server.fuse_args.mountpoint and not os.access(os.path.join("/",server.fuse_args.mountpoint), os.W_OK): server.fuse_args.mountpoint = os.path.join(os.getcwd(), server.fuse_args.mountpoint) server.main() pytsk-20190507/examples/istat.py000066400000000000000000000036651346423473500164760ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2010, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytsk3 from optparse import OptionParser import sys import pdb parser = OptionParser() parser.add_option("-f", "--fstype", default=None, help="File system type (use '-f list' for supported types)") (options, args) = parser.parse_args() def error(string): print string sys.exit(1) try: url = args[0] except IndexError: error("You must specify an image (try '%s -h' for help)" % sys.argv[0]) if len(args)==2: inode = int(args[1]) else: error("You must have exactly two arguements provided") ## Now open and read the file specified ## Step 1: get an IMG_INFO object (url can be any URL that AFF4 can ## handle) img = pytsk3.Img_Info(url) ## Step 2: Open the filesystem fs = pytsk3.FS_Info(img) ## Step 3: Open the file using the inode f = fs.open_meta(inode = inode) ## Step 4: List all blocks allocated by this file. Note that in some ## filesystems each file has several attributes and each can allocates ## multiple blocks. So we really need to iterate over all attributes ## of each file: for attr in f: print "Attribute %s, type %s, id %s" % (attr.info.name, attr.info.type, attr.info.id) for run in attr: print " Blocks %s to %s (%s blocks)" % (run.addr, run.addr + run.len, run.len) pytsk-20190507/examples/mmls.py000066400000000000000000000027761346423473500163240ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import images import sys import pytsk3 from optparse import OptionParser parser = OptionParser() parser.add_option("-f", "--fstype", default=None, help="File system type (use '-f list' for supported types)") parser.add_option("-o", "--offset", default=0, type="int", help="Offset in the image (in bytes)") parser.add_option("-t", "--type", default="raw", help="Type of image. Currently supported options 'raw', " "'ewf'") (options, args) = parser.parse_args() if not args: print "You must specify an image." sys.exit(-1) img = images.SelectImage(options.type, args) try: volume = pytsk3.Volume_Info(img) for part in volume: print part.addr, part.desc, "%ss(%s)" % (part.start, part.start * 512), part.len except IOError, e: print ("Error %s: Maybe specify a different image type using " "the -t option?" % e) pytsk-20190507/examples/tskfuse.py000066400000000000000000000173471346423473500170400ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2011, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import images import pytsk3 import stat from errno import * import pdb import os, sys, re # pull in some spaghetti to make this stuff work without fuse-py being installed try: import _find_fuse_parts except ImportError: pass import fuse from fuse import Fuse if not hasattr(fuse, '__version__'): raise RuntimeError, \ "your fuse-py doesn't know of fuse.__version__, probably it's too old." fuse.fuse_python_api = (0, 2) fuse.feature_assert('stateful_files', 'has_init') int_re = re.compile("^(\d+)([kKmMgGs]?)$") def parse_int(string): """ Parses an integer from a string. Supports suffixes """ try: m = int_re.match(string) except TypeError: return int(string) if not m: raise ValueError("%r is not an integer" % string) base = int(m.group(1)) suffix = m.group(2).lower() if not suffix: return base if suffix == 's': return base * 512 if suffix == 'k': return base * 1024 if suffix == 'm': return base * 1024 * 1024 if suffix == 'g': return base * 1024 * 1024 * 1024 raise ValueError("Unknown suffix '%r'" % suffix) ## A stub to allow for overriding later Img_Info = pytsk3.Img_Info def make_stat(meta): """ Return a stat structure from TSK metadata struct """ meta_type_dispatcher = { pytsk3.TSK_FS_META_TYPE_DIR: stat.S_IFDIR, pytsk3.TSK_FS_META_TYPE_REG: stat.S_IFREG, pytsk3.TSK_FS_META_TYPE_FIFO: stat.S_IFIFO, pytsk3.TSK_FS_META_TYPE_CHR: stat.S_IFCHR, pytsk3.TSK_FS_META_TYPE_LNK: stat.S_IFLNK, pytsk3.TSK_FS_META_TYPE_BLK: stat.S_IFBLK, } s = fuse.Stat() s.st_ino = meta.addr s.st_dev = 0 s.st_nlink = meta.nlink s.st_uid = meta.uid s.st_gid = meta.gid s.st_size = meta.size s.st_atime = meta.atime s.st_mtime = meta.mtime s.st_ctime = meta.crtime s.st_blocks = 2 s.st_rdev = 0 s.st_mode = meta_type_dispatcher.get(int(meta.type), 0) s.st_mode |= int(meta.mode) return s class TSKFuse(Fuse): """ A class that makes a filesystem appear in a fuse filesystem. This is kind of like mounting it, but it uses the sleuthkit. 
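    Hypothetical invocation (see main() below for the option definitions):

        python tskfuse.py -t raw -O 63s image.dd /mnt/point

    The -O value accepts the suffixes handled by parse_int() above
    (s for 512-byte sectors, k/m/g for binary multiples).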
""" offset = '0' def __init__(self, *args, **kw): Fuse.__init__(self, *args, **kw) def main(self): options, args = self.parser.parse_args() self.img = images.SelectImage(options.type, args) self.offset = parse_int(options.offset) self.fs = pytsk3.FS_Info(self.img, offset = self.offset) ## Prepare the file class - this will be used to read specific ## files: self.file_class = self.TSKFuseFile self.file_class.fs = self.fs return Fuse.main(self) def getattr(self, path): try: f = self.fs.open(path) except RuntimeError: return None s = make_stat(f.info.meta) s.st_blksize = self.fs.info.block_size return s def readdir(self, path, offset): for f in self.fs.open_dir(path): try: result = fuse.Direntry(f.info.name.name) if f.info.meta.type == pytsk3.TSK_FS_META_TYPE_DIR: result.type = stat.S_IFDIR else: result.type = stat.S_IFREG except AttributeError: pass yield result def unlink(self, path): pass def rmdir(self, path): pass def symlink(self, path, path1): pass def rename(self, path, path1): pass def link(self, path, path1): pass def chmod(self, path, mode): pass def chown(self, path, user, group): pass def truncate(self, path, len): pass def mknod(self, path, mode, dev): pass def mkdir(self, path, mode): pass def utime(self, path, times): pass def access(self, path, mode): pass def statfs(self): """ Should return an object with statvfs attributes (f_bsize, f_frsize...). Eg., the return value of os.statvfs() is such a thing (since py 2.2). If you are not reusing an existing statvfs object, start with fuse.StatVFS(), and define the attributes. To provide usable information (ie., you want sensible df(1) output, you are suggested to specify the following attributes: - f_bsize - preferred size of file blocks, in bytes - f_frsize - fundamental size of file blcoks, in bytes [if you have no idea, use the same as blocksize] - f_blocks - total number of blocks in the filesystem - f_bfree - number of free blocks - f_files - total number of file inodes - f_ffree - nunber of free file inodes """ s=fuse.StatVfs() info = self.fs.info s.f_bsize = info.dev_bsize s.f_frsize = 0 s.f_blocks = info.block_count s.f_bfree = 0 s.f_files = info.inum_count s.f_ffree = 0 return s def fsinit(self): pass class TSKFuseFile(object): """ This is a file created on the AFF4 universe """ direct_io = False keep_cache = True def __init__(self, path, flags, *mode): self.path = path try: self.fd = self.fs.open(path = path) except RuntimeError: raise IOError("unable to open %s" % path) def read(self, length, offset): return self.fd.read_random(offset, length) def _fflush(self): pass def fsync(self, isfsyncfile): pass def flush(self): pass def fgetattr(self): s = make_stat(self.fd.info.meta) s.st_blksize = self.fs.info.block_size return s def ftruncate(self, len): pass def write(self, *args, **kwargs): return -EOPNOTSUPP def lock(self, cmd, owner, **kw): return -EOPNOTSUPP def close(self): self.fd.close() def main(): global server usage = """ Userspace tsk-fuse: mount a filesystem through fuse. %prog [options] image_name mount_point """ server = TSKFuse(version="%prog " + fuse.__version__, usage=usage, dash_s_do='setsingle') # Disable multithreading: if you want to use it, protect all method of # XmlFile class with locks, in order to prevent race conditions server.multithreaded = False server.parser.add_option("-O", "--offset", default="0", help="Offset of filesystem [default: %default]") server.parser.add_option("-t", "--type", default="raw", help="Type of image. 
                                "Currently supported options 'raw', "
                                "'ewf'")

  server.parse(values = server, errex=1)

  ## Try to fix up the mount point if it was given relative to the
  ## CWD
  if server.fuse_args.mountpoint and not os.access(os.path.join("/",server.fuse_args.mountpoint), os.W_OK):
    server.fuse_args.mountpoint = os.path.join(os.getcwd(), server.fuse_args.mountpoint)

  server.main()

if __name__ == '__main__':
  main()

pytsk-20190507/generate_bindings.py000077500000000000000000000044101346423473500171730ustar00rootroot00000000000000
#!/usr/bin/python
#
# Script to generate the Python bindings.
#
# Copyright 2012, Joachim Metz .
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

import class_parser

def generate_bindings(
    target, source_files, env=None, initialization="", free="talloc_free"):
  """ Generated the Python bindings """
  module_name = os.path.splitext(os.path.basename(target))[0]

  print("Generating Python bindings for module %s from %s" % (
      module_name, source_files))

  env = env or dict(V=0)

  # Sets the free function
  class_parser.FREE = free

  p = class_parser.HeaderParser(module_name, verbose=env["V"])
  p.module.init_string = initialization
  p.parse_filenames(source_files)

  fd = open(target, "w")
  p.write(fd)
  fd.close()

if __name__ == "__main__":
  if len(sys.argv) != 2:
    print("Usage: ./generate_bindings.py path_to_source")
    sys.exit(1)

  tsk_source_path = sys.argv[1]

  include_base = "tsk3"
  if not os.path.exists(os.path.join(tsk_source_path, include_base)):
    # sleuthkit 4.1 changed the names of the include headers.
    include_base = "tsk"

  if not os.path.exists(os.path.join(tsk_source_path, include_base)):
    print("Unable to find sleuthkit include headers.")
    sys.exit(1)

  sources = [
      os.path.join(tsk_source_path, include_base, "libtsk.h"),
      os.path.join(tsk_source_path, include_base, "base", "tsk_base.h"),
      os.path.join(tsk_source_path, include_base, "fs", "tsk_fs.h"),
      os.path.join(tsk_source_path, include_base, "img", "tsk_img.h"),
      os.path.join(tsk_source_path, include_base, "vs", "tsk_vs.h"),
      "tsk3.h",
  ]

  generate_bindings("pytsk3.c", sources, initialization="tsk_init();")

pytsk-20190507/lexer.py000066400000000000000000000143461346423473500146510ustar00rootroot00000000000000
#!/usr/bin/python
#
# Copyright 2013, Michael Cohen .
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A simple feed lexer.""" import re class Lexer(object): """A generic feed lexer.""" ## The following is a description of the states we have and the ## way we move through them: format is an array of ## [ state_re, re, token/action, next state ] tokens = [] state = "INITIAL" buffer = "" error = 0 verbose = 0 state_stack = [] processed = 0 processed_buffer = "" saved_state = None flags = 0 def __init__(self, verbose=0, fd=None): super(Lexer, self).__init__() self.encoding = "utf-8" if not self.verbose: self.verbose = verbose if len(self.tokens[0]) == 4: for row in self.tokens: row.append(re.compile(row[0], re.DOTALL)) row.append(re.compile(row[1], re.DOTALL | re.M | re.S | self.flags)) self.fd = fd def save_state(self, dummy_t=None, m=None): """Returns a dict which represents the current state of the lexer. When provided to restore_state, the lexer is guaranteed to be in the same state as when the save_state was called. Note that derived classes may need to extend this. """ ## Unable to save our state if we have errors. We need to guarantee ## that we rewind to a good part of the file. if self.error: return try: end = m.end() except: end = 0 self.saved_state = dict( state_stack = self.state_stack[:], processed = self.processed - end, processed_buffer = self.processed_buffer, readptr = self.fd.tell() - len(self.buffer) - end, state = self.state, objects = self.objects[:], error = self.error, ) if self.verbose > 1: print("Saving state {0:s}".format(self.processed)) def restore_state(self): state = self.saved_state if not state: return self.state_stack = state["state_stack"] self.processed = state["processed"] self.processed_buffer = state["processed_buffer"] self.buffer = "" self.fd.seek(state["readptr"]) self.state = state["state"] self.objects = state["objects"] self.error = state["error"] if self.verbose > 1: print("Restoring state to offset {0:s}".format(self.processed)) def next_token(self, end=True): ## Now try to match any of the regexes in order: current_state = self.state for _, re_str, token, next_state, state, regex in self.tokens: ## Does the rule apply for us now? if state.match(current_state): if self.verbose > 2: print("{0:s}: Trying to match {1:s} with {2:s}".format( self.state, repr(self.buffer[:10]), repr(re_str))) match = regex.match(self.buffer) if match: if self.verbose > 3: print("{0:s} matched {1:s}".format( re_str, match.group(0).encode("utf8"))) ## The match consumes the data off the buffer (the ## handler can put it back if it likes) self.processed_buffer += self.buffer[:match.end()] self.buffer = self.buffer[match.end():] self.processed += match.end() ## Try to iterate over all the callbacks specified: for t in token.split(","): try: if self.verbose > 0: print("0x{0:X}: Calling {1:s} {2:s}".format( self.processed, t, repr(match.group(0)))) cb = getattr(self, t, self.default_handler) except AttributeError: continue ## Is there a callback to handle this action? callback_state = cb(t, match) if callback_state == "CONTINUE": continue elif callback_state: next_state = callback_state self.state = next_state if next_state: self.state = next_state return token ## Check that we are making progress - if we are too full, we ## assume we are stuck: if end and len(self.buffer) > 0 or len(self.buffer) > 1024: self.processed_buffer += self.buffer[:1] self.buffer = self.buffer[1:] self.ERROR( "Lexer Stuck, discarding 1 byte ({0:s}) - state {1:s}".format( repr(self.buffer[:10]), self.state)) return "ERROR" ## No token were found return def feed(self, data): """Feeds the lexer. 
Args: data: binary string containing the data (instance of bytes). """ self.buffer += data.decode(self.encoding) def empty(self): return not len(self.buffer) def default_handler(self, token, match): if self.verbose > 2: print("Default handler: {0:s} with {1:s}".format( token, repr(match.group(0)))) def ERROR(self, message=None, weight=1): if self.verbose > 0 and message: print("Error({0:s}): {1:s}".format(weight, message)) self.error += weight def PUSH_STATE(self, dummy_token=None, dummy_match=None): if self.verbose > 1: print("Storing state {0:s}".format(self.state)) self.state_stack.append(self.state) def POP_STATE(self, dummy_token=None, dummy_match=None): try: state = self.state_stack.pop() if self.verbose > 1: print("Returned state to {0:s}".format(state)) except IndexError: print("Tried to pop the state but failed - possible recursion error") state = None return state def close(self): """Just a conveniece function to force us to parse all the data.""" while self.next_token(): pass class SelfFeederMixIn(Lexer): """This mixin is used to make a lexer which feeds itself one sector at the time. Note that self.fd must be the fd we read from. """ def parse_fd(self, fd): self.feed(fd.read()) while self.next_token(): pass pytsk-20190507/make_dist.sh000077500000000000000000000021421346423473500154460ustar00rootroot00000000000000#!/bin/sh # Script to package pytsk VERSION=`grep -e "^VERSION = " class_parser.py | sed 's/^.*"\([0-9]*\)"$/\1/'`; rm -f pytsk-*.tgz PYTSK_SOURCE_FILES="\ ../pytsk/aff4_errors.h \ ../pytsk/class.c \ ../pytsk/class.h \ ../pytsk/error.c \ ../pytsk/misc.h \ ../pytsk/pytsk3.h \ ../pytsk/tsk3.c \ ../pytsk/tsk3.h" TALLOC_SOURCE_FILES="\ ../pytsk/talloc/LICENSE \ ../pytsk/talloc/README \ ../pytsk/talloc/replace.h \ ../pytsk/talloc/talloc.c \ ../pytsk/talloc/talloc.h" SCRIPTS="\ ../pytsk/class_parser.py \ ../pytsk/generate_bindings.py \ ../pytsk/lexer.py \ ../pytsk/make_dist.sh \ ../pytsk/run_tests.py \ ../pytsk/setup.py \ ../pytsk/tests/*.py" DATA_FILES="\ ../pytsk/LICENSE \ ../pytsk/MANIFEST.in \ ../pytsk/README \ ../pytsk/dpkg \ ../pytsk/msvscpp \ ../pytsk/samples \ ../pytsk/test_data" FILES="\ ${PYTSK_SOURCE_FILES} \ ${TALLOC_SOURCE_FILES} \ ${SCRIPTS} \ ${DATA_FILES}" echo "Creating: pytsk-${VERSION}.tgz" tar zcf pytsk-${VERSION}.tgz --exclude __pycache__ ${FILES} 2>/dev/null pytsk-20190507/misc.h000066400000000000000000000055351346423473500142640ustar00rootroot00000000000000/* Miscellaneous definitions. * * Copyright 2010, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _PYTSK_MISC_H #define _PYTSK_MISC_H #include #if defined( HAVE_INTTYPES_H ) #include #elif !defined( _MSC_VER ) #include #endif #if defined( WIN32 ) #include #include #include #include #else /* sys/types.h needs to be included before sys/socket.h on * some platforms like FreeBSD. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* defined( WIN32 ) */ #ifdef __cplusplus extern "C" { #endif #if defined( _MSC_VER ) #define DLL_PUBLIC __declspec(dllexport) #elif !defined( HEADERS_ONLY ) #define DLL_PUBLIC __attribute__ ((visibility("default"))) #else #define DLL_PUBLIC #endif /* Used by class parser */ #if defined( _MSC_VER ) #define UNUSED #else #define UNUSED __attribute__((unused)) #endif #if !defined( PYTSK3_ATTRIBUTE_UNUSED ) #if defined( __GNUC__ ) && __GNUC__ >= 3 #define PYTSK3_ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #else #define PYTSK3_ATTRIBUTE_UNUSED #endif #endif #if defined( _MSC_VER ) #define PYTSK3_UNREFERENCED_PARAMETER( parameter ) \ UNREFERENCED_PARAMETER( parameter ); #else #define PYTSK3_UNREFERENCED_PARAMETER( parameter ) \ /* parameter */ #endif #if !defined( _MSC_VER ) #ifdef min #undef min #endif #define min(X, Y) ((X) < (Y) ? (X) : (Y)) #ifdef max #undef max #endif #define max(X, Y) ((X) > (Y) ? (X) : (Y)) #endif /* if !defined( _MSC_VER ) */ #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif #if defined( _MSC_VER ) #if !defined( HAVE_SSIZE_T ) #define HAVE_SSIZE_T #if defined( MS_WIN64 ) typedef __int64 ssize_t; #else typedef _W64 int ssize_t; #endif #endif /* !defined( HAVE_SSIZE_T ) */ #endif /* defined( _MSC_VER ) */ #if defined( WIN32 ) #define MSG_NOSIGNAL 0 typedef unsigned long int in_addr_t; typedef int bool; #else #define O_BINARY 0 typedef int bool; #endif #define true 1 #define false 0 #ifdef __cplusplus } #endif #endif pytsk-20190507/patches/000077500000000000000000000000001346423473500145775ustar00rootroot00000000000000pytsk-20190507/patches/sleuthkit-4.6.6-configure.ac000066400000000000000000000047311346423473500215550ustar00rootroot00000000000000diff --git a/configure.ac b/configure.ac index dc9026ed..0170a0f5 100644 --- a/configure.ac +++ b/configure.ac @@ -132,9 +132,33 @@ AS_IF([test "x$ac_cv_lib_sqlite3_sqlite3_open" = "xyes"], [AC_MSG_RESULT([bundled])]) AM_CONDITIONAL([HAVE_LIBSQLITE3], [test "x$ac_cv_lib_sqlite3_sqlite3_open" = "xyes"]) -dnl check for postgresql -AC_CHECK_HEADERS([postgresql/libpq-fe.h],AC_CHECK_LIB([pq],[PQlibVersion])) -AC_CHECK_HEADERS([libpq-fe.h],AC_CHECK_LIB([pq],[PQlibVersion])) + +# Check if we should link postgresql. 
+AC_ARG_WITH([libpq], + [AS_HELP_STRING([--without-libpq],[Do not use postgresql even if it is installed])] + [AS_HELP_STRING([--with-libpq=dir],[Specify that postgresql is installed in directory 'dir'])], + dnl If --with-libpq or --without-libpq is given + [], + dnl If --with-libpq or --without-libpq is given + [with_libpq=yes]) + +dnl check for the lib if they did not specify no +AS_IF([test "x$with_libpq" != "xno"], + dnl Test the dir if they specified something beyond yes/no + [AS_IF([test "x$with_libpq" != "xyes"], + [AS_IF([test -d ${with_libpq}/include], + [CPPFLAGS="$CPPFLAGS -I${with_libpq}/include" + LDFLAGS="$LDFLAGS -L${with_libpq}/lib"], + dnl Dir given was not correct + [AC_MSG_FAILURE([postgresql directory not found at ${with_libpq}])]) + ] + )] + dnl check for postgresql + [AC_CHECK_HEADERS([postgresql/libpq-fe.h],AC_CHECK_LIB([pq],[PQlibVersion])) + AC_CHECK_HEADERS([libpq-fe.h],AC_CHECK_LIB([pq],[PQlibVersion])) + ] +) + AM_CONDITIONAL([HAVE_POSTGRESQL],[test "x$ac_cv_lib_pq_PQlibVersion" = "xyes"]) AM_COND_IF([HAVE_POSTGRESQL],[ax_libpq=yes],[ax_libpq=no]) AM_COND_IF([HAVE_POSTGRESQL], [AC_DEFINE([HAVE_LIBPQ_], [1], [Define if using libpq.])]) @@ -399,26 +423,7 @@ AC_CONFIG_FILES([ tsk/vs/Makefile tsk/fs/Makefile tsk/hashdb/Makefile - tsk/auto/Makefile - tools/Makefile - tools/imgtools/Makefile - tools/vstools/Makefile - tools/fstools/Makefile - tools/hashtools/Makefile - tools/srchtools/Makefile - tools/autotools/Makefile - tools/sorter/Makefile - tools/timeline/Makefile - tools/fiwalk/Makefile - tools/fiwalk/src/Makefile - tools/fiwalk/plugins/Makefile - tests/Makefile - samples/Makefile - man/Makefile - bindings/java/Makefile - bindings/java/jni/Makefile - unit_tests/Makefile - unit_tests/base/Makefile]) + tsk/auto/Makefile]) AC_OUTPUT pytsk-20190507/patches/sleuthkit-4.6.6-ext2fs.patch000066400000000000000000000120351346423473500215170ustar00rootroot00000000000000diff --git a/tsk/fs/ext2fs.c b/tsk/fs/ext2fs.c index d40a64be..574e0b41 100644 --- a/tsk/fs/ext2fs.c +++ b/tsk/fs/ext2fs.c @@ -62,6 +62,8 @@ debug_print_buf(unsigned char *buf, int len) static uint8_t test_root(uint32_t a, uint32_t b) { + uint32_t b2; + if (a == 0) { return (b == 0); } @@ -77,7 +79,6 @@ test_root(uint32_t a, uint32_t b) } // keep on multiplying b by itself - uint32_t b2; for (b2 = b; b2 < a; b2 *= b) {} // was it an exact match? 
@@ -583,12 +584,20 @@ static uint8_t ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, TSK_INUM_T inum, const ext2fs_inode * dino_buf) { - int i; + int copy_len = 0; + ssize_t cnt = 0; + char *a_ptr = NULL; + char *data_buf = NULL; + uint32_t *addr_ptr = NULL; + TSK_DADDR_T *tsk_addr_ptr = NULL; + int i = 0; + unsigned int j = 0; + unsigned int count = 0; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; ext2fs_sb *sb = ext2fs->fs; EXT2_GRPNUM_T grp_num; TSK_INUM_T ibase = 0; - + unsigned int total_read = 0; if (dino_buf == NULL) { tsk_error_reset(); @@ -723,7 +732,6 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, } if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EXTENTS) { - uint32_t *addr_ptr; fs_meta->content_type = TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS; /* NOTE TSK_DADDR_T != uint32_t, so lets make sure we use uint32_t */ addr_ptr = (uint32_t *) fs_meta->content_ptr; @@ -732,10 +740,9 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, } } else { - TSK_DADDR_T *addr_ptr; - addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; + tsk_addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; for (i = 0; i < EXT2FS_NDADDR + EXT2FS_NIADDR; i++) - addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]); + tsk_addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]); /* set the link string * the size check prevents us from trying to allocate a huge amount of @@ -743,20 +750,17 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, */ if ((fs_meta->type == TSK_FS_META_TYPE_LNK) && (fs_meta->size < EXT2FS_MAXPATHLEN) && (fs_meta->size >= 0)) { - int i; - if ((fs_meta->link = tsk_malloc((size_t) (fs_meta->size + 1))) == NULL) return 1; /* it is located directly in the pointers */ if (fs_meta->size < 4 * (EXT2FS_NDADDR + EXT2FS_NIADDR)) { - unsigned int j; - unsigned int count = 0; + count = 0; for (i = 0; i < (EXT2FS_NDADDR + EXT2FS_NIADDR) && count < fs_meta->size; i++) { - char *a_ptr = (char *) &dino_buf->i_block[i]; + a_ptr = (char *) &dino_buf->i_block[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) { fs_meta->link[count++] = a_ptr[j]; } @@ -769,11 +773,11 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, /* it is in blocks */ else { - TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; - char *data_buf = NULL; - char *a_ptr = fs_meta->link; - unsigned int total_read = 0; - TSK_DADDR_T *addr_ptr = fs_meta->content_ptr;; + fs = (TSK_FS_INFO *) & ext2fs->fs_info; + data_buf = NULL; + a_ptr = fs_meta->link; + total_read = 0; + tsk_addr_ptr = fs_meta->content_ptr;; if ((data_buf = tsk_malloc(fs->block_size)) == NULL) { return 1; @@ -783,10 +787,8 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, * on path length */ for (i = 0; i < EXT2FS_NDADDR && total_read < fs_meta->size; i++) { - ssize_t cnt; - cnt = tsk_fs_read_block(fs, - addr_ptr[i], data_buf, fs->block_size); + tsk_addr_ptr[i], data_buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { @@ -795,12 +797,12 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, } tsk_error_set_errstr2 ("ext2fs_dinode_copy: symlink destination from %" - PRIuDADDR, addr_ptr[i]); + PRIuDADDR, tsk_addr_ptr[i]); free(data_buf); return 1; } - int copy_len = + copy_len = (fs_meta->size - total_read < fs->block_size) ? 
(int) (fs_meta->size - total_read) : (int) (fs->block_size); pytsk-20190507/patches/sleuthkit-4.6.6-ext2fs_dent.patch000066400000000000000000000120311346423473500225250ustar00rootroot00000000000000diff --git a/tsk/fs/ext2fs_dent.c b/tsk/fs/ext2fs_dent.c index f590bd07..9034d234 100644 --- a/tsk/fs/ext2fs_dent.c +++ b/tsk/fs/ext2fs_dent.c @@ -10,7 +10,7 @@ ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved -** +* ** TCTUTILS ** Copyright (c) 2001 Brian Carrier. All rights reserved ** @@ -35,49 +35,51 @@ ext2fs_dent_copy(EXT2FS_INFO * ext2fs, char *ext2_dent, TSK_FS_NAME * fs_name) { TSK_FS_INFO *fs = &(ext2fs->fs_info); + ext2fs_dentry1 *dir1 = NULL; + ext2fs_dentry2 *dir2 = NULL; if (ext2fs->deentry_type == EXT2_DE_V1) { - ext2fs_dentry1 *dir = (ext2fs_dentry1 *) ext2_dent; + dir1 = (ext2fs_dentry1 *) ext2_dent; - fs_name->meta_addr = tsk_getu32(fs->endian, dir->inode); + fs_name->meta_addr = tsk_getu32(fs->endian, dir1->inode); /* ext2 does not null terminate */ - if (tsk_getu16(fs->endian, dir->name_len) >= fs_name->name_size) { + if (tsk_getu16(fs->endian, dir1->name_len) >= fs_name->name_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_dent_copy: Name Space too Small %d %" PRIuSIZE "", - tsk_getu16(fs->endian, dir->name_len), fs_name->name_size); + tsk_getu16(fs->endian, dir1->name_len), fs_name->name_size); return 1; } /* Copy and Null Terminate */ - strncpy(fs_name->name, dir->name, tsk_getu16(fs->endian, - dir->name_len)); - fs_name->name[tsk_getu16(fs->endian, dir->name_len)] = '\0'; + strncpy(fs_name->name, dir1->name, tsk_getu16(fs->endian, + dir1->name_len)); + fs_name->name[tsk_getu16(fs->endian, dir1->name_len)] = '\0'; fs_name->type = TSK_FS_NAME_TYPE_UNDEF; } else { - ext2fs_dentry2 *dir = (ext2fs_dentry2 *) ext2_dent; + dir2 = (ext2fs_dentry2 *) ext2_dent; - fs_name->meta_addr = tsk_getu32(fs->endian, dir->inode); + fs_name->meta_addr = tsk_getu32(fs->endian, dir2->inode); /* ext2 does not null terminate */ - if (dir->name_len >= fs_name->name_size) { + if (dir2->name_len >= fs_name->name_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2_dent_copy: Name Space too Small %d %" PRIuSIZE "", - dir->name_len, fs_name->name_size); + dir2->name_len, fs_name->name_size); return 1; } /* Copy and Null Terminate */ - strncpy(fs_name->name, dir->name, dir->name_len); - fs_name->name[dir->name_len] = '\0'; + strncpy(fs_name->name, dir2->name, dir2->name_len); + fs_name->name[dir2->name_len] = '\0'; - switch (dir->type) { + switch (dir2->type) { case EXT2_DE_REG: fs_name->type = TSK_FS_NAME_TYPE_REG; break; @@ -129,6 +131,7 @@ ext2fs_dent_parse_block(EXT2FS_INFO * ext2fs, TSK_FS_DIR * a_fs_dir, char *dirPtr; TSK_FS_NAME *fs_name; int minreclen = 4; + unsigned int namelen; if ((fs_name = tsk_fs_name_alloc(EXT2FS_MAXNAMLEN + 1, 0)) == NULL) return TSK_ERR; @@ -138,7 +141,6 @@ ext2fs_dent_parse_block(EXT2FS_INFO * ext2fs, TSK_FS_DIR * a_fs_dir, */ for (idx = 0; idx <= len - EXT2FS_DIRSIZ_lcl(1); idx += minreclen) { - unsigned int namelen; dirPtr = &buf[idx]; if (ext2fs->deentry_type == EXT2_DE_V1) { @@ -242,8 +244,12 @@ ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) a_fs; char *dirbuf; TSK_OFF_T size; + TSK_OFF_T offset = 0; TSK_FS_DIR *fs_dir; + TSK_FS_NAME *fs_name = NULL; TSK_LIST *list_seen = NULL; + ssize_t len = 0; + ssize_t cnt = 0; /* If we get corruption in one of the blocks, then continue processing. 
* retval_final will change when corruption is detected. Errors are @@ -318,11 +324,11 @@ ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, } size = roundup(fs_dir->fs_file->meta->size, a_fs->block_size); - TSK_OFF_T offset = 0; + offset = 0; while (size > 0) { - ssize_t len = (a_fs->block_size < size) ? a_fs->block_size : size; - ssize_t cnt = tsk_fs_file_read(fs_dir->fs_file, offset, dirbuf, len, (TSK_FS_FILE_READ_FLAG_ENUM)0); + len = (a_fs->block_size < size) ? a_fs->block_size : size; + cnt = tsk_fs_file_read(fs_dir->fs_file, offset, dirbuf, len, (TSK_FS_FILE_READ_FLAG_ENUM)0); if (cnt != len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); @@ -354,7 +360,7 @@ ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { - TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); + fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; pytsk-20190507/patches/sleuthkit-4.6.6-ffs_dent.patch000066400000000000000000000106671346423473500221050ustar00rootroot00000000000000diff --git a/tsk/fs/ffs_dent.c b/tsk/fs/ffs_dent.c index a9e9aec3..72ef538e 100644 --- a/tsk/fs/ffs_dent.c +++ b/tsk/fs/ffs_dent.c @@ -33,13 +33,15 @@ static uint8_t ffs_dent_copy(FFS_INFO * ffs, char *ffs_dent, TSK_FS_NAME * fs_name) { TSK_FS_INFO *a_fs = &(ffs->fs_info); + ffs_dentry1 *dir1 = NULL; + ffs_dentry2 *dir2 = NULL; /* this one has the type field */ if ((a_fs->ftype == TSK_FS_TYPE_FFS1) || (a_fs->ftype == TSK_FS_TYPE_FFS2)) { - ffs_dentry1 *dir = (ffs_dentry1 *) ffs_dent; + dir1 = (ffs_dentry1 *) ffs_dent; - fs_name->meta_addr = tsk_getu32(a_fs->endian, dir->d_ino); + fs_name->meta_addr = tsk_getu32(a_fs->endian, dir1->d_ino); if (fs_name->name_size != FFS_MAXNAMLEN) { if (tsk_fs_name_realloc(fs_name, FFS_MAXNAMLEN)) @@ -47,9 +49,9 @@ ffs_dent_copy(FFS_INFO * ffs, char *ffs_dent, TSK_FS_NAME * fs_name) } /* ffs null terminates so we can strncpy */ - strncpy(fs_name->name, dir->d_name, fs_name->name_size); + strncpy(fs_name->name, dir1->d_name, fs_name->name_size); - switch (dir->d_type) { + switch (dir1->d_type) { case FFS_DT_REG: fs_name->type = TSK_FS_NAME_TYPE_REG; break; @@ -81,9 +83,9 @@ ffs_dent_copy(FFS_INFO * ffs, char *ffs_dent, TSK_FS_NAME * fs_name) } } else if (a_fs->ftype == TSK_FS_TYPE_FFS1B) { - ffs_dentry2 *dir = (ffs_dentry2 *) ffs_dent; + dir2 = (ffs_dentry2 *) ffs_dent; - fs_name->meta_addr = tsk_getu32(a_fs->endian, dir->d_ino); + fs_name->meta_addr = tsk_getu32(a_fs->endian, dir2->d_ino); if (fs_name->name_size != FFS_MAXNAMLEN) { if (tsk_fs_name_realloc(fs_name, FFS_MAXNAMLEN)) @@ -91,7 +93,7 @@ ffs_dent_copy(FFS_INFO * ffs, char *ffs_dent, TSK_FS_NAME * fs_name) } /* ffs null terminates so we can strncpy */ - strncpy(fs_name->name, dir->d_name, fs_name->name_size); + strncpy(fs_name->name, dir2->d_name, fs_name->name_size); fs_name->type = TSK_FS_NAME_TYPE_UNDEF; } @@ -117,6 +119,7 @@ ffs_dent_parse_block(FFS_INFO * ffs, TSK_FS_DIR * fs_dir, uint8_t a_is_del, unsigned int idx; unsigned int inode = 0, dellen = 0, reclen = 0; unsigned int minreclen = 4; + unsigned int namelen = 0; TSK_FS_INFO *fs = &(ffs->fs_info); char *dirPtr; @@ -129,7 +132,7 @@ ffs_dent_parse_block(FFS_INFO * ffs, TSK_FS_DIR * fs_dir, uint8_t a_is_del, ** recorded length so we can view the deleted entries */ for (idx = 0; idx <= len - FFS_DIRSIZ_lcl(1); idx += minreclen) { - unsigned int namelen = 0; + namelen = 0; dirPtr = (char *) &buf[idx]; @@ -233,6 +236,10 @@ 
ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, char *dirbuf; int nchnk, cidx; TSK_FS_DIR *fs_dir; + TSK_OFF_T offset; + int len; + ssize_t cnt; + TSK_FS_NAME *fs_name; /* If we get corruption in one of the blocks, then continue processing. * retval_final will change when corruption is detected. Errors are @@ -240,7 +247,6 @@ ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_RETVAL_ENUM retval_tmp; TSK_RETVAL_ENUM retval_final = TSK_OK; - if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); @@ -299,11 +305,11 @@ ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, */ nchnk = (int) (size) / (FFS_DIRBLKSIZ) + 1; - TSK_OFF_T offset = 0; + offset = 0; for (cidx = 0; cidx < nchnk && (int64_t) size > 0; cidx++) { - int len = (FFS_DIRBLKSIZ < size) ? FFS_DIRBLKSIZ : (int) size; + len = (FFS_DIRBLKSIZ < size) ? FFS_DIRBLKSIZ : (int) size; - ssize_t cnt = tsk_fs_file_read(fs_dir->fs_file, offset, dirbuf, len, (TSK_FS_FILE_READ_FLAG_ENUM)0); + cnt = tsk_fs_file_read(fs_dir->fs_file, offset, dirbuf, len, (TSK_FS_FILE_READ_FLAG_ENUM)0); if (cnt != len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); @@ -334,7 +340,7 @@ ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { - TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); + fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; pytsk-20190507/patches/sleuthkit-4.6.6-gpt.patch000066400000000000000000000045721346423473500211050ustar00rootroot00000000000000diff --git a/tsk/vs/gpt.c b/tsk/vs/gpt.c index cf6bc4ac..0a830bec 100644 --- a/tsk/vs/gpt.c +++ b/tsk/vs/gpt.c @@ -36,6 +36,15 @@ gpt_load_table(TSK_VS_INFO * vs, GPT_LOCATION_ENUM gpt_type) TSK_DADDR_T gpt_relative_addr; TSK_DADDR_T gpt_absolute_addr; + TSK_DADDR_T dos_sect_relative_addr = 0; + TSK_DADDR_T dos_sect_absolute_addr = 0; + + char *name; + + UTF16 *name16; + UTF8 *name8; + int retVal; + if(gpt_type == PRIMARY_TABLE){ gpt_relative_addr = GPT_PART_SOFFSET + 1; gpt_absolute_addr = vs->offset / vs->block_size + GPT_PART_SOFFSET + 1; @@ -52,8 +61,8 @@ gpt_load_table(TSK_VS_INFO * vs, GPT_LOCATION_ENUM gpt_type) return 1; if(gpt_type == PRIMARY_TABLE){ - TSK_DADDR_T dos_sect_relative_addr = GPT_PART_SOFFSET; - TSK_DADDR_T dos_sect_absolute_addr = vs->offset / vs->block_size + GPT_PART_SOFFSET; + dos_sect_relative_addr = GPT_PART_SOFFSET; + dos_sect_absolute_addr = vs->offset / vs->block_size + GPT_PART_SOFFSET; dos_part = (dos_sect *) sect_buf; cnt = tsk_vs_read_block @@ -194,8 +203,6 @@ gpt_load_table(TSK_VS_INFO * vs, GPT_LOCATION_ENUM gpt_type) i = 0; for (a = 0; i < tsk_getu32(vs->endian, &head->tab_num_ent); a++) { - char *name; - /* Read a sector */ cnt = tsk_vs_read_block(vs, tsk_getu64(vs->endian, &head->tab_start_lba) + a, @@ -218,10 +225,6 @@ gpt_load_table(TSK_VS_INFO * vs, GPT_LOCATION_ENUM gpt_type) for (; (uintptr_t) ent < (uintptr_t) ent_buf + vs->block_size && i < tsk_getu32(vs->endian, &head->tab_num_ent); i++) { - UTF16 *name16; - UTF8 *name8; - int retVal; - if (tsk_verbose) tsk_fprintf(stderr, "gpt_load: %d Starting Sector: %" PRIu64 @@ -305,6 +308,8 @@ tsk_vs_gpt_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) { TSK_VS_INFO *vs; + int found = 0; + // clean up any errors that are lying around tsk_error_reset(); @@ -339,7 +344,7 @@ tsk_vs_gpt_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) /* Load the partitions into the 
sorted list */ if (gpt_load_table(vs, PRIMARY_TABLE)) { tsk_vs_part_free(vs); - int found = 0; + found = 0; if (tsk_verbose) tsk_fprintf(stderr, "gpt_open: Trying other sector sizes\n"); pytsk-20190507/patches/sleuthkit-4.6.6-hfs.patch000066400000000000000000001240421346423473500210660ustar00rootroot00000000000000diff --git a/tsk/fs/hfs.c b/tsk/fs/hfs.c index 00f1720b..889df108 100644 --- a/tsk/fs/hfs.c +++ b/tsk/fs/hfs.c @@ -91,6 +91,10 @@ #include "lzvn.h" +#if defined( TSK_WIN32 ) +#define __func__ __FUNCTION__ +#endif + // Forward declarations: static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file); static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file, @@ -372,10 +376,13 @@ static TSK_FS_ATTR_RUN * hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents, TSK_OFF_T a_start_off) { + TSK_FS_ATTR_RUN *cur_run = NULL; TSK_FS_ATTR_RUN *head_run = NULL; TSK_FS_ATTR_RUN *prev_run = NULL; int i; TSK_OFF_T cur_off = a_start_off; + uint32_t addr = 0; + uint32_t len = 0; // since tsk_errno is checked as a return value, make sure it is clean. tsk_error_reset(); @@ -386,10 +393,8 @@ hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents, " to runlist\n", a_start_off); for (i = 0; i < 8; ++i) { - TSK_FS_ATTR_RUN *cur_run; - - uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk); - uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt); + addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk); + len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt); if (tsk_verbose) tsk_fprintf(stderr, @@ -443,6 +448,26 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, uint8_t is_done; uint8_t desiredType; + ssize_t cnt; + + TSK_OFF_T cur_off; /* start address of cur_node */ + uint16_t num_rec; /* number of records in this node */ + hfs_btree_node *node_desc; + + uint32_t next_node = 0; + int rec; + + int cmp; + size_t rec_off; + hfs_btree_key_ext *key; + + int keylen; + + uint32_t rec_cnid; + hfs_extents *extents; + TSK_OFF_T ext_off = 0; + TSK_FS_ATTR_RUN *attr_run; + tsk_error_reset(); if (tsk_verbose) @@ -462,8 +487,6 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, // Load the extents attribute, if it has not been done so yet. 
if (hfs->extents_file == NULL) { - ssize_t cnt; - if ((hfs->extents_file = tsk_fs_file_open_meta(fs, NULL, HFS_EXTENTS_FILE_ID)) == NULL) { @@ -524,11 +547,6 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { - TSK_OFF_T cur_off; /* start address of cur_node */ - uint16_t num_rec; /* number of records in this node */ - ssize_t cnt; - hfs_btree_node *node_desc; - // sanity check if (cur_node > tsk_getu32(fs->endian, hfs->extents_header.totalNodes)) { @@ -585,8 +603,7 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { - uint32_t next_node = 0; - int rec; + next_node = 0; if (tsk_verbose) tsk_fprintf(stderr, @@ -595,10 +612,6 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, cur_off, num_rec); for (rec = 0; rec < num_rec; ++rec) { - int cmp; - size_t rec_off; - hfs_btree_key_ext *key; - // get the record offset in the node rec_off = tsk_getu16(fs->endian, @@ -629,7 +642,7 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, /* save the info from this record unless it is bigger than cnid */ if ((cmp <= 0) || (next_node == 0)) { hfs_btree_index_record *idx_rec; - int keylen = + keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->extents_header)); if (rec_off + keylen > nodesize) { @@ -667,8 +680,6 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, /* with a leaf, we process until we are past cnid. We move right too if we can */ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { - int rec; - if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %" @@ -676,13 +687,7 @@ hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, num_rec); for (rec = 0; rec < num_rec; ++rec) { - size_t rec_off; - hfs_btree_key_ext *key; - uint32_t rec_cnid; - hfs_extents *extents; - TSK_OFF_T ext_off = 0; - int keylen; - TSK_FS_ATTR_RUN *attr_run; + ext_off = 0; // get the record offset in the node rec_off = @@ -843,6 +848,19 @@ hfs_cat_traverse(HFS_INFO * hfs, uint16_t nodesize; uint8_t is_done = 0; + TSK_OFF_T cur_off; /* start address of cur_node */ + uint16_t num_rec; /* number of records in this node */ + ssize_t cnt; + hfs_btree_node *node_desc; + + uint32_t next_node = 0; + int rec; + + size_t rec_off; + hfs_btree_key_cat *key; + uint8_t retval; + int keylen; + tsk_error_reset(); nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); @@ -872,11 +890,6 @@ hfs_cat_traverse(HFS_INFO * hfs, /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { - TSK_OFF_T cur_off; /* start address of cur_node */ - uint16_t num_rec; /* number of records in this node */ - ssize_t cnt; - hfs_btree_node *node_desc; - // sanity check if (cur_node > tsk_getu32(fs->endian, hfs->catalog_header.totalNodes)) { @@ -930,15 +943,9 @@ hfs_cat_traverse(HFS_INFO * hfs, /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { - uint32_t next_node = 0; - int rec; + next_node = 0; for (rec = 0; rec < num_rec; ++rec) { - size_t rec_off; - hfs_btree_key_cat *key; - uint8_t retval; - int keylen; - // get the record offset in the node rec_off = tsk_getu16(fs->endian, @@ -991,7 +998,7 @@ hfs_cat_traverse(HFS_INFO * hfs, 
else if ((retval == HFS_BTREE_CB_IDX_LT) || (next_node == 0)) { hfs_btree_index_record *idx_rec; - int keylen = + keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->catalog_header)); if (rec_off + keylen > nodesize) { @@ -1036,14 +1043,7 @@ hfs_cat_traverse(HFS_INFO * hfs, /* With a leaf, we look for the specific record. */ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { - int rec; - for (rec = 0; rec < num_rec; ++rec) { - size_t rec_off; - hfs_btree_key_cat *key; - uint8_t retval; - int keylen; - // get the record offset in the node rec_off = tsk_getu16(fs->endian, @@ -1130,7 +1130,10 @@ hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type, TSK_OFF_T key_off, void *ptr) { HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr; - const hfs_btree_key_cat *targ_key = offset_data->targ_key; + const hfs_btree_key_cat *targ_key = NULL; + int diff = 0; + + targ_key = offset_data->targ_key; if (tsk_verbose) tsk_fprintf(stderr, @@ -1141,14 +1144,14 @@ hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type, tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid)); if (level_type == HFS_BT_NODE_TYPE_IDX) { - int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); + diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); if (diff < 0) return HFS_BTREE_CB_IDX_LT; else return HFS_BTREE_CB_IDX_EQGT; } else { - int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); + diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); // see if this record is for our file or if we passed the interesting entries if (diff < 0) { @@ -1419,6 +1422,7 @@ hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, time_t crtime; uint32_t file_type; uint32_t file_creator; + uint32_t linkNum; *is_error = 0; // default, not an error @@ -1457,7 +1461,7 @@ hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, // For this to work, we need the FS creation times. Is at least one of these set? if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) && (!hfs->has_meta_crtime)) { - uint32_t linkNum = + linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); *is_error = 1; if (tsk_verbose) @@ -1482,11 +1486,11 @@ hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { // OK, this is a hard link to a file. - uint32_t linkNum = + linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); - // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found - // that it was very ineffecient and always resulted in the same linkNum value. + // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found + // that it was very ineffecient and always resulted in the same linkNum value. // We now just use linkNum return linkNum; } @@ -1504,7 +1508,7 @@ hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, // For this to work, we need the FS creation times. Is at least one of these set? if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) && (!hfs->has_meta_crtime)) { - uint32_t linkNum = + linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); *is_error = 1; @@ -1532,11 +1536,11 @@ hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { // OK, this is a hard link to a directory. 
- uint32_t linkNum = + linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); - // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found - // that it was very ineffecient and always resulted in the same linkNum value. + // We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found + // that it was very ineffecient and always resulted in the same linkNum value. // We now just use linkNum return linkNum; } @@ -1568,6 +1572,11 @@ hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, hfs_file_folder record; /* file/folder record */ TSK_OFF_T off; + unsigned char is_err; + TSK_INUM_T target_cnid = 0; + + uint8_t res = 0; + tsk_error_reset(); if (tsk_verbose) @@ -1690,8 +1699,7 @@ hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, if (follow_hard_link) { // TEST to see if this is a hard link - unsigned char is_err; - TSK_INUM_T target_cnid = + target_cnid = hfs_follow_hard_link(hfs, &(entry->cat), &is_err); if (is_err > 1) { error_returned @@ -1701,7 +1709,7 @@ hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, } if (target_cnid != inum) { // This is a hard link, and we have got the cnid of the target file, so look it up. - uint8_t res = + res = hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE); if (res != 0) { error_returned @@ -1743,13 +1751,15 @@ hfs_find_highest_inum(HFS_INFO * hfs) { // @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header) TSK_INUM_T inum; + TSK_FS_INFO *fs = NULL; + if (hfs_cat_traverse(hfs, hfs_find_highest_inum_cb, &inum)) { /* Catalog traversal failed, fallback on legacy method : if HFS_VH_ATTR_CNIDS_REUSED is set, then the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is supposed to be larger than all CNIDs on disk. */ - TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); + fs = (TSK_FS_INFO *) & (hfs->fs_info); if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED) return (TSK_INUM_T) 0xffffffff; else @@ -2341,6 +2351,8 @@ hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry, uint16_t hfsmode; TSK_INUM_T iStd; // the inum (or CNID) that occurs in the standard file metadata + ssize_t bytes_read; + if (a_entry == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL"); @@ -2480,8 +2492,6 @@ hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry, if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) && (a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) { - ssize_t bytes_read; - a_fs_meta->link = tsk_malloc((size_t) a_fs_meta->size + 1); if (a_fs_meta->link == NULL) return 1; @@ -2737,6 +2747,10 @@ hfs_read_lzvn_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTab char *offsetTableData = NULL; CMP_OFFSET_ENTRY *offsetTable = NULL; + uint32_t a = 0; + uint32_t b; + size_t i; + // The offset table is a sequence of 4-byte offsets of compressed // blocks. The first 4 bytes is thus the offset of the first block, // but also 4 times the number of entries in the table. 
@@ -2780,9 +2794,7 @@ hfs_read_lzvn_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTab goto on_error; } - uint32_t a = tableDataSize; - uint32_t b; - size_t i; + a = tableDataSize; for (i = 0; i < tableSize; ++i) { b = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + 4*(i+1)); @@ -2846,6 +2858,9 @@ static int hfs_decompress_noncompressed_block(char* rawBuf, uint32_t len, char* */ static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) { + unsigned long bytesConsumed; + int infResult = 0; + // see if this block is compressed if (len > 0 && (rawBuf[0] & 0x0F) != 0x0F) { // Uncompress the chunk of data @@ -2853,8 +2868,7 @@ static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, u tsk_fprintf(stderr, "%s: Inflating the compression unit\n", __func__); - unsigned long bytesConsumed; - int infResult = zlib_inflate(rawBuf, (uint64_t) len, + infResult = zlib_inflate(rawBuf, (uint64_t) len, uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE, uncLen, &bytesConsumed); if (infResult != 0) { @@ -2938,6 +2952,10 @@ static ssize_t read_and_decompress_block( uint32_t len = offsetTable[indx].length; uint64_t uncLen; + const char *msg = NULL; + + uint32_t expUncLen = 0; + if (tsk_verbose) tsk_fprintf(stderr, "%s: Reading compression unit %d, length %d\n", @@ -2962,7 +2980,7 @@ static ssize_t read_and_decompress_block( attrReadResult = tsk_fs_attr_read(rAttr, offset, rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != (ssize_t) len) { - char msg[] = + msg = "%s%s: reading in the compression offset table, " "return value %u should have been %u"; @@ -2981,7 +2999,7 @@ static ssize_t read_and_decompress_block( // If size is a multiple of COMPRESSION_UNIT_SIZE, // expected uncompressed length is COMPRESSION_UNIT_SIZE - const uint32_t expUncLen = indx == offsetTableSize - 1 ? + expUncLen = indx == offsetTableSize - 1 ? ((rAttr->fs_file->meta->size - 1) % COMPRESSION_UNIT_SIZE) + 1 : COMPRESSION_UNIT_SIZE; @@ -3032,6 +3050,14 @@ hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr, size_t indx; // index for looping over the offset table TSK_OFF_T off = 0; // the offset in the uncompressed data stream consumed thus far + ssize_t uncLen; // uncompressed length + unsigned int blockSize; + uint64_t lumpSize; + uint64_t remaining; + char *lumpStart; + + int retval; // action return value + if (tsk_verbose) tsk_fprintf(stderr, "%s: Entered, because this is a compressed file with compressed data in the resource fork\n", __func__); @@ -3105,12 +3131,6 @@ hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr, // FOR entry in the table DO for (indx = 0; indx < offsetTableSize; ++indx) { - ssize_t uncLen; // uncompressed length - unsigned int blockSize; - uint64_t lumpSize; - uint64_t remaining; - char *lumpStart; - switch ((uncLen = read_and_decompress_block( rAttr, rawBuf, uncBuf, offsetTable, offsetTableSize, offsetTableOffset, indx, @@ -3131,7 +3151,6 @@ hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr, lumpStart = uncBuf; while (remaining > 0) { - int retval; // action return value lumpSize = remaining <= blockSize ? 
remaining : blockSize; // Apply the callback function @@ -3262,6 +3281,10 @@ hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T endUnit = 0; uint64_t bytesCopied; + uint64_t uncLen; + char *uncBufPtr = NULL; + size_t bytesToCopy; + if (tsk_verbose) tsk_fprintf(stderr, "%s: called because this file is compressed, with data in the resource fork\n", __func__); @@ -3371,9 +3394,7 @@ hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr, // Read from the indicated comp units for (indx = startUnit; indx <= endUnit; ++indx) { - uint64_t uncLen; - char *uncBufPtr = uncBuf; - size_t bytesToCopy; + uncBufPtr = uncBuf; switch ((uncLen = read_and_decompress_block( rAttr, rawBuf, uncBuf, @@ -3521,6 +3542,17 @@ static int hfs_decompress_noncompressed_attr(char* rawBuf, uint32_t rawSize, uin */ static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) { +#ifdef HAVE_LIBZ + char* uncBuf = NULL; + uint64_t uLen; + unsigned long bytesConsumed; + int infResult; +#else + // Dummy is one byte long, so the ptr is not null, but we set the + // length to zero bytes, so it is never read. + static uint8_t dummy[1]; +#endif + // ZLIB blocks cannot start with 0xF as the low nibble, so that's used // as the flag for noncompressed blocks if ((rawBuf[0] & 0x0F) == 0x0F) { @@ -3529,11 +3561,6 @@ static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t unc } else { #ifdef HAVE_LIBZ - char* uncBuf = NULL; - uint64_t uLen; - unsigned long bytesConsumed; - int infResult; - if (tsk_verbose) tsk_fprintf(stderr, "%s: Uncompressing (inflating) data.", __func__); @@ -3577,10 +3604,6 @@ static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t unc tsk_fprintf(stderr, "%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__); - // Dummy is one byte long, so the ptr is not null, but we set the - // length to zero bytes, so it is never read. - static uint8_t dummy[1]; - *dstBuf = dummy; *dstSize = 0; *dstBufFree = FALSE; @@ -3605,6 +3628,8 @@ static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t unc */ static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) { + char* uncBuf; + // LZVN blocks cannot start with 0x06, so that's used as the flag for // noncompressed blocks if (rawBuf[0] == 0x06) { @@ -3612,7 +3637,7 @@ static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t unc rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree); } - char* uncBuf = (char *) tsk_malloc((size_t) uncSize); + uncBuf = (char *) tsk_malloc((size_t) uncSize); *dstSize = lzvn_decode_buffer(uncBuf, uncSize, rawBuf, rawSize); *dstBuf = uncBuf; *dstBufFree = TRUE; @@ -3646,6 +3671,12 @@ hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file, uint64_t* dstSize, int* dstBufFree)) { + TSK_FS_ATTR *fs_attr_unc; + + char* dstBuf; + uint64_t dstSize; + int dstBufFree = FALSE; + // Data is inline. We will load the uncompressed data as a // resident attribute. if (tsk_verbose) @@ -3663,8 +3694,6 @@ hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file, return 1; } - TSK_FS_ATTR *fs_attr_unc; - // There is data following the compression record, as there should be. 
if ((fs_attr_unc = tsk_fs_attrlist_getnew( fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) @@ -3673,10 +3702,6 @@ hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file, return 0; } - char* dstBuf; - uint64_t dstSize; - int dstBufFree = FALSE; - if (!decompress_attr(buffer + 16, attributeLength - 16, uncSize, &dstBuf, &dstSize, &dstBufFree)) { return 0; @@ -3926,6 +3951,35 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, TSK_LIST *nodeIDs_processed = NULL; // Keep track of node IDs to prevent an infinite loop ssize_t cnt; // count of chars read from file. + uint16_t numRec; // Number of records in the node + int recIndx; // index for looping over records + + uint16_t keyLength; + int comp; // comparison result + char *compStr; // comparison result, as a string + uint8_t *recData; // pointer to the data part of the record + uint32_t keyFileID; + + uint8_t *recOffsetTblEntry = NULL; + uint16_t recOffset = 0; + + uint8_t *recordBytes = NULL; + + hfs_attr_data *attrData; + uint32_t attributeLength; + uint32_t nameLength; + uint32_t recordType; + int conversionResult; + char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1]; + TSK_FS_ATTR_TYPE_ENUM attrType; + TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded. + + DECMPFS_DISK_HEADER *cmph = NULL; + + uint64_t uncSize = 0; + + uint32_t newNodeID = 0; + tsk_error_reset(); // The CNID (or inode number) of the file @@ -3988,9 +4042,6 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, // While loop, over nodes in path from root node to the correct LEAF node. while (1) { - uint16_t numRec; // Number of records in the node - int recIndx; // index for looping over records - if (tsk_verbose) { tsk_fprintf(stderr, "hfs_load_extended_attrs: Reading Attributes File node with ID %" @@ -4056,15 +4107,9 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, } for (recIndx = 0; recIndx < numRec; ++recIndx) { - uint16_t keyLength; - int comp; // comparison result - char *compStr; // comparison result, as a string - uint8_t *recData; // pointer to the data part of the record - uint32_t keyFileID; - // The offset to the record is stored in table at end of node - uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is - uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry); + recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is + recOffset = tsk_getu16(endian, recOffsetTblEntry); //uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)]; // make sure the record and first fields are in the buffer @@ -4075,7 +4120,7 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, } // Pointer to first byte of record - uint8_t *recordBytes = &nodeData[recOffset]; + recordBytes = &nodeData[recOffset]; // Cast that to the Attributes file key (n.b., the key is the first thing in the record) @@ -4160,9 +4205,6 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, // Loop over successive LEAF nodes, starting with this one done = FALSE; while (!done) { - uint16_t numRec; // number of records - unsigned int recIndx; // index for looping over records - if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Attributes File LEAF Node %" @@ -4174,12 +4216,8 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, for (recIndx = 0; recIndx < numRec; ++recIndx) { // The offset to the record is stored in table at end of node - uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is - uint16_t 
recOffset = tsk_getu16(endian, recOffsetTblEntry); - - int comp; // comparison result - char *compStr; // comparison result as a string - uint32_t keyFileID; + recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is + recOffset = tsk_getu16(endian, recOffsetTblEntry); // make sure the record and first fields are in the buffer if (recOffset + 14 > attrFile.nodeSize) { @@ -4189,7 +4227,7 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, } // Pointer to first byte of record - uint8_t *recordBytes = &nodeData[recOffset]; + recordBytes = &nodeData[recOffset]; // Cast that to the Attributes file key keyB = (hfs_btree_key_attr *) recordBytes; @@ -4220,17 +4258,6 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, if (comp == 0) { // Yes, so load this attribute - uint8_t *recData; // pointer to the data part of the recordBytes - hfs_attr_data *attrData; - uint32_t attributeLength; - uint32_t nameLength; - uint32_t recordType; - uint16_t keyLength; - int conversionResult; - char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1]; - TSK_FS_ATTR_TYPE_ENUM attrType; - TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded. - keyLength = tsk_getu16(endian, keyB->key_len); // make sure the fields we care about are still in the buffer // +2 because key_len doesn't include its own length @@ -4318,10 +4345,10 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, if (strcmp(nameBuff, "com.apple.decmpfs") == 0 && tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) { // Now, look at the compression record - DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer; + cmph = (DECMPFS_DISK_HEADER *) buffer; *cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); - uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN, + uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size); if (tsk_verbose) @@ -4418,7 +4445,7 @@ hfs_load_extended_attrs(TSK_FS_FILE * fs_file, // so we must get the next node, and continue. 
// First determine the nodeID of the next LEAF node - uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink); + newNodeID = tsk_getu32(endian, nodeDescriptor->flink); //fprintf(stdout, "Next Node ID = %u\n", newNodeID); if (tsk_verbose) @@ -4543,6 +4570,21 @@ hfs_parse_resource_fork(TSK_FS_FILE * fs_file) hfs_resource_type_list_item *tlItem; int mindx; // index for looping over resource types + uint16_t numRes; + uint16_t refOff; + int pindx; // index for looping over resources + uint16_t rID; + uint32_t rOffset; + + int16_t nameOffset; + char *nameBuffer; + RES_DESCRIPTOR *rsrc; + char lenBuff[4]; // first 4 bytes of a resource encodes its length + uint32_t rLen; // Resource length + + char *name = NULL; + uint8_t nameLen = 0; + if (fs_file == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_parse_resource_fork: null fs_file"); @@ -4657,24 +4699,12 @@ hfs_parse_resource_fork(TSK_FS_FILE * fs_file) numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1; for (mindx = 0; mindx < numTypes; ++mindx) { - uint16_t numRes; - uint16_t refOff; - int pindx; // index for looping over resources - uint16_t rID; - uint32_t rOffset; - tlItem = &(typeList->type[mindx]); numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1; refOff = tsk_getu16(fs_info->endian, tlItem->offset); for (pindx = 0; pindx < numRes; ++pindx) { - int16_t nameOffset; - char *nameBuffer; - RES_DESCRIPTOR *rsrc; - char lenBuff[4]; // first 4 bytes of a resource encodes its length - uint32_t rLen; // Resource length - hfs_resource_refListItem *item = ((hfs_resource_refListItem *) (((uint8_t *) typeList) + refOff)) + pindx; @@ -4682,8 +4712,8 @@ hfs_parse_resource_fork(TSK_FS_FILE * fs_file) nameBuffer = NULL; if (hasNameList && nameOffset != -1) { - char *name = nameListBegin + nameOffset; - uint8_t nameLen = (uint8_t) name[0]; + name = nameListBegin + nameOffset; + nameLen = (uint8_t) name[0]; nameBuffer = tsk_malloc(nameLen + 1); if (nameBuffer == NULL) { error_returned @@ -5207,6 +5237,9 @@ hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk, TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; + int retval; + int myflags; + if (tsk_verbose) tsk_fprintf(stderr, "%s: start_blk: %" PRIuDADDR " end_blk: %" @@ -5256,9 +5289,6 @@ hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk, * Iterate */ for (addr = start_blk; addr <= end_blk; ++addr) { - int retval; - int myflags; - /* identify if the block is allocated or not */ myflags = hfs_block_is_alloc(hfs, addr) ? 
TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; @@ -5303,6 +5333,8 @@ hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T inum; TSK_FS_FILE *fs_file; + int retval; + if (tsk_verbose) tsk_fprintf(stderr, "hfs_inode_walk: start_inum: %" PRIuINUM " end_inum: %" @@ -5360,8 +5392,6 @@ hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, XSWAP(start_inum, end_inum); for (inum = start_inum; inum <= end_inum; ++inum) { - int retval; - if (hfs_inode_lookup(fs, fs_file, inum)) { // deleted files may not exist in the catalog if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) { @@ -5872,6 +5902,34 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS const TSK_FS_ATTR *compressionAttr = NULL; RES_DESCRIPTOR *rd; // descriptor of a resource + int rslt; + + hfs_uni_str *nm = NULL; + char name_buf[HFS_MAXNAMLEN + 1]; + TSK_INUM_T par_cnid; // parent CNID + + int instr = 0; + int drstr = 0; + + int windx; // loop index + + uint8_t cu = 0; + + int cnt, i; + + const char *type; // type of the attribute as a string + const TSK_FS_ATTR *fs_attr = NULL; + + int attrReadResult; + DECMPFS_DISK_HEADER *cmph; + uint32_t cmpType; + uint64_t uncSize; + uint64_t cmpSize = 0; + + char *aBuf = NULL; + + uint32_t off = 0; + tsk_error_reset(); if (tsk_verbose) @@ -5885,7 +5943,6 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS } if (inum >= HFS_FIRST_USER_CNID) { - int rslt; tsk_fprintf(hFile, "File Path: "); rslt = print_parent_path(hFile, fs, inum); if (rslt != 0) @@ -5927,9 +5984,7 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS tsk_fprintf(hFile, "Link count:\t%d\n", fs_file->meta->nlink); if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0) { - hfs_uni_str *nm = &entry.thread.name; - char name_buf[HFS_MAXNAMLEN + 1]; - TSK_INUM_T par_cnid; // parent CNID + nm = &entry.thread.name; tsk_fprintf(hFile, "\n"); hfs_UTF16toUTF8(fs, nm->unicode, (int) tsk_getu16(fs->endian, @@ -5941,8 +5996,8 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS par_cnid = tsk_getu32(fs->endian, &(entry.thread.parent_cnid)); if ((hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) || (hfs->has_meta_crtime && par_cnid == hfs->meta_inum)) { - int instr = strncmp(name_buf, "iNode", 5); - int drstr = strncmp(name_buf, "dir_", 4); + instr = strncmp(name_buf, "iNode", 5); + drstr = strncmp(name_buf, "dir_", 4); if (instr == 0 && hfs->has_meta_crtime && par_cnid == hfs->meta_inum) { @@ -6016,13 +6071,12 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS // File_type and file_cr are not relevant for Folders if ( !TSK_FS_IS_DIR_META(fs_file->meta->type)){ - int windx; // loop index tsk_fprintf(hFile, "File type:\t%04" PRIx32 " ", tsk_getu32(fs->endian, entry.cat.std.u_info.file_type)); for (windx = 0; windx < 4; ++windx) { - uint8_t cu = entry.cat.std.u_info.file_type[windx]; + cu = entry.cat.std.u_info.file_type[windx]; if (cu >= 32 && cu <= 126) tsk_fprintf(hFile, "%c", (char) cu); else @@ -6033,7 +6087,7 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS "File creator:\t%04" PRIx32 " ", tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr)); for (windx = 0; windx < 4; ++windx) { - uint8_t cu = entry.cat.std.u_info.file_cr[windx]; + cu = entry.cat.std.u_info.file_cr[windx]; if (cu >= 32 && cu <= 126) tsk_fprintf(hFile, "%c", (char) cu); else @@ -6188,13 +6242,10 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM 
istat_flags, FILE * hFile, TS /* Print all of the attributes */ tsk_fprintf(hFile, "\nAttributes: \n"); if (fs_file->meta->attr) { - int cnt, i; - // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; ++i) { - const char *type; // type of the attribute as a string - const TSK_FS_ATTR *fs_attr = + fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; @@ -6272,16 +6323,12 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS // IF this is a compressed file if (compressionAttr != NULL) { - const TSK_FS_ATTR *fs_attr = compressionAttr; - ssize_t attrReadResult; - DECMPFS_DISK_HEADER *cmph; - uint32_t cmpType; - uint64_t uncSize; - uint64_t cmpSize = 0; + fs_attr = compressionAttr; + cmpSize = 0; // Read the attribute. It cannot be too large because it is stored in // a btree node - char *aBuf = (char *) tsk_malloc((size_t) fs_attr->size); + aBuf = (char *) tsk_malloc((size_t) fs_attr->size); if (aBuf == NULL) { error_returned("hfs_istat: space for a compression attribute"); return 1; @@ -6313,7 +6360,7 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS // Data is inline { // size of header, with indicator byte if uncompressed - uint32_t off = (cmph->attr_bytes[0] & 0x0F) == 0x0F ? 17 : 16; + off = (cmph->attr_bytes[0] & 0x0F) == 0x0F ? 17 : 16; cmpSize = fs_attr->size - off; tsk_fprintf(hFile, @@ -6327,7 +6374,7 @@ hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TS // Data is inline { // size of header, with indicator byte if uncompressed - uint32_t off = cmph->attr_bytes[0] == 0x06 ? 17 : 16; + off = cmph->attr_bytes[0] == 0x06 ? 17 : 16; cmpSize = fs_attr->size - off; tsk_fprintf(hFile, @@ -6468,6 +6515,15 @@ hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_INUM_T inum; // The inum (or CNID) of the metadata directories int8_t result; // of tsk_fs_path2inum() + hfs_mdb *wrapper_sb = NULL; + + TSK_FS_INFO *fs_info2; + uint16_t drAlBlSt = 0; + uint32_t drAlBlkSiz = 0; + uint16_t startBlock = 0; + + TSK_OFF_T hfsplus_offset = 0; + tsk_error_reset(); if (TSK_FS_TYPE_ISHFS(ftype) == 0) { @@ -6529,7 +6585,7 @@ hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, */ if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFS) { - hfs_mdb *wrapper_sb = (hfs_mdb *) hfs->fs; + wrapper_sb = (hfs_mdb *) hfs->fs; // Verify that we are setting a wrapper and not a normal HFS volume if ((tsk_getu16(fs->endian, @@ -6537,24 +6593,23 @@ hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, || (tsk_getu16(fs->endian, wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSX)) { - TSK_FS_INFO *fs_info2; // offset in sectors to start of first HFS block - uint16_t drAlBlSt = + drAlBlSt = tsk_getu16(fs->endian, wrapper_sb->drAlBlSt); // size of each HFS block - uint32_t drAlBlkSiz = + drAlBlkSiz = tsk_getu32(fs->endian, wrapper_sb->drAlBlkSiz); // start of embedded FS - uint16_t startBlock = tsk_getu16(fs->endian, + startBlock = tsk_getu16(fs->endian, wrapper_sb->drEmbedExtent_startBlock); // calculate the offset; 512 here is intentional. // TN1150 says "The drAlBlSt field contains the offset, in // 512-byte blocks, of the wrapper's allocation block 0 relative // to the start of the volume" - TSK_OFF_T hfsplus_offset = + hfsplus_offset = (drAlBlSt * (TSK_OFF_T) 512) + (drAlBlkSiz * (TSK_OFF_T) startBlock); @@ -6884,25 +6939,29 @@ hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, void error_detected(uint32_t errnum, char *errstr, ...) 
{ + TSK_ERROR_INFO *errInfo; + char *loc_errstr; + size_t sl; + va_list args; va_start(args, errstr); { - TSK_ERROR_INFO *errInfo = tsk_error_get_info(); - char *loc_errstr = errInfo->errstr; + errInfo = tsk_error_get_info(); + loc_errstr = errInfo->errstr; if (errInfo->t_errno == 0) errInfo->t_errno = errnum; else { //This should not happen! We don't want to wipe out the existing error //code, so we write the new code into the error string, in hex. - size_t sl = strlen(errstr); + sl = strlen(errstr); snprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, " Next errnum: 0x%x ", errnum); } if (errstr != NULL) { - size_t sl = strlen(loc_errstr); + sl = strlen(loc_errstr); vsnprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, errstr, args); } @@ -6924,17 +6983,21 @@ error_detected(uint32_t errnum, char *errstr, ...) void error_returned(char *errstr, ...) { + TSK_ERROR_INFO *errInfo; + char *loc_errstr2; + size_t sl; + va_list args; va_start(args, errstr); { - TSK_ERROR_INFO *errInfo = tsk_error_get_info(); - char *loc_errstr2 = errInfo->errstr2; + errInfo = tsk_error_get_info(); + loc_errstr2 = errInfo->errstr2; if (errInfo->t_errno == 0) errInfo->t_errno = TSK_ERR_AUX_GENERIC; if (errstr != NULL) { - size_t sl = strlen(loc_errstr2); + sl = strlen(loc_errstr2); vsnprintf(loc_errstr2 + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, errstr, args); } pytsk-20190507/patches/sleuthkit-4.6.6-hfs_dent.patch000066400000000000000000000106101346423473500220730ustar00rootroot00000000000000diff --git a/tsk/fs/hfs_dent.c b/tsk/fs/hfs_dent.c index 8402b302..0eb5c286 100644 --- a/tsk/fs/hfs_dent.c +++ b/tsk/fs/hfs_dent.c @@ -106,6 +106,9 @@ hfs_UTF16toUTF8(TSK_FS_INFO * fs, uint8_t * uni, int ulen, char *asc, int i; TSKConversionResult r; + uint16_t uc; + int changed; + // remove nulls from the Unicode string // convert / to : uniclean = (uint8_t *) tsk_malloc(ulen * 2); @@ -115,10 +118,9 @@ hfs_UTF16toUTF8(TSK_FS_INFO * fs, uint8_t * uni, int ulen, char *asc, memcpy(uniclean, uni, ulen * 2); for (i = 0; i < ulen; ++i) { - uint16_t uc = tsk_getu16(fs->endian, uniclean + i * 2); - + uc = tsk_getu16(fs->endian, uniclean + i * 2); - int changed = 0; + changed = 0; if (uc == UTF16_NULL) { uc = UTF16_NULL_REPLACE; changed = 1; @@ -201,6 +203,20 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *ptr) { + uint8_t *rec_buf; + uint16_t rec_type; + size_t rec_off2; + + hfs_file *file; + unsigned char is_err; + TSK_INUM_T file_cnid; + TSK_INUM_T target_cnid; + + HFS_ENTRY entry; + uint8_t lkup; // lookup result + + int32_t nameLength; + HFS_DIR_OPEN_META_INFO *info = (HFS_DIR_OPEN_META_INFO *) ptr; TSK_FS_INFO *fs = &hfs->fs_info; @@ -221,9 +237,7 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, } } else { - uint8_t *rec_buf = (uint8_t *) cur_key; - uint16_t rec_type; - size_t rec_off2; + rec_buf = (uint8_t *) cur_key; if (tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid) < info->cnid) { @@ -266,7 +280,7 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, // Make sure there is enough space in cur_key for the name // (name is unicode so each characters is two bytes; 6 bytes // of non-name characters) - const int32_t nameLength = + nameLength = tsk_getu16(hfs->fs_info.endian, cur_key->name.length); if (2*nameLength > tsk_getu16(hfs->fs_info.endian, cur_key->key_len) - 6) { @@ -283,12 +297,11 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, /* This is a normal file in the folder */ else if (rec_type == HFS_FILE_RECORD) { 
- hfs_file *file = (hfs_file *) & rec_buf[rec_off2]; + file = (hfs_file *) & rec_buf[rec_off2]; // This could be a hard link. We need to test this CNID, and follow it if necessary. - unsigned char is_err; - TSK_INUM_T file_cnid = + file_cnid = tsk_getu32(hfs->fs_info.endian, file->std.cnid); - TSK_INUM_T target_cnid = + target_cnid = hfs_follow_hard_link(hfs, file, &is_err); if (is_err > 1) { error_returned @@ -296,9 +309,6 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, return HFS_BTREE_CB_ERR; } if (target_cnid != file_cnid) { - HFS_ENTRY entry; - uint8_t lkup; // lookup result - // This is a hard link. We need to fill in the name->type and name->meta_addr from the target info->fs_name->meta_addr = target_cnid; // get the Catalog entry for the target CNID @@ -327,7 +337,7 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, // Make sure there is enough space in cur_key for the name // (name is unicode so each characters is two bytes; 6 bytes // of non-name characters) - const int32_t nameLength = + nameLength = tsk_getu16(hfs->fs_info.endian, cur_key->name.length); if (2*nameLength > tsk_getu16(hfs->fs_info.endian, cur_key->key_len) - 6) { error_returned @@ -379,6 +389,7 @@ hfs_dir_open_meta(TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, TSK_FS_NAME *fs_name; HFS_DIR_OPEN_META_INFO info; + int i; tsk_error_reset(); @@ -433,7 +444,6 @@ hfs_dir_open_meta(TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, // if we are listing the root directory, add the Orphan directory and special HFS file entries if (a_addr == fs->root_inum) { - int i; for (i = 0; i < 6; i++) { switch (i) { case 0: pytsk-20190507/patches/sleuthkit-4.6.6-lzvn.patch000066400000000000000000000112661346423473500213020ustar00rootroot00000000000000diff --git a/tsk/fs/lzvn.c b/tsk/fs/lzvn.c index 756c868c..25ac4a4c 100644 --- a/tsk/fs/lzvn.c +++ b/tsk/fs/lzvn.c @@ -26,6 +26,26 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSI #include #include +// define the sized int types +#if !defined( _MSC_VER) || ( _MSC_VER >= 1600 ) +#include +#else +typedef unsigned __int16 uint16_t; +typedef __int16 int16_t; +typedef unsigned __int32 uint32_t; +typedef __int32 int32_t; +typedef unsigned __int64 uint64_t; +typedef __int64 int64_t; + +#if defined( _WIN64 ) +typedef __int64 intmax_t; +typedef unsigned __int64 uintmax_t; +#else +typedef __int32 intmax_t; +typedef unsigned __int32 uintmax_t; +#endif +#endif + #if defined(_MSC_VER) && !defined(__clang__) # define LZFSE_INLINE __forceinline # define __builtin_expect(X, Y) (X) @@ -182,15 +202,23 @@ void lzvn_decode(lzvn_decoder_state *state) { #endif size_t src_len = state->src_end - state->src; size_t dst_len = state->dst_end - state->dst; - if (src_len == 0 || dst_len == 0) - return; // empty buffer - const unsigned char *src_ptr = state->src; - unsigned char *dst_ptr = state->dst; - size_t D = state->d_prev; + const unsigned char *src_ptr = NULL; + unsigned char *dst_ptr = NULL; + size_t D = NULL; size_t M; size_t L; size_t opc_len; + unsigned char opc = 0; + uint16_t opc23 = 0; + size_t i; + + if (src_len == 0 || dst_len == 0) + return; // empty buffer + + src_ptr = state->src; + dst_ptr = state->dst; + D = state->d_prev; // Do we have a partially expanded match saved in state? 
if (state->L != 0 || state->M != 0) { @@ -206,7 +234,7 @@ void lzvn_decode(lzvn_decoder_state *state) { goto copy_literal_and_match; } - unsigned char opc = src_ptr[0]; + opc = src_ptr[0]; #if HAVE_LABELS_AS_VALUES goto *opc_tbl[opc]; @@ -413,7 +441,7 @@ void lzvn_decode(lzvn_decoder_state *state) { L = (size_t)extract(opc, 3, 2); if (src_len <= opc_len + L) return; // source truncated - uint16_t opc23 = load2(&src_ptr[1]); + opc23 = load2(&src_ptr[1]); M = (size_t)((extract(opc, 0, 3) << 2 | extract(opc23, 0, 2)) + 3); D = (size_t)extract(opc23, 2, 14); goto copy_literal_and_match; @@ -504,14 +532,12 @@ void lzvn_decode(lzvn_decoder_state *state) { // byte-by-byte copy of the literal. This is slow, but it can only ever // happen near the very end of a buffer, so it is not an important case to // optimize. - size_t i; for (i = 0; i < L; ++i) dst_ptr[i] = src_ptr[i]; } else { // Destination truncated: fill DST, and store partial match // Copy partial literal - size_t i; for (i = 0; i < dst_len; ++i) dst_ptr[i] = src_ptr[i]; // Save state @@ -549,21 +575,18 @@ void lzvn_decode(lzvn_decoder_state *state) { // copies. The last of these may slop over the intended end of // the match, but this is OK because we know we have a safety bound // away from the end of the destination buffer. - size_t i; for (i = 0; i < M; i += 8) store8(&dst_ptr[i], load8(&dst_ptr[i - D])); } else if (M <= dst_len) { // Either the match distance is too small, or we are too close to // the end of the buffer to safely use eight byte copies. Fall back // on a simple byte-by-byte implementation. - size_t i; for (i = 0; i < M; ++i) dst_ptr[i] = dst_ptr[i - D]; } else { // Destination truncated: fill DST, and store partial match // Copy partial match - size_t i; for (i = 0; i < dst_len; ++i) dst_ptr[i] = dst_ptr[i - D]; // Save state @@ -697,21 +720,18 @@ void lzvn_decode(lzvn_decoder_state *state) { // We are not near the end of the source or destination buffers; thus // we can safely copy the literal using wide copies, without worrying // about reading or writing past the end of either buffer. - size_t i; for (i = 0; i < L; i += 8) store8(&dst_ptr[i], load8(&src_ptr[i])); } else if (L <= dst_len) { // We are too close to the end of either the input or output stream // to be able to safely use an eight-byte copy. Instead we copy the // literal byte-by-byte. 
- size_t i; for (i = 0; i < L; ++i) dst_ptr[i] = src_ptr[i]; } else { // Destination truncated: fill DST, and store partial match // Copy partial literal - size_t i; for (i = 0; i < dst_len; ++i) dst_ptr[i] = src_ptr[i]; // Save state diff --git a/tsk/fs/lzvn.h b/tsk/fs/lzvn.h index 7e430ce3..cc10d157 100644 --- a/tsk/fs/lzvn.h +++ b/tsk/fs/lzvn.h @@ -2,7 +2,10 @@ #define LZVN_H #include + +#ifndef _MSC_VER #include +#endif #ifdef __cplusplus extern "C" { pytsk-20190507/patches/sleuthkit-4.6.6-ntfs.patch000066400000000000000000000661251346423473500212670ustar00rootroot00000000000000diff --git a/tsk/fs/ntfs.c b/tsk/fs/ntfs.c index e09557a1..576ebc94 100755 --- a/tsk/fs/ntfs.c +++ b/tsk/fs/ntfs.c @@ -140,6 +140,14 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) uint16_t sig_seq; ntfs_mft *mft; + ssize_t cnt; + + uint8_t *new_val, *old_val; + + uint16_t cur_seq = 0; + uint16_t cur_repl = 0; + + TSK_OFF_T run_len = 0; /* sanity checks */ if (!a_buf) { @@ -228,7 +236,7 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) } /* The length of this specific run */ - TSK_OFF_T run_len = data_run->len * a_ntfs->csize_b; + run_len = data_run->len * a_ntfs->csize_b; /* Is our MFT entry is in this run somewhere ? */ if (offset < run_len) { @@ -290,7 +298,6 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) /* can we do just one read or do we need multiple? */ if (mftaddr2_b) { - ssize_t cnt; /* read the first part into mft */ cnt = tsk_fs_read(&a_ntfs->fs_info, mftaddr_b, a_buf, mftaddr_len); if (cnt != (ssize_t)mftaddr_len) { @@ -321,7 +328,6 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) } } else { - ssize_t cnt; /* read the raw entry into mft */ cnt = tsk_fs_read(&a_ntfs->fs_info, mftaddr_b, a_buf, @@ -391,9 +397,8 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) sig_seq = tsk_getu16(fs->endian, upd->upd_val); /* cycle through each sector */ for (i = 1; i < tsk_getu16(fs->endian, mft->upd_cnt); i++) { - uint8_t *new_val, *old_val; /* The offset into the buffer of the value to analyze */ - size_t offset = i * NTFS_UPDATE_SEQ_STRIDE - 2; + offset = i * NTFS_UPDATE_SEQ_STRIDE - 2; /* Check that there is room in the buffer to read the current sequence value */ if (offset + 2 > a_ntfs->mft_rsize_b) { @@ -405,11 +410,11 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) } /* get the current sequence value */ - uint16_t cur_seq = + cur_seq = tsk_getu16(fs->endian, (uintptr_t) a_buf + offset); if (cur_seq != sig_seq) { /* get the replacement value */ - uint16_t cur_repl = + cur_repl = tsk_getu16(fs->endian, &upd->upd_seq + (i - 1) * 2); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); @@ -452,6 +457,12 @@ is_clustalloc(NTFS_INFO * ntfs, TSK_DADDR_T addr) int bits_p_clust, b; TSK_DADDR_T base; int8_t ret; + + TSK_DADDR_T c = 0; + TSK_FS_ATTR_RUN *run; + TSK_DADDR_T fsaddr = 0; + ssize_t cnt; + bits_p_clust = 8 * ntfs->fs_info.block_size; /* While we are loading the MFT, assume that everything @@ -486,10 +497,8 @@ is_clustalloc(NTFS_INFO * ntfs, TSK_DADDR_T addr) /* is this the same as in the cached buffer? 
*/ if (base != ntfs->bmap_buf_off) { - TSK_DADDR_T c = base; - TSK_FS_ATTR_RUN *run; - TSK_DADDR_T fsaddr = 0; - ssize_t cnt; + c = base; + fsaddr = 0; /* get the file system address of the bitmap cluster */ for (run = ntfs->bmap; run; run = run->next) { @@ -577,6 +586,8 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn, TSK_DADDR_T prev_addr = 0; TSK_OFF_T file_offset = start_vcn; + int64_t addr_offset = 0; + run = runlist_head; *a_data_run_head = NULL; @@ -589,7 +600,7 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn, * An entry with offset of 0 is for a sparse run */ while (NTFS_RUNL_LENSZ(run) != 0) { - int64_t addr_offset = 0; + addr_offset = 0; /* allocate a new tsk_fs_attr_run */ if ((data_run = tsk_fs_attr_run_alloc()) == NULL) { @@ -868,6 +879,24 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp) { size_t cl_index; + size_t blk_end; // index into the buffer to where block ends + size_t blk_size; // size of the current block + uint8_t iscomp; // set to 1 if block is compressed + size_t blk_st_uncomp; // index into uncompressed buffer where block started + uint16_t sb_header; // subblock header + + int a; + + unsigned char header = 0; + + size_t i; + int shift; + size_t start_position_index = 0; + size_t end_position_index = 0; + unsigned int offset = 0; + unsigned int length = 0; + uint16_t pheader; + tsk_error_reset(); comp->uncomp_idx = 0; @@ -877,12 +906,6 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp) * We use +1 here because the size value at start of block is 2 bytes. */ for (cl_index = 0; cl_index + 1 < comp->comp_len;) { - size_t blk_end; // index into the buffer to where block ends - size_t blk_size; // size of the current block - uint8_t iscomp; // set to 1 if block is compressed - size_t blk_st_uncomp; // index into uncompressed buffer where block started - uint16_t sb_header; // subblock header - sb_header = tsk_getu16(TSK_LIT_ENDIAN, comp->comp_buf + cl_index); // If the sb_header isn't set, we just fill the rest of the buffer with zeros. @@ -925,10 +948,8 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp) // cycle through the block while (cl_index < blk_end) { - int a; - // get the header header - unsigned char header = comp->comp_buf[cl_index]; + header = comp->comp_buf[cl_index]; cl_index++; if (tsk_verbose) @@ -964,13 +985,10 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp) * to a previous sequence of bytes. */ else { - size_t i; - int shift; - size_t start_position_index = 0; - size_t end_position_index = 0; - unsigned int offset = 0; - unsigned int length = 0; - uint16_t pheader; + start_position_index = 0; + end_position_index = 0; + offset = 0; + length = 0; if (cl_index + 1 >= blk_end) { tsk_error_set_errno(TSK_ERR_FS_FWALK); @@ -1109,6 +1127,8 @@ ntfs_proc_compunit(NTFS_INFO * ntfs, NTFS_COMP_INFO * comp, int sparse; uint64_t a; + ssize_t cnt; + /* With compressed attributes, there are three scenarios. 
* 1: The compression unit is not compressed, * 2: The compression unit is sparse @@ -1145,8 +1165,6 @@ ntfs_proc_compunit(NTFS_INFO * ntfs, NTFS_COMP_INFO * comp, // load up the compressed buffer so we can decompress it ntfs_uncompress_reset(comp); for (a = 0; a < comp_unit_size; a++) { - ssize_t cnt; - if (comp_unit[a] == 0) break; @@ -1181,8 +1199,6 @@ ntfs_proc_compunit(NTFS_INFO * ntfs, NTFS_COMP_INFO * comp, comp->uncomp_idx = 0; for (a = 0; a < comp_unit_size; a++) { - ssize_t cnt; - cnt = tsk_fs_read_block(fs, comp_unit[a], &comp->uncomp_buf[comp->uncomp_idx], fs->block_size); @@ -1214,6 +1230,22 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, TSK_FS_INFO *fs; NTFS_INFO *ntfs; + TSK_DADDR_T addr; + TSK_FS_ATTR_RUN *fs_attr_run; + TSK_DADDR_T *comp_unit; + uint32_t comp_unit_idx = 0; + NTFS_COMP_INFO comp; + TSK_OFF_T off = 0; + int retval; + uint8_t stop_loop = 0; + + size_t len_idx; + + size_t i; + + int myflags; + size_t read_len; + // clean up any error messages that are lying around tsk_error_reset(); if ((fs_attr == NULL) || (fs_attr->fs_file == NULL) @@ -1240,14 +1272,9 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, * dump the compressed data instead of giving an error. */ if (fs_attr->flags & TSK_FS_ATTR_COMP) { - TSK_DADDR_T addr; - TSK_FS_ATTR_RUN *fs_attr_run; - TSK_DADDR_T *comp_unit; - uint32_t comp_unit_idx = 0; - NTFS_COMP_INFO comp; - TSK_OFF_T off = 0; - int retval; - uint8_t stop_loop = 0; + comp_unit_idx = 0; + off = 0; + stop_loop = 0; if (fs_attr->nrd.compsize <= 0) { tsk_error_set_errno(TSK_ERR_FS_FWALK); @@ -1274,7 +1301,6 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, /* cycle through the number of runs we have */ for (fs_attr_run = fs_attr->nrd.run; fs_attr_run; fs_attr_run = fs_attr_run->next) { - size_t len_idx; /* We may get a FILLER entry at the beginning of the run * if we are processing a non-base file record since @@ -1360,8 +1386,6 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, if ((comp_unit_idx == fs_attr->nrd.compsize) || ((len_idx == fs_attr_run->len - 1) && (fs_attr_run->next == NULL))) { - size_t i; - // decompress the unit if (ntfs_proc_compunit(ntfs, &comp, comp_unit, comp_unit_idx)) { @@ -1379,9 +1403,6 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, // now call the callback with the uncompressed data for (i = 0; i < comp_unit_idx; i++) { - int myflags; - size_t read_len; - myflags = TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_COMP; @@ -1489,6 +1510,19 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, TSK_FS_INFO *fs = NULL; NTFS_INFO *ntfs = NULL; + TSK_FS_ATTR_RUN *data_run_cur; + TSK_OFF_T cu_blkoffset; // block offset of starting compression unit to start reading from + size_t byteoffset; // byte offset in compression unit of where we want to start reading from + TSK_DADDR_T *comp_unit; + uint32_t comp_unit_idx = 0; + NTFS_COMP_INFO comp; + size_t buf_idx = 0; + + ssize_t len; + + TSK_DADDR_T addr; + size_t a; + if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) || (a_fs_attr->fs_file->meta == NULL) || (a_fs_attr->fs_file->fs_info == NULL)) { @@ -1502,13 +1536,8 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, ntfs = (NTFS_INFO *) fs; if (a_fs_attr->flags & TSK_FS_ATTR_COMP) { - TSK_FS_ATTR_RUN *data_run_cur; - TSK_OFF_T cu_blkoffset; // block offset of starting compression unit to start reading from - size_t byteoffset; // byte offset in compression unit of where we want to start reading from - TSK_DADDR_T *comp_unit; - uint32_t comp_unit_idx = 0; - NTFS_COMP_INFO comp; - size_t 
buf_idx = 0; + comp_unit_idx = 0; + buf_idx = 0; if (a_fs_attr->nrd.compsize <= 0) { tsk_error_set_errno(TSK_ERR_FS_FWALK); @@ -1528,8 +1557,6 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, // we return 0s for reads past the initsize if (a_offset >= a_fs_attr->nrd.initsize) { - ssize_t len; - if (tsk_verbose) fprintf(stderr, "ntfs_file_read_special: Returning 0s for read past end of initsize (%" @@ -1570,9 +1597,6 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, (data_run_cur) && (buf_idx < a_len); data_run_cur = data_run_cur->next) { - TSK_DADDR_T addr; - size_t a; - // See if this run contains the starting offset they requested if (data_run_cur->offset + data_run_cur->len < (TSK_DADDR_T) cu_blkoffset) @@ -1717,6 +1741,32 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, char name[NTFS_MAXNAMLEN_UTF8 + 1]; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; + int retVal; + uint32_t type; + uint16_t id, id_new; + + int i; + UTF8 *name8; + UTF16 *name16; + + TSK_FS_ATTR *fs_attr = NULL; + TSK_FS_ATTR_RUN *fs_attr_run = NULL; + uint8_t data_flag = 0; + uint32_t compsize = 0; + TSK_RETVAL_ENUM retval; + + int cnt; + + const TSK_FS_ATTR *fs_attr2 = NULL; + + uint64_t ssize; // size + uint64_t alen; // allocated length + + uint16_t nameoff; + + ntfs_attr_fname *fname; + TSK_FS_META_NAME_LIST *fs_name; + if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Processing extended entry for primary entry %" @@ -1749,10 +1799,6 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, (ntfs_attr *) ((uintptr_t) attr + tsk_getu32(fs->endian, attr->len))) { - int retVal, i; - uint32_t type; - uint16_t id, id_new; - // sanity check on bounds of attribute. Prevents other // issues later on that use attr->len for bounds checks. if (((uintptr_t) attr + tsk_getu32(fs->endian, @@ -1782,12 +1828,8 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, } /* Copy the name and convert it to UTF8 */ - const uint16_t nameoff = tsk_getu16(fs->endian, attr->name_off); + nameoff = tsk_getu16(fs->endian, attr->name_off); if (attr->nlen && nameoff + (uint32_t) attr->nlen * 2 < tsk_getu32(fs->endian, attr->len)) { - int i; - UTF8 *name8; - UTF16 *name16; - name8 = (UTF8 *) name; name16 = (UTF16 *) ((uintptr_t) attr + nameoff); @@ -1830,8 +1872,6 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, * structure */ if (attr->res == NTFS_MFT_RES) { - TSK_FS_ATTR *fs_attr; - if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Resident Attribute in Type: %" @@ -1903,11 +1943,10 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, * list */ else { - TSK_FS_ATTR *fs_attr = NULL; - TSK_FS_ATTR_RUN *fs_attr_run = NULL; - uint8_t data_flag = 0; - uint32_t compsize = 0; - TSK_RETVAL_ENUM retval; + fs_attr = NULL; + fs_attr_run = NULL; + data_flag = 0; + compsize = 0; if (tsk_verbose) tsk_fprintf(stderr, @@ -1979,13 +2018,11 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, * unique ID values to the extended attributes. 
*/ if (id_new == 0) { - int cnt, i; - // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { - const TSK_FS_ATTR *fs_attr2 = + fs_attr2 = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr2) continue; @@ -2048,9 +2085,6 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, (TSK_FS_ATTR *) tsk_fs_attrlist_get_id(fs_file->meta->attr, type, id_new); if (fs_attr == NULL) { - uint64_t ssize; // size - uint64_t alen; // allocated length - if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) { @@ -2170,10 +2204,6 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, /* File Name (always resident) */ else if (type == NTFS_ATYPE_FNAME) { - ntfs_attr_fname *fname; - TSK_FS_META_NAME_LIST *fs_name; - UTF16 *name16; - UTF8 *name8; if (attr->res != NTFS_MFT_RES) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); @@ -2299,12 +2329,10 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs, * should at least have the head of the list */ if (!ntfs->mft_data) { - int cnt, i; - // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { - const TSK_FS_ATTR *fs_attr = + fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; @@ -2387,6 +2415,15 @@ ntfs_proc_attrlist(NTFS_INFO * ntfs, uint16_t nextid = 0; int a; + uint8_t found; + int i; + + TSK_INUM_T mftnum = 0; + uint32_t type = 0; + uint16_t id = 0; + + TSK_RETVAL_ENUM retval; + if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrlist: Processing entry %" @@ -2454,12 +2491,10 @@ ntfs_proc_attrlist(NTFS_INFO * ntfs, list = (ntfs_attrlist *) ((uintptr_t) list + tsk_getu16(fs->endian, list->len))) { - uint8_t found; - int i; - TSK_INUM_T mftnum = tsk_getu48(fs->endian, list->file_ref); - uint32_t type = tsk_getu32(fs->endian, list->type); - uint16_t id = tsk_getu16(fs->endian, list->id); + mftnum = tsk_getu48(fs->endian, list->file_ref); + type = tsk_getu32(fs->endian, list->type); + id = tsk_getu16(fs->endian, list->id); if (tsk_verbose) tsk_fprintf(stderr, @@ -2523,8 +2558,6 @@ ntfs_proc_attrlist(NTFS_INFO * ntfs, /* Process the ToDo list & and call ntfs_proc_attr */ for (a = 0; a < mftToDoCnt; a++) { - TSK_RETVAL_ENUM retval; - /* Sanity check. */ if (mftToDo[a] < ntfs->fs_info.first_inum || mftToDo[a] > ntfs->fs_info.last_inum || @@ -2647,6 +2680,8 @@ ntfs_dinode_copy(NTFS_INFO * ntfs, TSK_FS_FILE * a_fs_file, char *a_buf, TSK_RETVAL_ENUM retval; ntfs_mft *mft = (ntfs_mft *) a_buf; + TSK_FS_META_NAME_LIST *fs_name1, *fs_name2; + if ((a_fs_file == NULL) || (a_fs_file->meta == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); @@ -2669,7 +2704,6 @@ ntfs_dinode_copy(NTFS_INFO * ntfs, TSK_FS_FILE * a_fs_file, char *a_buf, /* If there are any name structures allocated, then free 'em */ if (a_fs_file->meta->name2) { - TSK_FS_META_NAME_LIST *fs_name1, *fs_name2; fs_name1 = a_fs_file->meta->name2; while (fs_name1) { @@ -2799,6 +2833,8 @@ ntfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, char *mft; uint8_t allocedMeta = 0; + uint16_t seqToCmp = 0; + // clean up any error messages that are lying around tsk_error_reset(); @@ -2853,7 +2889,7 @@ ntfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, * it is allocated. So, if we have a deleted MFT entry, then use * its previous sequence number to compare with the name so that we * still match them up (until the entry is allocated again). 
*/ - uint16_t seqToCmp = a_fs_file->meta->seq; + seqToCmp = a_fs_file->meta->seq; if (a_fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) { if (a_fs_file->meta->seq > 0) seqToCmp--; @@ -2960,6 +2996,11 @@ ntfs_attrname_lookup(TSK_FS_INFO * fs, uint16_t type, char *name, int len) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_attrdef *attrdef; + + UTF16 *name16 = NULL; + UTF8 *name8 = NULL; + int retVal; + if (!ntfs->attrdef) { if (ntfs_load_attrdef(ntfs)) return 1; @@ -2972,9 +3013,9 @@ ntfs_attrname_lookup(TSK_FS_INFO * fs, uint16_t type, char *name, int len) (tsk_getu32(fs->endian, attrdef->type))) { if (tsk_getu32(fs->endian, attrdef->type) == type) { - UTF16 *name16 = (UTF16 *) attrdef->label; - UTF8 *name8 = (UTF8 *) name; - int retVal; + name16 = (UTF16 *) attrdef->label; + name8 = (UTF8 *) name; + retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + @@ -3198,6 +3239,13 @@ ntfs_sds_to_str(TSK_FS_INFO * a_fs, const ntfs_attr_sds * a_sds, ntfs_sid *sid = NULL; uint32_t owner_offset; + + uint64_t authority = 0; + int i, len; + char *sid_str_offset = NULL; + char *sid_str = NULL; + unsigned int sid_str_len; + *a_sidstr = NULL; if ((a_fs == NULL) || (a_sds == NULL) || (a_sidstr == NULL)) { @@ -3228,11 +3276,9 @@ ntfs_sds_to_str(TSK_FS_INFO * a_fs, const ntfs_attr_sds * a_sds, // This check helps not process invalid data, which was noticed while testing // a failing harddrive if (sid->revision == 1) { - uint64_t authority = 0; - int i, len; - char *sid_str_offset = NULL; - char *sid_str = NULL; - unsigned int sid_str_len; + authority = 0; + sid_str_offset = NULL; + sid_str = NULL; //tsk_fprintf(stderr, "Sub-Authority Count: %i\n", sid->sub_auth_count); authority = 0; @@ -3481,6 +3527,10 @@ ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer) NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_attr_sii *sii; + uintptr_t idx_buffer_end = 0; + + ntfs_idxrec *idxrec = NULL; + if ((fs == NULL) || (sii_buffer == NULL) || (ntfs->sii_data.buffer == NULL)) return; @@ -3489,9 +3539,9 @@ ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer) for (sii_buffer_offset = 0; sii_buffer_offset < sii_buffer->size; sii_buffer_offset += ntfs->idx_rsize_b) { - uintptr_t idx_buffer_end = 0; + idx_buffer_end = 0; - ntfs_idxrec *idxrec = + idxrec = (ntfs_idxrec *) & sii_buffer->buffer[sii_buffer_offset]; // stop processing if we hit corrupt data @@ -3792,6 +3842,9 @@ ntfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T addr; TSK_FS_BLOCK *fs_block; + int retval; + int myflags; + // clean up any error messages that are lying around tsk_error_reset(); @@ -3833,9 +3886,6 @@ ntfs_block_walk(TSK_FS_INFO * fs, /* Cycle through the blocks */ for (addr = a_start_blk; addr <= a_end_blk; addr++) { - int retval; - int myflags; - /* identify if the cluster is allocated or not */ retval = is_clustalloc(ntfs, addr); if (retval == -1) { @@ -3907,6 +3957,10 @@ ntfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_FS_FILE *fs_file; TSK_INUM_T end_inum_tmp; ntfs_mft *mft; + + int retval; + TSK_RETVAL_ENUM retval2; + /* * Sanity checks. 
*/ @@ -4002,9 +4056,6 @@ ntfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, for (mftnum = start_inum; mftnum <= end_inum_tmp; mftnum++) { - int retval; - TSK_RETVAL_ENUM retval2; - /* read MFT entry in to NTFS_INFO */ if ((retval2 = ntfs_dinode_lookup(ntfs, (char *) mft, @@ -4085,8 +4136,6 @@ ntfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, if ((end_inum == TSK_FS_ORPHANDIR_INUM(fs)) && (flags & TSK_FS_META_FLAG_ALLOC) && (flags & TSK_FS_META_FLAG_USED)) { - int retval; - if (tsk_fs_dir_make_orphan_dir_meta(fs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(mft); @@ -4140,6 +4189,10 @@ ntfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) char asc[512]; ntfs_attrdef *attrdeftmp; + UTF16 *name16 = NULL; + UTF8 *name8 = NULL; + int retVal; + tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: NTFS\n"); @@ -4175,9 +4228,6 @@ ntfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) if ((fs_attr->flags & TSK_FS_ATTR_RES) && (fs_attr->size)) { - UTF16 *name16 = (UTF16 *) fs_attr->rd.buf; - UTF8 *name8 = (UTF8 *) asc; - int retVal; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + @@ -4256,9 +4306,9 @@ ntfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) while ((((uintptr_t) attrdeftmp - (uintptr_t) ntfs->attrdef + sizeof(ntfs_attrdef)) < ntfs->attrdef_len) && (tsk_getu32(fs->endian, attrdeftmp->type))) { - UTF16 *name16 = (UTF16 *) attrdeftmp->label; - UTF8 *name8 = (UTF8 *) asc; - int retVal; + name16 = (UTF16 *) attrdeftmp->label; + name8 = (UTF8 *) asc; + retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + @@ -4354,6 +4404,26 @@ ntfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, char timeBuf[128]; int idx; + ntfs_attr_si *si = NULL; + char *sid_str; + + int a = 0; + + ntfs_attr_fname *fname = NULL; + uint64_t flags; + + ntfs_attr_objid *objid = NULL; + uint64_t id1, id2; + + char *buf; + ntfs_attrlist *list; + uintptr_t endaddr; + TSK_FS_LOAD_FILE load_file; + + int cnt, i; + + char type[512]; + // clean up any error messages that are lying around tsk_error_reset(); @@ -4394,10 +4464,9 @@ ntfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, /* STANDARD_INFORMATION info */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_SI); if (fs_attr) { - ntfs_attr_si *si = (ntfs_attr_si *) fs_attr->rd.buf; - char *sid_str; + si = (ntfs_attr_si *) fs_attr->rd.buf; - int a = 0; + a = 0; tsk_fprintf(hFile, "\n$STANDARD_INFORMATION Attribute Values:\n"); tsk_fprintf(hFile, "Flags: "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_RO) @@ -4629,8 +4698,8 @@ ntfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, /* $OBJECT_ID Information */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_OBJID); if (fs_attr) { - ntfs_attr_objid *objid = (ntfs_attr_objid *) fs_attr->rd.buf; - uint64_t id1, id2; + objid = (ntfs_attr_objid *) fs_attr->rd.buf; + tsk_fprintf(hFile, "\n$OBJECT_ID Attribute Values:\n"); id1 = tsk_getu64(fs->endian, objid->objid1); id2 = tsk_getu64(fs->endian, objid->objid2); @@ -4696,11 +4765,6 @@ ntfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_ATTRLIST); if (fs_attr) { - char *buf; - ntfs_attrlist *list; - uintptr_t endaddr; - TSK_FS_LOAD_FILE load_file; - tsk_fprintf(hFile, "\n$ATTRIBUTE_LIST Attribute Values:\n"); /* Get a copy 
of the attribute list stream */ @@ -4749,14 +4813,10 @@ ntfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, /* Print all of the attributes */ tsk_fprintf(hFile, "\nAttributes: \n"); if (fs_file->meta->attr) { - int cnt, i; - // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { - char type[512]; - - const TSK_FS_ATTR *fs_attr = + fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; pytsk-20190507/pylintrc000066400000000000000000000200471346423473500147420ustar00rootroot00000000000000# Original file copied from: # http://src.chromium.org/chrome/trunk/tools/depot_tools/pylintrc [MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= [MESSAGES CONTROL] # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time. #enable= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). # CHANGED: # # C0103: Invalid name "" # C0302: Too many lines in module (N) # # I0010: Unable to consider inline option '' # I0011: Locally disabling WNNNN # # R0201: Method could be a function # R0801: Similar lines in N files # R0901: Too many ancestors (N/7) # R0902: Too many instance attributes (N/7) # R0903: Too few public methods (N/2) # R0904: Too many public methods (N/20) # R0911: Too many return statements (N/6) # R0912: Too many branches (N/12) # R0913: Too many arguments (N/5) # R0914: Too many local variables (N/15) # R0915: Too many statements (N/50) # R0921: Abstract class not referenced # R0922: Abstract class is only referenced 1 times # # W0141: Used builtin function '' # W0142: Used * or ** magic # W0402: Uses of a deprecated module 'string' # W0404: 41: Reimport 'XX' (imported line NN) # W0511: TODO # W1201: Specify string format arguments as logging function parameters # # Disabled: # deprecated-lambda # locally-enabled # logging-format-interpolation # no-member # redefined-variable-type # relative-import # simplifiable-if-statement # too-many-boolean-expressions (N/5) # too-many-nested-blocks (N/5) # ungrouped-imports disable=C0103,C0302,I0010,I0011,R0201,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,W0141,W0142,W0402,W0404,W0511,W1201,deprecated-lambda,locally-enabled,logging-format-interpolation,no-member,redefined-variable-type,relative-import,simplifiable-if-statement,too-many-boolean-expressions,too-many-nested-blocks,ungrouped-imports [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html output-format=text # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". 
files-output=no # Tells whether to display a full report or only the messages # CHANGED: reports=no # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the beginning of the name of unused variables. # By default this is _ and dummy but we prefer _ and unused. dummy-variables-rgx=_|unused # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). ignored-classes=SQLObject,twisted.internet.reactor,hashlib,google.appengine.api.memcache # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular # expressions are accepted. generated-members=REQUEST,acl_users,aq_parent,multiprocessing.managers.SyncManager [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes [FORMAT] # Maximum number of characters on a single line. max-line-length=80 # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). 
# CHANGED: indent-string=' ' [BASIC] # List of builtins function names that should not be used, separated by a comma bad-functions=map,filter,apply,input # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Regular expression which should only match correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct instance attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # Regular expression which should only match functions or classes name which do # not require a docstring no-docstring-rgx=__.*__ [DESIGN] # Maximum number of arguments for function / method max-args=5 # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.* # Maximum number of locals for function / method body max-locals=15 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body max-branchs=12 # Maximum number of statements in function / method body max-statements=50 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 [CLASSES] # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,string,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=Exception pytsk-20190507/pytsk3.h000066400000000000000000000006111346423473500145540ustar00rootroot00000000000000/* ** pytsk3.h ** ** Made by mic ** Login ** ** Started on Sat Apr 17 20:48:58 2010 mic ** Last update Sat Apr 17 20:48:58 2010 mic this is a shadow file: Do not directly include it - we redefine some of TSK specific structs so we can bind them here. 
*/ #ifndef PYTSK3_H_ # define PYTSK3_H_ #include #include "class.h" #endif /* !PYTSK3_H_ */ pytsk-20190507/run_tests.py000077500000000000000000000017411346423473500155560ustar00rootroot00000000000000#!/usr/bin/python # # Script to run tests. # # Copyright 2012, Kristinn Gudjonsson . # Copyright 2013, Joachim Metz . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to run the tests.""" import unittest import sys if __name__ == "__main__": test_suite = unittest.TestLoader().discover("tests", pattern="*.py") test_results = unittest.TextTestRunner(verbosity=2).run(test_suite) if not test_results.wasSuccessful(): sys.exit(1) pytsk-20190507/setup.cfg000066400000000000000000000003041346423473500147660ustar00rootroot00000000000000[bdist_rpm] release = 1 packager = Joachim Metz doc_files = LICENSE README build_requires = python-setuptools [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 pytsk-20190507/setup.py000077500000000000000000000354001346423473500146670ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2010, Michael Cohen . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Install the pytsk python module. You can control the installation process using the following environment variables: SLEUTHKIT_SOURCE: The path to the locally downloaded tarball of the sleuthkit. If not specified we download from the internet. SLEUTHKIT_PATH: A path to the locally build sleuthkit source tree. If not specified we use SLEUTHKIT_SOURCE environment variable (above). """ from __future__ import print_function import glob import re import os import subprocess import sys import time import distutils.ccompiler from distutils.ccompiler import new_compiler from setuptools import setup, Command, Extension from setuptools.command.build_ext import build_ext from setuptools.command.sdist import sdist try: from distutils.command.bdist_msi import bdist_msi except ImportError: bdist_msi = None try: from distutils.command.bdist_rpm import bdist_rpm except ImportError: bdist_rpm = None import generate_bindings import run_tests if not bdist_msi: BdistMSICommand = None else: class BdistMSICommand(bdist_msi): """Custom handler for the bdist_msi command.""" def run(self): """Builds an MSI.""" # Command bdist_msi does not support the library version, neither a date # as a version but if we suffix it with .1 everything is fine. 
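      # For example, a date-based version string such as "20190507" becomes
      # "20190507.1", which bdist_msi accepts.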
self.distribution.metadata.version += ".1" bdist_msi.run(self) if not bdist_rpm: BdistRPMCommand = None else: class BdistRPMCommand(bdist_rpm): """Custom handler for the bdist_rpm command.""" def make_spec_file(self, spec_file): """Make an RPM Spec file.""" # Note that bdist_rpm can be an old style class. if issubclass(BdistRPMCommand, object): spec_file = super(BdistRPMCommand, self)._make_spec_file() else: spec_file = bdist_rpm._make_spec_file(self) if sys.version_info[0] < 3: python_package = 'python2' else: python_package = 'python3' description = [] requires = '' summary = '' in_description = False python_spec_file = [] for line in iter(spec_file): if line.startswith('Summary: '): summary = line elif line.startswith('BuildRequires: '): line = 'BuildRequires: {0:s}-setuptools, {0:s}-devel'.format( python_package) elif line.startswith('Requires: '): requires = line[10:] if python_package == 'python3': requires = requires.replace('python-', 'python3-') requires = requires.replace('python2-', 'python3-') elif line.startswith('%description'): in_description = True elif line.startswith('python setup.py build'): if python_package == 'python3': line = '%py3_build' else: line = '%py2_build' elif line.startswith('python setup.py install'): if python_package == 'python3': line = '%py3_install' else: line = '%py2_install' elif line.startswith('%files'): lines = [ '%files -n {0:s}-%{{name}}'.format(python_package), '%defattr(644,root,root,755)', '%license LICENSE', '%doc README'] if python_package == 'python3': lines.extend([ '%{_libdir}/python3*/site-packages/*.so', '%{_libdir}/python3*/site-packages/pytsk3*.egg-info/*', '', '%exclude %{_prefix}/share/doc/*']) else: lines.extend([ '%{_libdir}/python2*/site-packages/*.so', '%{_libdir}/python2*/site-packages/pytsk3*.egg-info/*', '', '%exclude %{_prefix}/share/doc/*']) python_spec_file.extend(lines) break elif line.startswith('%prep'): in_description = False python_spec_file.append( '%package -n {0:s}-%{{name}}'.format(python_package)) if python_package == 'python2': python_spec_file.extend([ 'Obsoletes: python-pytsk3 < %{version}', 'Provides: python-pytsk3 = %{version}']) if requires: python_spec_file.append('Requires: {0:s}'.format(requires)) python_spec_file.extend([ '{0:s}'.format(summary), '', '%description -n {0:s}-%{{name}}'.format(python_package)]) python_spec_file.extend(description) elif in_description: # Ignore leading white lines in the description. if not description and not line: continue description.append(line) python_spec_file.append(line) return python_spec_file def _make_spec_file(self): """Generates the text of an RPM spec file. Returns: list[str]: lines of text. """ return self.make_spec_file( bdist_rpm._make_spec_file(self)) class BuildExtCommand(build_ext): """Custom handler for the build_ext command.""" def configure_source_tree(self, compiler): """Configures the source and returns a dict of defines.""" define_macros = [] define_macros.append(("HAVE_TSK_LIBTSK_H", "")) if compiler.compiler_type == "msvc": return define_macros + [ ("WIN32", "1"), ("UNICODE", "1"), ("_CRT_SECURE_NO_WARNINGS", "1"), ] # We want to build as much as possible self contained Python # binding. 
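    # The configure flags below disable optional sleuthkit dependencies
    # (Java bindings, afflib, libewf, libpq, libvhdi, libvmdk and zlib) so
    # that the resulting pytsk3 extension does not depend on those external
    # libraries.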
command = [ "sh", "configure", "--disable-java", "--without-afflib", "--without-libewf", "--without-libpq", "--without-libvhdi", "--without-libvmdk", "--without-zlib"] output = subprocess.check_output(command, cwd="sleuthkit") print_line = False for line in output.split(b"\n"): line = line.rstrip() if line == b"configure:": print_line = True if print_line: if sys.version_info[0] >= 3: line = line.decode("ascii") print(line) return define_macros + [ ("HAVE_CONFIG_H", "1"), ("LOCALEDIR", "\"/usr/share/locale\""), ] def run(self): compiler = new_compiler(compiler=self.compiler) # pylint: disable=attribute-defined-outside-init self.define = self.configure_source_tree(compiler) libtsk_path = os.path.join("sleuthkit", "tsk") if not os.access("pytsk3.c", os.R_OK): # Generate the Python binding code (pytsk3.c). libtsk_header_files = [ os.path.join(libtsk_path, "libtsk.h"), os.path.join(libtsk_path, "base", "tsk_base.h"), os.path.join(libtsk_path, "fs", "tsk_fs.h"), os.path.join(libtsk_path, "img", "tsk_img.h"), os.path.join(libtsk_path, "vs", "tsk_vs.h"), "tsk3.h"] print("Generating bindings...") generate_bindings.generate_bindings( "pytsk3.c", libtsk_header_files, initialization="tsk_init();") build_ext.run(self) class SDistCommand(sdist): """Custom handler for generating source dist.""" def run(self): libtsk_path = os.path.join("sleuthkit", "tsk") # sleuthkit submodule is not there, probably because this has been # freshly checked out. if not os.access(libtsk_path, os.R_OK): subprocess.check_call(["git", "submodule", "init"]) subprocess.check_call(["git", "submodule", "update"]) if not os.path.exists(os.path.join("sleuthkit", "configure")): raise RuntimeError( "Missing: sleuthkit/configure run 'setup.py build' first.") sdist.run(self) class UpdateCommand(Command): """Update sleuthkit source. This is normally only run by packagers to make a new release. """ _SLEUTHKIT_GIT_TAG = "4.6.6" version = time.strftime("%Y%m%d") timezone_minutes, _ = divmod(time.timezone, 60) timezone_hours, timezone_minutes = divmod(timezone_minutes, 60) # If timezone_hours is -1 %02d will format as -1 instead of -01 # hence we detect the sign and force a leading zero. 
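  # For example, timezone_hours == -1 with timezone_minutes == 0 renders as
  # "-0100" below, whereas a plain "%02d%02d" would render as "-100".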
if timezone_hours < 0: timezone_string = "-%02d%02d" % (-timezone_hours, timezone_minutes) else: timezone_string = "+%02d%02d" % (timezone_hours, timezone_minutes) version_pkg = "%s %s" % ( time.strftime("%a, %d %b %Y %H:%M:%S"), timezone_string) user_options = [("use-head", None, ( "Use the latest version of Sleuthkit checked into git (HEAD) instead of " "tag: {0:s}".format(_SLEUTHKIT_GIT_TAG)))] def initialize_options(self): self.use_head = False def finalize_options(self): self.use_head = bool(self.use_head) files = { "sleuthkit/Makefile.am": [ ("SUBDIRS = .+", "SUBDIRS = tsk"), ], "class_parser.py": [ ('VERSION = "[^"]+"', 'VERSION = "%s"' % version), ], "dpkg/changelog": [ (r"pytsk3 \([^\)]+\)", "pytsk3 (%s-1)" % version), ("(<[^>]+>).+", r"\1 %s" % version_pkg), ], } def patch_sleuthkit(self): """Applies patches to the SleuthKit source code.""" for filename, rules in iter(self.files.items()): filename = os.path.join(*filename.split("/")) with open(filename, "r") as file_object: data = file_object.read() for search, replace in rules: data = re.sub(search, replace, data) with open(filename, "w") as fd: fd.write(data) patch_files = [ "sleuthkit-{0:s}-configure.ac".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-ext2fs.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-ext2fs_dent.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-ffs_dent.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-gpt.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-hfs.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-hfs_dent.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-lzvn.patch".format(self._SLEUTHKIT_GIT_TAG), "sleuthkit-{0:s}-ntfs.patch".format(self._SLEUTHKIT_GIT_TAG)] for patch_file in patch_files: patch_file = os.path.join("patches", patch_file) if not os.path.exists(patch_file): print("No such patch file: {0:s}".format(patch_file)) continue patch_file = os.path.join("..", patch_file) subprocess.check_call(["git", "apply", patch_file], cwd="sleuthkit") def run(self): subprocess.check_call(["git", "stash"], cwd="sleuthkit") subprocess.check_call(["git", "submodule", "init"]) subprocess.check_call(["git", "submodule", "update"]) print("Updating sleuthkit") subprocess.check_call(["git", "reset", "--hard"], cwd="sleuthkit") subprocess.check_call(["git", "clean", "-x", "-f", "-d"], cwd="sleuthkit") subprocess.check_call(["git", "checkout", "master"], cwd="sleuthkit") subprocess.check_call(["git", "pull"], cwd="sleuthkit") if self.use_head: print("Pulling from HEAD") else: print("Pulling from tag: {0:s}".format(self._SLEUTHKIT_GIT_TAG)) subprocess.check_call(["git", "fetch", "--tags"], cwd="sleuthkit") git_tag_path = "tags/sleuthkit-{0:s}".format(self._SLEUTHKIT_GIT_TAG) subprocess.check_call(["git", "checkout", git_tag_path], cwd="sleuthkit") self.patch_sleuthkit() compiler_type = distutils.ccompiler.get_default_compiler() if compiler_type != "msvc": subprocess.check_call(["./bootstrap"], cwd="sleuthkit") # Now derive the version based on the date. with open("version.txt", "w") as fd: fd.write(self.version) libtsk_path = os.path.join("sleuthkit", "tsk") # Generate the Python binding code (pytsk3.c). 
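    # Note: this is the same header list that BuildExtCommand.run passes to
    # generate_bindings.generate_bindings(); keep the two lists in sync.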
libtsk_header_files = [ os.path.join(libtsk_path, "libtsk.h"), os.path.join(libtsk_path, "base", "tsk_base.h"), os.path.join(libtsk_path, "fs", "tsk_fs.h"), os.path.join(libtsk_path, "img", "tsk_img.h"), os.path.join(libtsk_path, "vs", "tsk_vs.h"), "tsk3.h"] print("Generating bindings...") generate_bindings.generate_bindings( "pytsk3.c", libtsk_header_files, initialization="tsk_init();") class ProjectBuilder(object): """Class to help build the project.""" def __init__(self, project_config, argv): """Initializes a project builder object.""" self._project_config = project_config self._argv = argv # The path to the sleuthkit/tsk directory. self._libtsk_path = os.path.join("sleuthkit", "tsk") # Paths under the sleuthkit/tsk directory which contain files we need # to compile. self._sub_library_names = [ "auto", "base", "docs", "fs", "hashdb", "img", "vs"] # The args for the extension builder. self.extension_args = { "define_macros": [], "include_dirs": ["talloc", self._libtsk_path, "sleuthkit", "."], "library_dirs": [], "libraries": []} # The sources to build. self._source_files = [ "class.c", "error.c", "tsk3.c", "pytsk3.c", "talloc/talloc.c"] # Path to the top of the unpacked sleuthkit sources. self._sleuthkit_path = "sleuthkit" def build(self): """Build everything.""" # Fetch all c and cpp files from the subdirs to compile. for library_name in self._sub_library_names: for extension in ("*.c", "*.cpp"): extension_glob = os.path.join( self._libtsk_path, library_name, extension) self._source_files.extend(glob.glob(extension_glob)) # Sort the soure files to make sure they are in consistent order when # building. source_files = sorted(self._source_files) ext_modules = [Extension("pytsk3", source_files, **self.extension_args)] setup( cmdclass={ "build_ext": BuildExtCommand, "bdist_msi": BdistMSICommand, "bdist_rpm": BdistRPMCommand, "sdist": SDistCommand, "update": UpdateCommand}, ext_modules=ext_modules, **self._project_config) if __name__ == "__main__": __version__ = open("version.txt").read().strip() setup_args = dict( name="pytsk3", version=__version__, description="Python bindings for the sleuthkit", long_description=( "Python bindings for the sleuthkit (http://www.sleuthkit.org/)"), license="Apache 2.0", url="https://github.com/py4n6/pytsk/", author="Michael Cohen and Joachim Metz", author_email="scudette@gmail.com, joachim.metz@gmail.com", zip_safe=False) ProjectBuilder(setup_args, sys.argv).build() pytsk-20190507/sleuthkit/000077500000000000000000000000001346423473500151645ustar00rootroot00000000000000pytsk-20190507/talloc/000077500000000000000000000000001346423473500144265ustar00rootroot00000000000000pytsk-20190507/talloc/LICENSE000066400000000000000000000167301346423473500154420ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. 
An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. 
d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. pytsk-20190507/talloc/README000066400000000000000000000006521346423473500153110ustar00rootroot00000000000000Talloc is part of the Samba project and can be found at: http://talloc.samba.org/talloc/doc/html/index.html It is licensed under the: GNU Lesser General Public License. 
See the corresponding LICENSE file or http://www.gnu.org/licenses/ The files talloc.c and talloc.h are unaltered copies taken from: http://www.samba.org/ftp/talloc/talloc-2.1.0.tar.gz replace.h was added to force talloc.c to compile on various systems. pytsk-20190507/talloc/replace.h000066400000000000000000000010321346423473500162060ustar00rootroot00000000000000#ifndef _REPLACE_H_ #define _REPLACE_H_ #include #include #if !defined( UINT_MAX ) #include #endif #define _PUBLIC_ extern typedef int bool; #define true 1 #define false 0 typedef unsigned char uint8_t; #if !defined( MIN ) #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #if defined( _MSC_VER ) #define inline /* inline */ #if defined( MS_WIN64 ) typedef __int64 ssize_t; #else typedef _W64 int ssize_t; #endif #else #define HAVE_VA_COPY #endif /* defined( _MSC_VER ) */ #endif /* _REPLACE_H_ */ pytsk-20190507/talloc/talloc.c000066400000000000000000002010531346423473500160510ustar00rootroot00000000000000/* Samba Unix SMB/CIFS implementation. Samba trivial allocation library - new interface NOTE: Please read talloc_guide.txt for full documentation Copyright (C) Andrew Tridgell 2004 Copyright (C) Stefan Metzmacher 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ /* inspired by http://swapped.cc/halloc/ */ #include "replace.h" #include "talloc.h" #include #ifdef TALLOC_BUILD_VERSION_MAJOR #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR) #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR" #endif #endif #ifdef TALLOC_BUILD_VERSION_MINOR #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR) #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR" #endif #endif /* Special macros that are no-ops except when run under Valgrind on * x86. 
They've moved a little bit from valgrind 1.0.4 to 1.9.4 */ #ifdef HAVE_VALGRIND_MEMCHECK_H /* memcheck.h includes valgrind.h */ #include #elif defined(HAVE_VALGRIND_H) #include #endif /* use this to force every realloc to change the pointer, to stress test code that might not cope */ #define ALWAYS_REALLOC 0 #define MAX_TALLOC_SIZE 0x10000000 #define TALLOC_MAGIC_BASE 0xe814ec70 #define TALLOC_MAGIC ( \ TALLOC_MAGIC_BASE + \ (TALLOC_VERSION_MAJOR << 12) + \ (TALLOC_VERSION_MINOR << 4) \ ) #define TALLOC_FLAG_FREE 0x01 #define TALLOC_FLAG_LOOP 0x02 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */ #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */ #define TALLOC_MAGIC_REFERENCE ((const char *)1) /* by default we abort when given a bad pointer (such as when talloc_free() is called on a pointer that came from malloc() */ #ifndef TALLOC_ABORT #define TALLOC_ABORT(reason) abort() #endif #ifndef discard_const_p #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T) # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr))) #else # define discard_const_p(type, ptr) ((type *)(ptr)) #endif #endif /* these macros gain us a few percent of speed on gcc */ #if (__GNUC__ >= 3) /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1 as its first argument */ #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #else #ifndef likely #define likely(x) (x) #endif #ifndef unlikely #define unlikely(x) (x) #endif #endif /* this null_context is only used if talloc_enable_leak_report() or talloc_enable_leak_report_full() is called, otherwise it remains NULL */ static void *null_context; static void *autofree_context; /* used to enable fill of memory on free, which can be useful for * catching use after free errors when valgrind is too slow */ static struct { bool initialised; bool enabled; uint8_t fill_value; } talloc_fill; #define TALLOC_FILL_ENV "TALLOC_FREE_FILL" /* * do not wipe the header, to allow the * double-free logic to still work */ #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size; \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) /* Mark the whole chunk as not accessable */ #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \ size_t _flen = TC_HDR_SIZE + (_tc)->size; \ char *_fptr = (char *)(_tc); \ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ } while(0) #else #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0) #endif #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \ TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \ TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \ } while (0) #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) /* Mark the unused bytes not accessable */ #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ } while (0) #else #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define 
TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \ TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \ TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ if (unlikely(talloc_fill.enabled)) { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ memset(_fptr, talloc_fill.fill_value, _flen); \ } \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) /* Mark the unused bytes as undefined */ #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _flen = (_tc)->size - (_new_size); \ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ _fptr += (_new_size); \ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ } while (0) #else #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \ TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \ TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) /* Mark the new bytes as undefined */ #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \ size_t _old_used = TC_HDR_SIZE + (_tc)->size; \ size_t _new_used = TC_HDR_SIZE + (_new_size); \ size_t _flen = _new_used - _old_used; \ char *_fptr = _old_used + (char *)(_tc); \ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ } while (0) #else #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) #endif #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \ TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \ } while (0) struct talloc_reference_handle { struct talloc_reference_handle *next, *prev; void *ptr; const char *location; }; struct talloc_memlimit { struct talloc_chunk *parent; struct talloc_memlimit *upper; size_t max_size; size_t cur_size; }; static bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size); static void talloc_memlimit_grow(struct talloc_memlimit *limit, size_t size); static void talloc_memlimit_shrink(struct talloc_memlimit *limit, size_t size); static void talloc_memlimit_update_on_free(struct talloc_chunk *tc); typedef int (*talloc_destructor_t)(void *); struct talloc_pool_hdr; struct talloc_chunk { struct talloc_chunk *next, *prev; struct talloc_chunk *parent, *child; struct talloc_reference_handle *refs; talloc_destructor_t destructor; const char *name; size_t size; unsigned flags; /* * limit semantics: * if 'limit' is set it means all *new* children of the context will * be limited to a total aggregate size ox max_size for memory * allocations. * cur_size is used to keep track of the current use */ struct talloc_memlimit *limit; /* * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool" * is a pointer to the struct talloc_chunk of the pool that it was * allocated from. This way children can quickly find the pool to chew * from. */ struct talloc_pool_hdr *pool; }; /* 16 byte alignment seems to keep everyone happy */ #define TC_ALIGN16(s) (((s)+15)&~15) #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk)) #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc)) _PUBLIC_ int talloc_version_major(void) { return TALLOC_VERSION_MAJOR; } _PUBLIC_ int talloc_version_minor(void) { return TALLOC_VERSION_MINOR; } static void (*talloc_log_fn)(const char *message); _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message)) { talloc_log_fn = log_fn; } static void talloc_log(const char *fmt, ...) 
PRINTF_ATTRIBUTE(1,2); static void talloc_log(const char *fmt, ...) { va_list ap; char *message; if (!talloc_log_fn) { return; } va_start(ap, fmt); message = talloc_vasprintf(NULL, fmt, ap); va_end(ap); talloc_log_fn(message); talloc_free(message); } static void talloc_log_stderr(const char *message) { fprintf(stderr, "%s", message); } _PUBLIC_ void talloc_set_log_stderr(void) { talloc_set_log_fn(talloc_log_stderr); } static void (*talloc_abort_fn)(const char *reason); _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason)) { talloc_abort_fn = abort_fn; } static void talloc_abort(const char *reason) { talloc_log("%s\n", reason); if (!talloc_abort_fn) { TALLOC_ABORT(reason); } talloc_abort_fn(reason); } static void talloc_abort_magic(unsigned magic) { unsigned striped = magic - TALLOC_MAGIC_BASE; unsigned major = (striped & 0xFFFFF000) >> 12; unsigned minor = (striped & 0x00000FF0) >> 4; talloc_log("Bad talloc magic[0x%08X/%u/%u] expected[0x%08X/%u/%u]\n", magic, major, minor, TALLOC_MAGIC, TALLOC_VERSION_MAJOR, TALLOC_VERSION_MINOR); talloc_abort("Bad talloc magic value - wrong talloc version used/mixed"); } static void talloc_abort_access_after_free(void) { talloc_abort("Bad talloc magic value - access after free"); } static void talloc_abort_unknown_value(void) { talloc_abort("Bad talloc magic value - unknown value"); } /* panic if we get a bad magic value */ static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr) { const char *pp = (const char *)ptr; struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE); if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~0xF)) != TALLOC_MAGIC)) { if ((tc->flags & (~0xFFF)) == TALLOC_MAGIC_BASE) { talloc_abort_magic(tc->flags & (~0xF)); return NULL; } if (tc->flags & TALLOC_FLAG_FREE) { talloc_log("talloc: access after free error - first free may be at %s\n", tc->name); talloc_abort_access_after_free(); return NULL; } else { talloc_abort_unknown_value(); return NULL; } } return tc; } /* hook into the front of the list */ #define _TLIST_ADD(list, p) \ do { \ if (!(list)) { \ (list) = (p); \ (p)->next = (p)->prev = NULL; \ } else { \ (list)->prev = (p); \ (p)->next = (list); \ (p)->prev = NULL; \ (list) = (p); \ }\ } while (0) /* remove an element from a list - element doesn't have to be in list. */ #define _TLIST_REMOVE(list, p) \ do { \ if ((p) == (list)) { \ (list) = (p)->next; \ if (list) (list)->prev = NULL; \ } else { \ if ((p)->prev) (p)->prev->next = (p)->next; \ if ((p)->next) (p)->next->prev = (p)->prev; \ } \ if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \ } while (0) /* return the parent chunk of a pointer */ static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return NULL; } tc = talloc_chunk_from_ptr(ptr); while (tc->prev) tc=tc->prev; return tc->parent; } _PUBLIC_ void *talloc_parent(const void *ptr) { struct talloc_chunk *tc = talloc_parent_chunk(ptr); return tc? TC_PTR_FROM_CHUNK(tc) : NULL; } /* find parents name */ _PUBLIC_ const char *talloc_parent_name(const void *ptr) { struct talloc_chunk *tc = talloc_parent_chunk(ptr); return tc? tc->name : NULL; } /* A pool carries an in-pool object count count in the first 16 bytes. bytes. This is done to support talloc_steal() to a parent outside of the pool. The count includes the pool itself, so a talloc_free() on a pool will only destroy the pool if the count has dropped to zero. 
A talloc_free() of a pool member will reduce the count, and eventually also call free(3) on the pool memory. The object count is not put into "struct talloc_chunk" because it is only relevant for talloc pools and the alignment to 16 bytes would increase the memory footprint of each talloc chunk by those 16 bytes. */ struct talloc_pool_hdr { void *end; unsigned int object_count; size_t poolsize; }; #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr)) static struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c) { return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE); } static struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h) { return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE); } static void *tc_pool_end(struct talloc_pool_hdr *pool_hdr) { struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize; } static size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr) { return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end; } /* If tc is inside a pool, this gives the next neighbour. */ static void *tc_next_chunk(struct talloc_chunk *tc) { return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size); } static void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr) { struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); return tc_next_chunk(tc); } /* Mark the whole remaining pool as not accessable */ static void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr) { size_t flen = tc_pool_space_left(pool_hdr); if (unlikely(talloc_fill.enabled)) { memset(pool_hdr->end, talloc_fill.fill_value, flen); } #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen); #endif } /* Allocate from a pool */ static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent, size_t size, size_t prefix_len) { struct talloc_pool_hdr *pool_hdr = NULL; size_t space_left; struct talloc_chunk *result; size_t chunk_size; if (parent == NULL) { return NULL; } if (parent->flags & TALLOC_FLAG_POOL) { pool_hdr = talloc_pool_from_chunk(parent); } else if (parent->flags & TALLOC_FLAG_POOLMEM) { pool_hdr = parent->pool; } if (pool_hdr == NULL) { return NULL; } space_left = tc_pool_space_left(pool_hdr); /* * Align size to 16 bytes */ chunk_size = TC_ALIGN16(size + prefix_len); if (space_left < chunk_size) { return NULL; } result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len); #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size); #endif pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size); result->flags = TALLOC_MAGIC | TALLOC_FLAG_POOLMEM; result->pool = pool_hdr; pool_hdr->object_count++; return result; } /* Allocate a bit of memory as a child of an existing pointer */ static inline void *__talloc_with_prefix(const void *context, size_t size, size_t prefix_len) { struct talloc_chunk *tc = NULL; struct talloc_memlimit *limit = NULL; size_t total_len = TC_HDR_SIZE + size + prefix_len; if (unlikely(context == NULL)) { context = null_context; } if (unlikely(size >= MAX_TALLOC_SIZE)) { return NULL; } if (unlikely(total_len < TC_HDR_SIZE)) { return NULL; } if (context != NULL) { struct talloc_chunk *ptc = talloc_chunk_from_ptr(context); if (ptc->limit != NULL) { limit = ptc->limit; } tc = talloc_alloc_pool(ptc, TC_HDR_SIZE+size, prefix_len); } if (tc == NULL) { char *ptr; /* * Only do the memlimit check/update on actual allocation. 
*/ if (!talloc_memlimit_check(limit, total_len)) { errno = ENOMEM; return NULL; } ptr = malloc(total_len); if (unlikely(ptr == NULL)) { return NULL; } tc = (struct talloc_chunk *)(ptr + prefix_len); tc->flags = TALLOC_MAGIC; tc->pool = NULL; talloc_memlimit_grow(limit, total_len); } tc->limit = limit; tc->size = size; tc->destructor = NULL; tc->child = NULL; tc->name = NULL; tc->refs = NULL; if (likely(context)) { struct talloc_chunk *parent = talloc_chunk_from_ptr(context); if (parent->child) { parent->child->parent = NULL; tc->next = parent->child; tc->next->prev = tc; } else { tc->next = NULL; } tc->parent = parent; tc->prev = NULL; parent->child = tc; } else { tc->next = tc->prev = tc->parent = NULL; } return TC_PTR_FROM_CHUNK(tc); } static inline void *__talloc(const void *context, size_t size) { return __talloc_with_prefix(context, size, 0); } /* * Create a talloc pool */ _PUBLIC_ void *talloc_pool(const void *context, size_t size) { struct talloc_chunk *tc; struct talloc_pool_hdr *pool_hdr; void *result; result = __talloc_with_prefix(context, size, TP_HDR_SIZE); if (unlikely(result == NULL)) { return NULL; } tc = talloc_chunk_from_ptr(result); pool_hdr = talloc_pool_from_chunk(tc); tc->flags |= TALLOC_FLAG_POOL; tc->size = 0; pool_hdr->object_count = 1; pool_hdr->end = result; pool_hdr->poolsize = size; tc_invalidate_pool(pool_hdr); return result; } /* * Create a talloc pool correctly sized for a basic size plus * a number of subobjects whose total size is given. Essentially * a custom allocator for talloc to reduce fragmentation. */ _PUBLIC_ void *_talloc_pooled_object(const void *ctx, size_t type_size, const char *type_name, unsigned num_subobjects, size_t total_subobjects_size) { size_t poolsize, subobjects_slack, tmp; struct talloc_chunk *tc; struct talloc_pool_hdr *pool_hdr; void *ret; poolsize = type_size + total_subobjects_size; if ((poolsize < type_size) || (poolsize < total_subobjects_size)) { goto overflow; } if (num_subobjects == UINT_MAX) { goto overflow; } num_subobjects += 1; /* the object body itself */ /* * Alignment can increase the pool size by at most 15 bytes per object * plus alignment for the object itself */ subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects; if (subobjects_slack < num_subobjects) { goto overflow; } tmp = poolsize + subobjects_slack; if ((tmp < poolsize) || (tmp < subobjects_slack)) { goto overflow; } poolsize = tmp; ret = talloc_pool(ctx, poolsize); if (ret == NULL) { return NULL; } tc = talloc_chunk_from_ptr(ret); tc->size = type_size; pool_hdr = talloc_pool_from_chunk(tc); #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size); #endif pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size)); talloc_set_name_const(ret, type_name); return ret; overflow: return NULL; } /* setup a destructor to be called on free of a pointer the destructor should return 0 on success, or -1 on failure. if the destructor fails then the free is failed, and the memory can be continued to be used */ _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *)) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); tc->destructor = destructor; } /* increase the reference count on a piece of memory. 
*/ _PUBLIC_ int talloc_increase_ref_count(const void *ptr) { if (unlikely(!talloc_reference(null_context, ptr))) { return -1; } return 0; } /* helper for talloc_reference() this is referenced by a function pointer and should not be inline */ static int talloc_reference_destructor(struct talloc_reference_handle *handle) { struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr); _TLIST_REMOVE(ptr_tc->refs, handle); return 0; } /* more efficient way to add a name to a pointer - the name must point to a true string constant */ static inline void _talloc_set_name_const(const void *ptr, const char *name) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); tc->name = name; } /* internal talloc_named_const() */ static inline void *_talloc_named_const(const void *context, size_t size, const char *name) { void *ptr; ptr = __talloc(context, size); if (unlikely(ptr == NULL)) { return NULL; } _talloc_set_name_const(ptr, name); return ptr; } /* make a secondary reference to a pointer, hanging off the given context. the pointer remains valid until both the original caller and this given context are freed. the major use for this is when two different structures need to reference the same underlying data, and you want to be able to free the two instances separately, and in either order */ _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location) { struct talloc_chunk *tc; struct talloc_reference_handle *handle; if (unlikely(ptr == NULL)) return NULL; tc = talloc_chunk_from_ptr(ptr); handle = (struct talloc_reference_handle *)_talloc_named_const(context, sizeof(struct talloc_reference_handle), TALLOC_MAGIC_REFERENCE); if (unlikely(handle == NULL)) return NULL; /* note that we hang the destructor off the handle, not the main context as that allows the caller to still setup their own destructor on the context if they want to */ talloc_set_destructor(handle, talloc_reference_destructor); handle->ptr = discard_const_p(void, ptr); handle->location = location; _TLIST_ADD(tc->refs, handle); return handle->ptr; } static void *_talloc_steal_internal(const void *new_ctx, const void *ptr); static inline void _talloc_free_poolmem(struct talloc_chunk *tc, const char *location) { struct talloc_pool_hdr *pool; struct talloc_chunk *pool_tc; void *next_tc; pool = tc->pool; pool_tc = talloc_chunk_from_pool(pool); next_tc = tc_next_chunk(tc); tc->flags |= TALLOC_FLAG_FREE; /* we mark the freed memory with where we called the free * from. This means on a double free error we can report where * the first free came from */ tc->name = location; TC_INVALIDATE_FULL_CHUNK(tc); if (unlikely(pool->object_count == 0)) { talloc_abort("Pool object count zero!"); return; } pool->object_count--; if (unlikely(pool->object_count == 1 && !(pool_tc->flags & TALLOC_FLAG_FREE))) { /* * if there is just one object left in the pool * and pool->flags does not have TALLOC_FLAG_FREE, * it means this is the pool itself and * the rest is available for new objects * again. */ pool->end = tc_pool_first_chunk(pool); tc_invalidate_pool(pool); return; } if (unlikely(pool->object_count == 0)) { /* * we mark the freed memory with where we called the free * from. This means on a double free error we can report where * the first free came from */ pool_tc->name = location; if (pool_tc->flags & TALLOC_FLAG_POOLMEM) { _talloc_free_poolmem(pool_tc, location); } else { /* * The talloc_memlimit_update_on_free() * call takes into account the * prefix TP_HDR_SIZE allocated before * the pool talloc_chunk. 
*/ talloc_memlimit_update_on_free(pool_tc); TC_INVALIDATE_FULL_CHUNK(pool_tc); free(pool); } return; } if (pool->end == next_tc) { /* * if pool->pool still points to end of * 'tc' (which is stored in the 'next_tc' variable), * we can reclaim the memory of 'tc'. */ pool->end = tc; return; } /* * Do nothing. The memory is just "wasted", waiting for the pool * itself to be freed. */ } static inline void _talloc_free_children_internal(struct talloc_chunk *tc, void *ptr, const char *location); /* internal talloc_free call */ static inline int _talloc_free_internal(void *ptr, const char *location) { struct talloc_chunk *tc; void *ptr_to_free; if (unlikely(ptr == NULL)) { return -1; } /* possibly initialised the talloc fill value */ if (unlikely(!talloc_fill.initialised)) { const char *fill = getenv(TALLOC_FILL_ENV); if (fill != NULL) { talloc_fill.enabled = true; talloc_fill.fill_value = strtoul(fill, NULL, 0); } talloc_fill.initialised = true; } tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->refs)) { int is_child; /* check if this is a reference from a child or * grandchild back to it's parent or grandparent * * in that case we need to remove the reference and * call another instance of talloc_free() on the current * pointer. */ is_child = talloc_is_parent(tc->refs, ptr); _talloc_free_internal(tc->refs, location); if (is_child) { return _talloc_free_internal(ptr, location); } return -1; } if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) { /* we have a free loop - stop looping */ return 0; } if (unlikely(tc->destructor)) { talloc_destructor_t d = tc->destructor; if (d == (talloc_destructor_t)-1) { return -1; } tc->destructor = (talloc_destructor_t)-1; if (d(ptr) == -1) { tc->destructor = d; return -1; } tc->destructor = NULL; } if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; tc->prev = tc->next = NULL; } tc->flags |= TALLOC_FLAG_LOOP; _talloc_free_children_internal(tc, ptr, location); tc->flags |= TALLOC_FLAG_FREE; /* we mark the freed memory with where we called the free * from. This means on a double free error we can report where * the first free came from */ tc->name = location; if (tc->flags & TALLOC_FLAG_POOL) { struct talloc_pool_hdr *pool; pool = talloc_pool_from_chunk(tc); if (unlikely(pool->object_count == 0)) { talloc_abort("Pool object count zero!"); return 0; } pool->object_count--; if (likely(pool->object_count != 0)) { return 0; } /* * With object_count==0, a pool becomes a normal piece of * memory to free. If it's allocated inside a pool, it needs * to be freed as poolmem, else it needs to be just freed. */ ptr_to_free = pool; } else { ptr_to_free = tc; } if (tc->flags & TALLOC_FLAG_POOLMEM) { _talloc_free_poolmem(tc, location); return 0; } talloc_memlimit_update_on_free(tc); TC_INVALIDATE_FULL_CHUNK(tc); free(ptr_to_free); return 0; } static size_t _talloc_total_limit_size(const void *ptr, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit); /* move a lump of memory from one talloc context to another return the ptr on success, or NULL if it could not be transferred. passing NULL as ptr will always return NULL with no side effects. 
*/ static void *_talloc_steal_internal(const void *new_ctx, const void *ptr) { struct talloc_chunk *tc, *new_tc; size_t ctx_size = 0; if (unlikely(!ptr)) { return NULL; } if (unlikely(new_ctx == NULL)) { new_ctx = null_context; } tc = talloc_chunk_from_ptr(ptr); if (tc->limit != NULL) { ctx_size = _talloc_total_limit_size(ptr, NULL, NULL); /* Decrement the memory limit from the source .. */ talloc_memlimit_shrink(tc->limit->upper, ctx_size); if (tc->limit->parent == tc) { tc->limit->upper = NULL; } else { tc->limit = NULL; } } if (unlikely(new_ctx == NULL)) { if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; } tc->parent = tc->next = tc->prev = NULL; return discard_const_p(void, ptr); } new_tc = talloc_chunk_from_ptr(new_ctx); if (unlikely(tc == new_tc || tc->parent == new_tc)) { return discard_const_p(void, ptr); } if (tc->parent) { _TLIST_REMOVE(tc->parent->child, tc); if (tc->parent->child) { tc->parent->child->parent = tc->parent; } } else { if (tc->prev) tc->prev->next = tc->next; if (tc->next) tc->next->prev = tc->prev; tc->prev = tc->next = NULL; } tc->parent = new_tc; if (new_tc->child) new_tc->child->parent = NULL; _TLIST_ADD(new_tc->child, tc); if (tc->limit || new_tc->limit) { ctx_size = _talloc_total_limit_size(ptr, tc->limit, new_tc->limit); /* .. and increment it in the destination. */ if (new_tc->limit) { talloc_memlimit_grow(new_tc->limit, ctx_size); } } return discard_const_p(void, ptr); } /* move a lump of memory from one talloc context to another return the ptr on success, or NULL if it could not be transferred. passing NULL as ptr will always return NULL with no side effects. */ _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return NULL; } tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) { struct talloc_reference_handle *h; talloc_log("WARNING: talloc_steal with references at %s\n", location); for (h=tc->refs; h; h=h->next) { talloc_log("\treference at %s\n", h->location); } } #if 0 /* this test is probably too expensive to have on in the normal build, but it useful for debugging */ if (talloc_is_parent(new_ctx, ptr)) { talloc_log("WARNING: stealing into talloc child at %s\n", location); } #endif return _talloc_steal_internal(new_ctx, ptr); } /* this is like a talloc_steal(), but you must supply the old parent. This resolves the ambiguity in a talloc_steal() which is called on a context that has more than one parent (via references) The old parent can be either a reference or a parent */ _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr) { struct talloc_chunk *tc; struct talloc_reference_handle *h; if (unlikely(ptr == NULL)) { return NULL; } if (old_parent == talloc_parent(ptr)) { return _talloc_steal_internal(new_parent, ptr); } tc = talloc_chunk_from_ptr(ptr); for (h=tc->refs;h;h=h->next) { if (talloc_parent(h) == old_parent) { if (_talloc_steal_internal(new_parent, h) != h) { return NULL; } return discard_const_p(void, ptr); } } /* it wasn't a parent */ return NULL; } /* remove a secondary reference to a pointer. This undo's what talloc_reference() has done. 
The context and pointer arguments must match those given to a talloc_reference() */ static inline int talloc_unreference(const void *context, const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); struct talloc_reference_handle *h; if (unlikely(context == NULL)) { context = null_context; } for (h=tc->refs;h;h=h->next) { struct talloc_chunk *p = talloc_parent_chunk(h); if (p == NULL) { if (context == NULL) break; } else if (TC_PTR_FROM_CHUNK(p) == context) { break; } } if (h == NULL) { return -1; } return _talloc_free_internal(h, __location__); } /* remove a specific parent context from a pointer. This is a more controlled variant of talloc_free() */ _PUBLIC_ int talloc_unlink(const void *context, void *ptr) { struct talloc_chunk *tc_p, *new_p, *tc_c; void *new_parent; if (ptr == NULL) { return -1; } if (context == NULL) { context = null_context; } if (talloc_unreference(context, ptr) == 0) { return 0; } if (context != NULL) { tc_c = talloc_chunk_from_ptr(context); } else { tc_c = NULL; } if (tc_c != talloc_parent_chunk(ptr)) { return -1; } tc_p = talloc_chunk_from_ptr(ptr); if (tc_p->refs == NULL) { return _talloc_free_internal(ptr, __location__); } new_p = talloc_parent_chunk(tc_p->refs); if (new_p) { new_parent = TC_PTR_FROM_CHUNK(new_p); } else { new_parent = NULL; } if (talloc_unreference(new_parent, ptr) != 0) { return -1; } _talloc_steal_internal(new_parent, ptr); return 0; } /* add a name to an existing pointer - va_list version */ static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); tc->name = talloc_vasprintf(ptr, fmt, ap); if (likely(tc->name)) { _talloc_set_name_const(tc->name, ".name"); } return tc->name; } /* add a name to an existing pointer */ _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...) { const char *name; va_list ap; va_start(ap, fmt); name = talloc_set_name_v(ptr, fmt, ap); va_end(ap); return name; } /* create a named talloc pointer. Any talloc pointer can be named, and talloc_named() operates just like talloc() except that it allows you to name the pointer. */ _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...) { va_list ap; void *ptr; const char *name; ptr = __talloc(context, size); if (unlikely(ptr == NULL)) return NULL; va_start(ap, fmt); name = talloc_set_name_v(ptr, fmt, ap); va_end(ap); if (unlikely(name == NULL)) { _talloc_free_internal(ptr, __location__); return NULL; } return ptr; } /* return the name of a talloc ptr, or "UNNAMED" */ _PUBLIC_ const char *talloc_get_name(const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) { return ".reference"; } if (likely(tc->name)) { return tc->name; } return "UNNAMED"; } /* check if a pointer has the given name. 
If it does, return the pointer, otherwise return NULL */ _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name) { const char *pname; if (unlikely(ptr == NULL)) return NULL; pname = talloc_get_name(ptr); if (likely(pname == name || strcmp(pname, name) == 0)) { return discard_const_p(void, ptr); } return NULL; } static void talloc_abort_type_mismatch(const char *location, const char *name, const char *expected) { const char *reason; reason = talloc_asprintf(NULL, "%s: Type mismatch: name[%s] expected[%s]", location, name?name:"NULL", expected); if (!reason) { reason = "Type mismatch"; } talloc_abort(reason); } _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location) { const char *pname; if (unlikely(ptr == NULL)) { talloc_abort_type_mismatch(location, NULL, name); return NULL; } pname = talloc_get_name(ptr); if (likely(pname == name || strcmp(pname, name) == 0)) { return discard_const_p(void, ptr); } talloc_abort_type_mismatch(location, pname, name); return NULL; } /* this is for compatibility with older versions of talloc */ _PUBLIC_ void *talloc_init(const char *fmt, ...) { va_list ap; void *ptr; const char *name; ptr = __talloc(NULL, 0); if (unlikely(ptr == NULL)) return NULL; va_start(ap, fmt); name = talloc_set_name_v(ptr, fmt, ap); va_end(ap); if (unlikely(name == NULL)) { _talloc_free_internal(ptr, __location__); return NULL; } return ptr; } static inline void _talloc_free_children_internal(struct talloc_chunk *tc, void *ptr, const char *location) { while (tc->child) { /* we need to work out who will own an abandoned child if it cannot be freed. In priority order, the first choice is owner of any remaining reference to this pointer, the second choice is our parent, and the final choice is the null context. */ void *child = TC_PTR_FROM_CHUNK(tc->child); const void *new_parent = null_context; if (unlikely(tc->child->refs)) { struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs); if (p) new_parent = TC_PTR_FROM_CHUNK(p); } if (unlikely(_talloc_free_internal(child, location) == -1)) { if (new_parent == null_context) { struct talloc_chunk *p = talloc_parent_chunk(ptr); if (p) new_parent = TC_PTR_FROM_CHUNK(p); } _talloc_steal_internal(new_parent, child); } } } /* this is a replacement for the Samba3 talloc_destroy_pool functionality. It should probably not be used in new code. It's in here to keep the talloc code consistent across Samba 3 and 4. */ _PUBLIC_ void talloc_free_children(void *ptr) { struct talloc_chunk *tc_name = NULL; struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return; } tc = talloc_chunk_from_ptr(ptr); /* we do not want to free the context name if it is a child .. */ if (likely(tc->child)) { for (tc_name = tc->child; tc_name; tc_name = tc_name->next) { if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break; } if (tc_name) { _TLIST_REMOVE(tc->child, tc_name); if (tc->child) { tc->child->parent = tc; } } } _talloc_free_children_internal(tc, ptr, __location__); /* .. so we put it back after all other children have been freed */ if (tc_name) { if (tc->child) { tc->child->parent = NULL; } tc_name->parent = tc; _TLIST_ADD(tc->child, tc_name); } } /* Allocate a bit of memory as a child of an existing pointer */ _PUBLIC_ void *_talloc(const void *context, size_t size) { return __talloc(context, size); } /* externally callable talloc_set_name_const() */ _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name) { _talloc_set_name_const(ptr, name); } /* create a named talloc pointer. 
Any talloc pointer can be named, and talloc_named() operates just like talloc() except that it allows you to name the pointer. */ _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name) { return _talloc_named_const(context, size, name); } /* free a talloc pointer. This also frees all child pointers of this pointer recursively return 0 if the memory is actually freed, otherwise -1. The memory will not be freed if the ref_count is > 1 or the destructor (if any) returns non-zero */ _PUBLIC_ int _talloc_free(void *ptr, const char *location) { struct talloc_chunk *tc; if (unlikely(ptr == NULL)) { return -1; } tc = talloc_chunk_from_ptr(ptr); if (unlikely(tc->refs != NULL)) { struct talloc_reference_handle *h; if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) { /* in this case we do know which parent should get this pointer, as there is really only one parent */ return talloc_unlink(null_context, ptr); } talloc_log("ERROR: talloc_free with references at %s\n", location); for (h=tc->refs; h; h=h->next) { talloc_log("\treference at %s\n", h->location); } return -1; } return _talloc_free_internal(ptr, location); } /* A talloc version of realloc. The context argument is only used if ptr is NULL */ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name) { struct talloc_chunk *tc; void *new_ptr; bool malloced = false; struct talloc_pool_hdr *pool_hdr = NULL; size_t old_size = 0; size_t new_size = 0; /* size zero is equivalent to free() */ if (unlikely(size == 0)) { talloc_unlink(context, ptr); return NULL; } if (unlikely(size >= MAX_TALLOC_SIZE)) { return NULL; } /* realloc(NULL) is equivalent to malloc() */ if (ptr == NULL) { return _talloc_named_const(context, size, name); } tc = talloc_chunk_from_ptr(ptr); /* don't allow realloc on referenced pointers */ if (unlikely(tc->refs)) { return NULL; } /* don't let anybody try to realloc a talloc_pool */ if (unlikely(tc->flags & TALLOC_FLAG_POOL)) { return NULL; } if (tc->limit && (size > tc->size)) { if (!talloc_memlimit_check(tc->limit, (size - tc->size))) { errno = ENOMEM; return NULL; } } /* handle realloc inside a talloc_pool */ if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) { pool_hdr = tc->pool; } #if (ALWAYS_REALLOC == 0) /* don't shrink if we have less than 1k to gain */ if (size < tc->size && tc->limit == NULL) { if (pool_hdr) { void *next_tc = tc_next_chunk(tc); TC_INVALIDATE_SHRINK_CHUNK(tc, size); tc->size = size; if (next_tc == pool_hdr->end) { /* note: tc->size has changed, so this works */ pool_hdr->end = tc_next_chunk(tc); } return ptr; } else if ((tc->size - size) < 1024) { /* * if we call TC_INVALIDATE_SHRINK_CHUNK() here * we would need to call TC_UNDEFINE_GROW_CHUNK() * after each realloc call, which slows down * testing a lot :-(. * * That is why we only mark memory as undefined here. */ TC_UNDEFINE_SHRINK_CHUNK(tc, size); /* do not shrink if we have less than 1k to gain */ tc->size = size; return ptr; } } else if (tc->size == size) { /* * do not change the pointer if it is exactly * the same size. 
*/ return ptr; } #endif /* by resetting magic we catch users of the old memory */ tc->flags |= TALLOC_FLAG_FREE; #if ALWAYS_REALLOC if (pool_hdr) { new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE, 0); pool_hdr->object_count--; if (new_ptr == NULL) { new_ptr = malloc(TC_HDR_SIZE+size); malloced = true; new_size = size; } if (new_ptr) { memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE); TC_INVALIDATE_FULL_CHUNK(tc); } } else { /* We're doing malloc then free here, so record the difference. */ old_size = tc->size; new_size = size; new_ptr = malloc(size + TC_HDR_SIZE); if (new_ptr) { memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE); free(tc); } } #else if (pool_hdr) { struct talloc_chunk *pool_tc; void *next_tc = tc_next_chunk(tc); size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size); size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size); size_t space_needed; size_t space_left; unsigned int chunk_count = pool_hdr->object_count; pool_tc = talloc_chunk_from_pool(pool_hdr); if (!(pool_tc->flags & TALLOC_FLAG_FREE)) { chunk_count -= 1; } if (chunk_count == 1) { /* * optimize for the case where 'tc' is the only * chunk in the pool. */ char *start = tc_pool_first_chunk(pool_hdr); space_needed = new_chunk_size; space_left = (char *)tc_pool_end(pool_hdr) - start; if (space_left >= space_needed) { size_t old_used = TC_HDR_SIZE + tc->size; size_t new_used = TC_HDR_SIZE + size; new_ptr = start; #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) { /* * The area from * start -> tc may have * been freed and thus been marked as * VALGRIND_MEM_NOACCESS. Set it to * VALGRIND_MEM_UNDEFINED so we can * copy into it without valgrind errors. * We can't just mark * new_ptr -> new_ptr + old_used * as this may overlap on top of tc, * (which is why we use memmove, not * memcpy below) hence the MIN. */ size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used); VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len); } #endif memmove(new_ptr, tc, old_used); tc = (struct talloc_chunk *)new_ptr; TC_UNDEFINE_GROW_CHUNK(tc, size); /* * first we do not align the pool pointer * because we want to invalidate the padding * too. */ pool_hdr->end = new_used + (char *)new_ptr; tc_invalidate_pool(pool_hdr); /* now the aligned pointer */ pool_hdr->end = new_chunk_size + (char *)new_ptr; goto got_new_ptr; } next_tc = NULL; } if (new_chunk_size == old_chunk_size) { TC_UNDEFINE_GROW_CHUNK(tc, size); tc->flags &= ~TALLOC_FLAG_FREE; tc->size = size; return ptr; } if (next_tc == pool_hdr->end) { /* * optimize for the case where 'tc' is the last * chunk in the pool. */ space_needed = new_chunk_size - old_chunk_size; space_left = tc_pool_space_left(pool_hdr); if (space_left >= space_needed) { TC_UNDEFINE_GROW_CHUNK(tc, size); tc->flags &= ~TALLOC_FLAG_FREE; tc->size = size; pool_hdr->end = tc_next_chunk(tc); return ptr; } } new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE, 0); if (new_ptr == NULL) { new_ptr = malloc(TC_HDR_SIZE+size); malloced = true; new_size = size; } if (new_ptr) { memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE); _talloc_free_poolmem(tc, __location__ "_talloc_realloc"); } } else { /* We're doing realloc here, so record the difference. 
*/ old_size = tc->size; new_size = size; new_ptr = realloc(tc, size + TC_HDR_SIZE); } got_new_ptr: #endif if (unlikely(!new_ptr)) { tc->flags &= ~TALLOC_FLAG_FREE; return NULL; } tc = (struct talloc_chunk *)new_ptr; tc->flags &= ~TALLOC_FLAG_FREE; if (malloced) { tc->flags &= ~TALLOC_FLAG_POOLMEM; } if (tc->parent) { tc->parent->child = tc; } if (tc->child) { tc->child->parent = tc; } if (tc->prev) { tc->prev->next = tc; } if (tc->next) { tc->next->prev = tc; } if (new_size > old_size) { talloc_memlimit_grow(tc->limit, new_size - old_size); } else if (new_size < old_size) { talloc_memlimit_shrink(tc->limit, old_size - new_size); } tc->size = size; _talloc_set_name_const(TC_PTR_FROM_CHUNK(tc), name); return TC_PTR_FROM_CHUNK(tc); } /* a wrapper around talloc_steal() for situations where you are moving a pointer between two structures, and want the old pointer to be set to NULL */ _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr) { const void **pptr = discard_const_p(const void *,_pptr); void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr)); (*pptr) = NULL; return ret; } enum talloc_mem_count_type { TOTAL_MEM_SIZE, TOTAL_MEM_BLOCKS, TOTAL_MEM_LIMIT, }; static size_t _talloc_total_mem_internal(const void *ptr, enum talloc_mem_count_type type, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit) { size_t total = 0; struct talloc_chunk *c, *tc; if (ptr == NULL) { ptr = null_context; } if (ptr == NULL) { return 0; } tc = talloc_chunk_from_ptr(ptr); if (old_limit || new_limit) { if (tc->limit && tc->limit->upper == old_limit) { tc->limit->upper = new_limit; } } /* optimize in the memlimits case */ if (type == TOTAL_MEM_LIMIT && tc->limit != NULL && tc->limit != old_limit && tc->limit->parent == tc) { return tc->limit->cur_size; } if (tc->flags & TALLOC_FLAG_LOOP) { return 0; } tc->flags |= TALLOC_FLAG_LOOP; if (old_limit || new_limit) { if (old_limit == tc->limit) { tc->limit = new_limit; } } switch (type) { case TOTAL_MEM_SIZE: if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { total = tc->size; } break; case TOTAL_MEM_BLOCKS: total++; break; case TOTAL_MEM_LIMIT: if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { /* * Don't count memory allocated from a pool * when calculating limits. Only count the * pool itself. */ if (!(tc->flags & TALLOC_FLAG_POOLMEM)) { if (tc->flags & TALLOC_FLAG_POOL) { /* * If this is a pool, the allocated * size is in the pool header, and * remember to add in the prefix * length. 
*/ struct talloc_pool_hdr *pool_hdr = talloc_pool_from_chunk(tc); total = pool_hdr->poolsize + TC_HDR_SIZE + TP_HDR_SIZE; } else { total = tc->size + TC_HDR_SIZE; } } } break; } for (c = tc->child; c; c = c->next) { total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type, old_limit, new_limit); } tc->flags &= ~TALLOC_FLAG_LOOP; return total; } /* return the total size of a talloc pool (subtree) */ _PUBLIC_ size_t talloc_total_size(const void *ptr) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL); } /* return the total number of blocks in a talloc pool (subtree) */ _PUBLIC_ size_t talloc_total_blocks(const void *ptr) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL); } /* return the number of external references to a pointer */ _PUBLIC_ size_t talloc_reference_count(const void *ptr) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); struct talloc_reference_handle *h; size_t ret = 0; for (h=tc->refs;h;h=h->next) { ret++; } return ret; } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth, void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *private_data), void *private_data) { struct talloc_chunk *c, *tc; if (ptr == NULL) { ptr = null_context; } if (ptr == NULL) return; tc = talloc_chunk_from_ptr(ptr); if (tc->flags & TALLOC_FLAG_LOOP) { return; } callback(ptr, depth, max_depth, 0, private_data); if (max_depth >= 0 && depth >= max_depth) { return; } tc->flags |= TALLOC_FLAG_LOOP; for (c=tc->child;c;c=c->next) { if (c->name == TALLOC_MAGIC_REFERENCE) { struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c); callback(h->ptr, depth + 1, max_depth, 1, private_data); } else { talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data); } } tc->flags &= ~TALLOC_FLAG_LOOP; } static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f) { const char *name = talloc_get_name(ptr); struct talloc_chunk *tc; FILE *f = (FILE *)_f; if (is_ref) { fprintf(f, "%*sreference to: %s\n", depth*4, "", name); return; } tc = talloc_chunk_from_ptr(ptr); if (tc->limit && tc->limit->parent == tc) { fprintf(f, "%*s%-30s is a memlimit context" " (max_size = %lu bytes, cur_size = %lu bytes)\n", depth*4, "", name, (unsigned long)tc->limit->max_size, (unsigned long)tc->limit->cur_size); } if (depth == 0) { fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n", (max_depth < 0 ? 
"full " :""), name, (unsigned long)talloc_total_size(ptr), (unsigned long)talloc_total_blocks(ptr)); return; } fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n", depth*4, "", name, (unsigned long)talloc_total_size(ptr), (unsigned long)talloc_total_blocks(ptr), (int)talloc_reference_count(ptr), ptr); #if 0 fprintf(f, "content: "); if (talloc_total_size(ptr)) { int tot = talloc_total_size(ptr); int i; for (i = 0; i < tot; i++) { if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) { fprintf(f, "%c", ((char *)ptr)[i]); } else { fprintf(f, "~%02x", ((char *)ptr)[i]); } } } fprintf(f, "\n"); #endif } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f) { if (f) { talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f); fflush(f); } } /* report on memory usage by all children of a pointer, giving a full tree view */ _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f) { talloc_report_depth_file(ptr, 0, -1, f); } /* report on memory usage by all children of a pointer */ _PUBLIC_ void talloc_report(const void *ptr, FILE *f) { talloc_report_depth_file(ptr, 0, 1, f); } /* report on any memory hanging off the null context */ static void talloc_report_null(void) { if (talloc_total_size(null_context) != 0) { talloc_report(null_context, stderr); } } /* report on any memory hanging off the null context */ static void talloc_report_null_full(void) { if (talloc_total_size(null_context) != 0) { talloc_report_full(null_context, stderr); } } /* enable tracking of the NULL context */ _PUBLIC_ void talloc_enable_null_tracking(void) { if (null_context == NULL) { null_context = _talloc_named_const(NULL, 0, "null_context"); if (autofree_context != NULL) { talloc_reparent(NULL, null_context, autofree_context); } } } /* enable tracking of the NULL context, not moving the autofree context into the NULL context. This is needed for the talloc testsuite */ _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void) { if (null_context == NULL) { null_context = _talloc_named_const(NULL, 0, "null_context"); } } /* disable tracking of the NULL context */ _PUBLIC_ void talloc_disable_null_tracking(void) { if (null_context != NULL) { /* we have to move any children onto the real NULL context */ struct talloc_chunk *tc, *tc2; tc = talloc_chunk_from_ptr(null_context); for (tc2 = tc->child; tc2; tc2=tc2->next) { if (tc2->parent == tc) tc2->parent = NULL; if (tc2->prev == tc) tc2->prev = NULL; } for (tc2 = tc->next; tc2; tc2=tc2->next) { if (tc2->parent == tc) tc2->parent = NULL; if (tc2->prev == tc) tc2->prev = NULL; } tc->child = NULL; tc->next = NULL; } talloc_free(null_context); null_context = NULL; } /* enable leak reporting on exit */ _PUBLIC_ void talloc_enable_leak_report(void) { talloc_enable_null_tracking(); atexit(talloc_report_null); } /* enable full leak reporting on exit */ _PUBLIC_ void talloc_enable_leak_report_full(void) { talloc_enable_null_tracking(); atexit(talloc_report_null_full); } /* talloc and zero memory. */ _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name) { void *p = _talloc_named_const(ctx, size, name); if (p) { memset(p, '\0', size); } return p; } /* memdup with a talloc. 
*/ _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name) { void *newp = _talloc_named_const(t, size, name); if (likely(newp)) { memcpy(newp, p, size); } return newp; } static inline char *__talloc_strlendup(const void *t, const char *p, size_t len) { char *ret; ret = (char *)__talloc(t, len + 1); if (unlikely(!ret)) return NULL; memcpy(ret, p, len); ret[len] = 0; _talloc_set_name_const(ret, ret); return ret; } /* strdup with a talloc */ _PUBLIC_ char *talloc_strdup(const void *t, const char *p) { if (unlikely(!p)) return NULL; return __talloc_strlendup(t, p, strlen(p)); } /* strndup with a talloc */ _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n) { if (unlikely(!p)) return NULL; return __talloc_strlendup(t, p, strnlen(p, n)); } static inline char *__talloc_strlendup_append(char *s, size_t slen, const char *a, size_t alen) { char *ret; ret = talloc_realloc(NULL, s, char, slen + alen + 1); if (unlikely(!ret)) return NULL; /* append the string and the trailing \0 */ memcpy(&ret[slen], a, alen); ret[slen+alen] = 0; _talloc_set_name_const(ret, ret); return ret; } /* * Appends at the end of the string. */ _PUBLIC_ char *talloc_strdup_append(char *s, const char *a) { if (unlikely(!s)) { return talloc_strdup(NULL, a); } if (unlikely(!a)) { return s; } return __talloc_strlendup_append(s, strlen(s), a, strlen(a)); } /* * Appends at the end of the talloc'ed buffer, * not the end of the string. */ _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a) { size_t slen; if (unlikely(!s)) { return talloc_strdup(NULL, a); } if (unlikely(!a)) { return s; } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_strlendup_append(s, slen, a, strlen(a)); } /* * Appends at the end of the string. */ _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n) { if (unlikely(!s)) { return talloc_strndup(NULL, a, n); } if (unlikely(!a)) { return s; } return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n)); } /* * Appends at the end of the talloc'ed buffer, * not the end of the string. */ _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n) { size_t slen; if (unlikely(!s)) { return talloc_strndup(NULL, a, n); } if (unlikely(!a)) { return s; } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_strlendup_append(s, slen, a, strnlen(a, n)); } #ifndef HAVE_VA_COPY #ifdef HAVE___VA_COPY #define va_copy(dest, src) __va_copy(dest, src) #else #define va_copy(dest, src) (dest) = (src) #endif #endif _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) { int len; char *ret; va_list ap2; char c; /* this call looks strange, but it makes it work on older solaris boxes */ va_copy(ap2, ap); len = vsnprintf(&c, 1, fmt, ap2); va_end(ap2); if (unlikely(len < 0)) { return NULL; } ret = (char *)__talloc(t, len+1); if (unlikely(!ret)) return NULL; va_copy(ap2, ap); vsnprintf(ret, len+1, fmt, ap2); va_end(ap2); _talloc_set_name_const(ret, ret); return ret; } /* Perform string formatting, and return a pointer to newly allocated memory holding the result, inside a memory pool. */ _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...) 
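/* Illustrative usage sketch (not upstream code; mem_ctx, username and uid are
 * hypothetical names):
 *
 *   char *msg = talloc_asprintf(mem_ctx, "user=%s id=%d", username, uid);
 *
 * The formatted string is allocated as a talloc child of mem_ctx and is
 * released together with it.
 */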
{ va_list ap; char *ret; va_start(ap, fmt); ret = talloc_vasprintf(t, fmt, ap); va_end(ap); return ret; } static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0); static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, const char *fmt, va_list ap) { ssize_t alen; va_list ap2; char c; va_copy(ap2, ap); alen = vsnprintf(&c, 1, fmt, ap2); va_end(ap2); if (alen <= 0) { /* Either the vsnprintf failed or the format resulted in * no characters being formatted. In the former case, we * ought to return NULL, in the latter we ought to return * the original string. Most current callers of this * function expect it to never return NULL. */ return s; } s = talloc_realloc(NULL, s, char, slen + alen + 1); if (!s) return NULL; va_copy(ap2, ap); vsnprintf(s + slen, alen + 1, fmt, ap2); va_end(ap2); _talloc_set_name_const(s, s); return s; } /** * Realloc @p s to append the formatted result of @p fmt and @p ap, * and return @p s, which may have moved. Good for gradually * accumulating output into a string buffer. Appends at the end * of the string. **/ _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) { if (unlikely(!s)) { return talloc_vasprintf(NULL, fmt, ap); } return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap); } /** * Realloc @p s to append the formatted result of @p fmt and @p ap, * and return @p s, which may have moved. Always appends at the * end of the talloc'ed buffer, not the end of the string. **/ _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) { size_t slen; if (unlikely(!s)) { return talloc_vasprintf(NULL, fmt, ap); } slen = talloc_get_size(s); if (likely(slen > 0)) { slen--; } return __talloc_vaslenprintf_append(s, slen, fmt, ap); } /* Realloc @p s to append the formatted result of @p fmt and return @p s, which may have moved. Good for gradually accumulating output into a string buffer. */ _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...) { va_list ap; va_start(ap, fmt); s = talloc_vasprintf_append(s, fmt, ap); va_end(ap); return s; } /* Realloc @p s to append the formatted result of @p fmt and return @p s, which may have moved. Good for gradually accumulating output into a buffer. */ _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); s = talloc_vasprintf_append_buffer(s, fmt, ap); va_end(ap); return s; } /* alloc an array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_named_const(ctx, el_size * count, name); } /* alloc an zero array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_zero(ctx, el_size * count, name); } /* realloc an array, checking for integer overflow in the array size */ _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name) { if (count >= MAX_TALLOC_SIZE/el_size) { return NULL; } return _talloc_realloc(ctx, ptr, el_size * count, name); } /* a function version of talloc_realloc(), so it can be passed as a function pointer to libraries that want a realloc function (a realloc function encapsulates all the basic capabilities of an allocation library, which is why this is useful) */ _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size) { return _talloc_realloc(context, ptr, size, NULL); } static int talloc_autofree_destructor(void *ptr) { autofree_context = NULL; return 0; } static void talloc_autofree(void) { talloc_free(autofree_context); } /* return a context which will be auto-freed on exit this is useful for reducing the noise in leak reports */ _PUBLIC_ void *talloc_autofree_context(void) { if (autofree_context == NULL) { autofree_context = _talloc_named_const(NULL, 0, "autofree_context"); talloc_set_destructor(autofree_context, talloc_autofree_destructor); atexit(talloc_autofree); } return autofree_context; } _PUBLIC_ size_t talloc_get_size(const void *context) { struct talloc_chunk *tc; if (context == NULL) { context = null_context; } if (context == NULL) { return 0; } tc = talloc_chunk_from_ptr(context); return tc->size; } /* find a parent of this context that has the given name, if any */ _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name) { struct talloc_chunk *tc; if (context == NULL) { return NULL; } tc = talloc_chunk_from_ptr(context); while (tc) { if (tc->name && strcmp(tc->name, name) == 0) { return TC_PTR_FROM_CHUNK(tc); } while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; } } return NULL; } /* show the parentage of a context */ _PUBLIC_ void talloc_show_parents(const void *context, FILE *file) { struct talloc_chunk *tc; if (context == NULL) { fprintf(file, "talloc no parents for NULL\n"); return; } tc = talloc_chunk_from_ptr(context); fprintf(file, "talloc parents of '%s'\n", talloc_get_name(context)); while (tc) { fprintf(file, "\t'%s'\n", talloc_get_name(TC_PTR_FROM_CHUNK(tc))); while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; } } fflush(file); } /* return 1 if ptr is a parent of context */ static int _talloc_is_parent(const void *context, const void *ptr, int depth) { struct talloc_chunk *tc; if (context == NULL) { return 0; } tc = talloc_chunk_from_ptr(context); while (tc && depth > 0) { if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1; while (tc && tc->prev) tc = tc->prev; if (tc) { tc = tc->parent; depth--; } } return 0; } /* return 1 if ptr is a parent of context */ _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr) { return _talloc_is_parent(context, ptr, 
TALLOC_MAX_DEPTH); } /* return the total size of memory used by this context and all children */ static size_t _talloc_total_limit_size(const void *ptr, struct talloc_memlimit *old_limit, struct talloc_memlimit *new_limit) { return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT, old_limit, new_limit); } static bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { if (l->max_size != 0 && ((l->max_size <= l->cur_size) || (l->max_size - l->cur_size < size))) { return false; } } return true; } /* Update memory limits when freeing a talloc_chunk. */ static void talloc_memlimit_update_on_free(struct talloc_chunk *tc) { size_t limit_shrink_size; if (!tc->limit) { return; } /* * Pool entries don't count. Only the pools * themselves are counted as part of the memory * limits. Note that this also takes care of * nested pools which have both flags * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set. */ if (tc->flags & TALLOC_FLAG_POOLMEM) { return; } /* * If we are part of a memory limited context hierarchy * we need to subtract the memory used from the counters */ limit_shrink_size = tc->size+TC_HDR_SIZE; /* * If we're deallocating a pool, take into * account the prefix size added for the pool. */ if (tc->flags & TALLOC_FLAG_POOL) { limit_shrink_size += TP_HDR_SIZE; } talloc_memlimit_shrink(tc->limit, limit_shrink_size); if (tc->limit->parent == tc) { free(tc->limit); } tc->limit = NULL; } /* Increase memory limit accounting after a malloc/realloc. */ static void talloc_memlimit_grow(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { size_t new_cur_size = l->cur_size + size; if (new_cur_size < l->cur_size) { talloc_abort("logic error in talloc_memlimit_grow\n"); return; } l->cur_size = new_cur_size; } } /* Decrease memory limit accounting after a free/realloc. */ static void talloc_memlimit_shrink(struct talloc_memlimit *limit, size_t size) { struct talloc_memlimit *l; for (l = limit; l != NULL; l = l->upper) { if (l->cur_size < size) { talloc_abort("logic error in talloc_memlimit_shrink\n"); return; } l->cur_size = l->cur_size - size; } } _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size) { struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx); struct talloc_memlimit *orig_limit; struct talloc_memlimit *limit = NULL; if (tc->limit && tc->limit->parent == tc) { tc->limit->max_size = max_size; return 0; } orig_limit = tc->limit; limit = malloc(sizeof(struct talloc_memlimit)); if (limit == NULL) { return 1; } limit->parent = tc; limit->max_size = max_size; limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit); if (orig_limit) { limit->upper = orig_limit; } else { limit->upper = NULL; } return 0; } pytsk-20190507/talloc/talloc.h000066400000000000000000001730271346423473500160670ustar00rootroot00000000000000#ifndef _TALLOC_H_ #define _TALLOC_H_ /* Unix SMB/CIFS implementation. Samba temporary memory allocation functions Copyright (C) Andrew Tridgell 2004-2005 Copyright (C) Stefan Metzmacher 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include <stdio.h>
#include <stdarg.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup talloc The talloc API
 *
 * talloc is a hierarchical, reference counted memory pool system with
 * destructors. It is the core memory allocator used in Samba.
 *
 * @{
 */

#define TALLOC_VERSION_MAJOR 2
#define TALLOC_VERSION_MINOR 0

int talloc_version_major(void);
int talloc_version_minor(void);

/**
 * @brief Define a talloc parent type
 *
 * As talloc is a hierarchical memory allocator, every talloc chunk is a
 * potential parent to other talloc chunks. So defining a separate type for a
 * talloc chunk is not strictly necessary. TALLOC_CTX is defined nevertheless,
 * as it provides an indicator for function arguments. You will frequently
 * write code like
 *
 * @code
 * struct foo *foo_create(TALLOC_CTX *mem_ctx)
 * {
 *      struct foo *result;
 *      result = talloc(mem_ctx, struct foo);
 *      if (result == NULL) return NULL;
 *      ... initialize foo ...
 *      return result;
 * }
 * @endcode
 *
 * In this type of allocating functions it is handy to have a general
 * TALLOC_CTX type to indicate which parent to put allocated structures on.
 */
typedef void TALLOC_CTX;

/* this uses a little trick to allow __LINE__ to be stringified */
#ifndef __location__
#define __TALLOC_STRING_LINE1__(s)    #s
#define __TALLOC_STRING_LINE2__(s)   __TALLOC_STRING_LINE1__(s)
#define __TALLOC_STRING_LINE3__  __TALLOC_STRING_LINE2__(__LINE__)
#define __location__ __FILE__ ":" __TALLOC_STRING_LINE3__
#endif

#ifndef TALLOC_DEPRECATED
#define TALLOC_DEPRECATED 0
#endif

#ifndef PRINTF_ATTRIBUTE
#if (__GNUC__ >= 3)
/** Use gcc attribute to check printf fns.  a1 is the 1-based index of
 * the parameter containing the format, and a2 the index of the first
 * argument. Note that some gcc 2.x versions don't handle this
 * properly **/
#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2)))
#else
#define PRINTF_ATTRIBUTE(a1, a2)
#endif
#endif

#ifdef DOXYGEN
/**
 * @brief Create a new talloc context.
 *
 * The talloc() macro is the core of the talloc library. It takes a memory
 * context and a type, and returns a pointer to a new area of memory of the
 * given type.
 *
 * The returned pointer is itself a talloc context, so you can use it as the
 * context argument to more calls to talloc if you wish.
 *
 * The returned pointer is a "child" of the supplied context. This means that if
 * you talloc_free() the context then the new child disappears as well.
 * Alternatively you can free just the child.
 *
 * @param[in] ctx  A talloc context to create a new reference on or NULL to
 *                 create a new top level context.
 *
 * @param[in] type The type of memory to allocate.
 *
 * @return A type casted talloc context or NULL on error.
 *
 * @code
 * unsigned int *a, *b;
 *
 * a = talloc(NULL, unsigned int);
 * b = talloc(a, unsigned int);
 * @endcode
 *
 * @see talloc_zero
 * @see talloc_array
 * @see talloc_steal
 * @see talloc_free
 */
void *talloc(const void *ctx, #type);
#else
#define talloc(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type)
void *_talloc(const void *context, size_t size);
#endif

/**
 * @brief Create a new top level talloc context.
 *
 * This function creates a zero length named talloc context as a top level
 * context.
It is equivalent to: * * @code * talloc_named(NULL, 0, fmt, ...); * @endcode * @param[in] fmt Format string for the name. * * @param[in] ... Additional printf-style arguments. * * @return The allocated memory chunk, NULL on error. * * @see talloc_named() */ void *talloc_init(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2); #ifdef DOXYGEN /** * @brief Free a chunk of talloc memory. * * The talloc_free() function frees a piece of talloc memory, and all its * children. You can call talloc_free() on any pointer returned by * talloc(). * * The return value of talloc_free() indicates success or failure, with 0 * returned for success and -1 for failure. A possible failure condition * is if the pointer had a destructor attached to it and the destructor * returned -1. See talloc_set_destructor() for details on * destructors. Likewise, if "ptr" is NULL, then the function will make * no modifications and return -1. * * From version 2.0 and onwards, as a special case, talloc_free() is * refused on pointers that have more than one parent associated, as talloc * would have no way of knowing which parent should be removed. This is * different from older versions in the sense that always the reference to * the most recently established parent has been destroyed. Hence to free a * pointer that has more than one parent please use talloc_unlink(). * * To help you find problems in your code caused by this behaviour, if * you do try and free a pointer with more than one parent then the * talloc logging function will be called to give output like this: * * @code * ERROR: talloc_free with references at some_dir/source/foo.c:123 * reference at some_dir/source/other.c:325 * reference at some_dir/source/third.c:121 * @endcode * * Please see the documentation for talloc_set_log_fn() and * talloc_set_log_stderr() for more information on talloc logging * functions. * * If TALLOC_FREE_FILL environment variable is set, * the memory occupied by the context is filled with the value of this variable. * The value should be a numeric representation of the character you want to * use. * * talloc_free() operates recursively on its children. * * @param[in] ptr The chunk to be freed. * * @return Returns 0 on success and -1 on error. A possible * failure condition is if the pointer had a destructor * attached to it and the destructor returned -1. Likewise, * if "ptr" is NULL, then the function will make no * modifications and returns -1. * * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * * talloc_free(a); // Frees a and b * @endcode * * @see talloc_set_destructor() * @see talloc_unlink() */ int talloc_free(void *ptr); #else #define talloc_free(ctx) _talloc_free(ctx, __location__) int _talloc_free(void *ptr, const char *location); #endif /** * @brief Free a talloc chunk's children. * * The function walks along the list of all children of a talloc context and * talloc_free()s only the children, not the context itself. * * A NULL argument is handled as no-op. * * @param[in] ptr The chunk that you want to free the children of * (NULL is allowed too) */ void talloc_free_children(void *ptr); #ifdef DOXYGEN /** * @brief Assign a destructor function to be called when a chunk is freed. * * The function talloc_set_destructor() sets the "destructor" for the pointer * "ptr". A destructor is a function that is called when the memory used by a * pointer is about to be released. The destructor receives the pointer as an * argument, and should return 0 for success and -1 for failure. 
* * The destructor can do anything it wants to, including freeing other pieces * of memory. A common use for destructors is to clean up operating system * resources (such as open file descriptors) contained in the structure the * destructor is placed on. * * You can only place one destructor on a pointer. If you need more than one * destructor then you can create a zero-length child of the pointer and place * an additional destructor on that. * * To remove a destructor call talloc_set_destructor() with NULL for the * destructor. * * If your destructor attempts to talloc_free() the pointer that it is the * destructor for then talloc_free() will return -1 and the free will be * ignored. This would be a pointless operation anyway, as the destructor is * only called when the memory is just about to go away. * * @param[in] ptr The talloc chunk to add a destructor to. * * @param[in] destructor The destructor function to be called. NULL to remove * it. * * Example: * @code * static int destroy_fd(int *fd) { * close(*fd); * return 0; * } * * int *open_file(const char *filename) { * int *fd = talloc(NULL, int); * *fd = open(filename, O_RDONLY); * if (*fd < 0) { * talloc_free(fd); * return NULL; * } * // Whenever they free this, we close the file. * talloc_set_destructor(fd, destroy_fd); * return fd; * } * @endcode * * @see talloc() * @see talloc_free() */ void talloc_set_destructor(const void *ptr, int (*destructor)(void *)); /** * @brief Change a talloc chunk's parent. * * The talloc_steal() function changes the parent context of a talloc * pointer. It is typically used when the context that the pointer is * currently a child of is going to be freed and you wish to keep the * memory for a longer time. * * To make the changed hierarchy less error-prone, you might consider to use * talloc_move(). * * If you try and call talloc_steal() on a pointer that has more than one * parent then the result is ambiguous. Talloc will choose to remove the * parent that is currently indicated by talloc_parent() and replace it with * the chosen parent. You will also get a message like this via the talloc * logging functions: * * @code * WARNING: talloc_steal with references at some_dir/source/foo.c:123 * reference at some_dir/source/other.c:325 * reference at some_dir/source/third.c:121 * @endcode * * To unambiguously change the parent of a pointer please see the function * talloc_reparent(). See the talloc_set_log_fn() documentation for more * information on talloc logging. * * @param[in] new_ctx The new parent context. * * @param[in] ptr The talloc chunk to move. * * @return Returns the pointer that you pass it. It does not have * any failure modes. * * @note It is possible to produce loops in the parent/child relationship * if you are not careful with talloc_steal(). No guarantees are provided * as to your sanity or the safety of your data if you do this. 
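 *
 * Example (an illustrative sketch; tmp_ctx and long_lived_ctx are
 * hypothetical contexts):
 * @code
 * struct foo *item = talloc(tmp_ctx, struct foo);
 * talloc_steal(long_lived_ctx, item);
 * talloc_free(tmp_ctx); // item survives, now parented by long_lived_ctx
 * @endcode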
*/ void *talloc_steal(const void *new_ctx, const void *ptr); #else /* DOXYGEN */ /* try to make talloc_set_destructor() and talloc_steal() type safe, if we have a recent gcc */ #if (__GNUC__ >= 3) #define _TALLOC_TYPEOF(ptr) __typeof__(ptr) #define talloc_set_destructor(ptr, function) \ do { \ int (*_talloc_destructor_fn)(_TALLOC_TYPEOF(ptr)) = (function); \ _talloc_set_destructor((ptr), (int (*)(void *))_talloc_destructor_fn); \ } while(0) /* this extremely strange macro is to avoid some braindamaged warning stupidity in gcc 4.1.x */ #define talloc_steal(ctx, ptr) ({ _TALLOC_TYPEOF(ptr) __talloc_steal_ret = (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__); __talloc_steal_ret; }) #else /* __GNUC__ >= 3 */ #define talloc_set_destructor(ptr, function) \ _talloc_set_destructor((ptr), (int (*)(void *))(function)) #define _TALLOC_TYPEOF(ptr) void * #define talloc_steal(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__) #endif /* __GNUC__ >= 3 */ void _talloc_set_destructor(const void *ptr, int (*_destructor)(void *)); void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location); #endif /* DOXYGEN */ /** * @brief Assign a name to a talloc chunk. * * Each talloc pointer has a "name". The name is used principally for * debugging purposes, although it is also possible to set and get the name on * a pointer in as a way of "marking" pointers in your code. * * The main use for names on pointer is for "talloc reports". See * talloc_report() and talloc_report_full() for details. Also see * talloc_enable_leak_report() and talloc_enable_leak_report_full(). * * The talloc_set_name() function allocates memory as a child of the * pointer. It is logically equivalent to: * * @code * talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...)); * @endcode * * @param[in] ptr The talloc chunk to assign a name to. * * @param[in] fmt Format string for the name. * * @param[in] ... Add printf-style additional arguments. * * @return The assigned name, NULL on error. * * @note Multiple calls to talloc_set_name() will allocate more memory without * releasing the name. All of the memory is released when the ptr is freed * using talloc_free(). */ const char *talloc_set_name(const void *ptr, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); #ifdef DOXYGEN /** * @brief Change a talloc chunk's parent. * * This function has the same effect as talloc_steal(), and additionally sets * the source pointer to NULL. You would use it like this: * * @code * struct foo *X = talloc(tmp_ctx, struct foo); * struct foo *Y; * Y = talloc_move(new_ctx, &X); * @endcode * * @param[in] new_ctx The new parent context. * * @param[in] pptr Pointer to the talloc chunk to move. * * @return The pointer of the talloc chunk it has been moved to, * NULL on error. */ void *talloc_move(const void *new_ctx, void **pptr); #else #define talloc_move(ctx, pptr) (_TALLOC_TYPEOF(*(pptr)))_talloc_move((ctx),(void *)(pptr)) void *_talloc_move(const void *new_ctx, const void *pptr); #endif /** * @brief Assign a name to a talloc chunk. * * The function is just like talloc_set_name(), but it takes a string constant, * and is much faster. It is extensively used by the "auto naming" macros, such * as talloc_p(). * * This function does not allocate any memory. It just copies the supplied * pointer into the internal representation of the talloc ptr. This means you * must not pass a name pointer to memory that will disappear before the ptr * is freed with talloc_free(). 
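 *
 * Example (an illustrative sketch; the buffer is hypothetical):
 * @code
 * void *buf = talloc_size(NULL, 128);
 * talloc_set_name_const(buf, "io buffer"); // string literal: safe, it never goes away
 * @endcode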
* * @param[in] ptr The talloc chunk to assign a name to. * * @param[in] name Format string for the name. */ void talloc_set_name_const(const void *ptr, const char *name); /** * @brief Create a named talloc chunk. * * The talloc_named() function creates a named talloc pointer. It is * equivalent to: * * @code * ptr = talloc_size(context, size); * talloc_set_name(ptr, fmt, ....); * @endcode * * @param[in] context The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @param[in] fmt Format string for the name. * * @param[in] ... Additional printf-style arguments. * * @return The allocated memory chunk, NULL on error. * * @see talloc_set_name() */ void *talloc_named(const void *context, size_t size, const char *fmt, ...) PRINTF_ATTRIBUTE(3,4); /** * @brief Basic routine to allocate a chunk of memory. * * This is equivalent to: * * @code * ptr = talloc_size(context, size); * talloc_set_name_const(ptr, name); * @endcode * * @param[in] context The parent context. * * @param[in] size The number of char's that we want to allocate. * * @param[in] name The name the talloc block has. * * @return The allocated memory chunk, NULL on error. */ void *talloc_named_const(const void *context, size_t size, const char *name); #ifdef DOXYGEN /** * @brief Untyped allocation. * * The function should be used when you don't have a convenient type to pass to * talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so * you are on your own for type checking. * * Best to use talloc() or talloc_array() instead. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @return The allocated memory chunk, NULL on error. * * Example: * @code * void *mem = talloc_size(NULL, 100); * @endcode */ void *talloc_size(const void *ctx, size_t size); #else #define talloc_size(ctx, size) talloc_named_const(ctx, size, __location__) #endif #ifdef DOXYGEN /** * @brief Allocate into a typed pointer. * * The talloc_ptrtype() macro should be used when you have a pointer and want * to allocate memory to point at with this pointer. When compiling with * gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and * talloc_get_name() will return the current location in the source file and * not the type. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The pointer you want to assign the result to. * * @return The properly casted allocated memory chunk, NULL on * error. * * Example: * @code * unsigned int *a = talloc_ptrtype(NULL, a); * @endcode */ void *talloc_ptrtype(const void *ctx, #type); #else #define talloc_ptrtype(ctx, ptr) (_TALLOC_TYPEOF(ptr))talloc_size(ctx, sizeof(*(ptr))) #endif #ifdef DOXYGEN /** * @brief Allocate a new 0-sized talloc chunk. * * This is a utility macro that creates a new memory context hanging off an * existing context, automatically naming it "talloc_new: __location__" where * __location__ is the source line it is called from. It is particularly * useful for creating a new temporary working context. * * @param[in] ctx The talloc parent context. * * @return A new talloc chunk, NULL on error. */ void *talloc_new(const void *ctx); #else #define talloc_new(ctx) talloc_named_const(ctx, 0, "talloc_new: " __location__) #endif #ifdef DOXYGEN /** * @brief Allocate a 0-initizialized structure. 
* * The macro is equivalent to: * * @code * ptr = talloc(ctx, type); * if (ptr) memset(ptr, 0, sizeof(type)); * @endcode * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @return Pointer to a piece of memory, properly cast to 'type *', * NULL on error. * * Example: * @code * unsigned int *a, *b; * a = talloc_zero(NULL, unsigned int); * b = talloc_zero(a, unsigned int); * @endcode * * @see talloc() * @see talloc_zero_size() * @see talloc_zero_array() */ void *talloc_zero(const void *ctx, #type); /** * @brief Allocate untyped, 0-initialized memory. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size Number of char's that you want to allocate. * * @return The allocated memory chunk. */ void *talloc_zero_size(const void *ctx, size_t size); #else #define talloc_zero(ctx, type) (type *)_talloc_zero(ctx, sizeof(type), #type) #define talloc_zero_size(ctx, size) _talloc_zero(ctx, size, __location__) void *_talloc_zero(const void *ctx, size_t size, const char *name); #endif /** * @brief Return the name of a talloc chunk. * * @param[in] ptr The talloc chunk. * * @return The current name for the given talloc pointer. * * @see talloc_set_name() */ const char *talloc_get_name(const void *ptr); /** * @brief Verify that a talloc chunk carries a specified name. * * This function checks if a pointer has the specified name. If it does * then the pointer is returned. * * @param[in] ptr The talloc chunk to check. * * @param[in] name The name to check against. * * @return The pointer if the name matches, NULL if it doesn't. */ void *talloc_check_name(const void *ptr, const char *name); /** * @brief Get the parent chunk of a pointer. * * @param[in] ptr The talloc pointer to inspect. * * @return The talloc parent of ptr, NULL on error. */ void *talloc_parent(const void *ptr); /** * @brief Get a talloc chunk's parent name. * * @param[in] ptr The talloc pointer to inspect. * * @return The name of ptr's parent chunk. */ const char *talloc_parent_name(const void *ptr); /** * @brief Get the total size of a talloc chunk including its children. * * The function returns the total size in bytes used by this pointer and all * child pointers. Mostly useful for debugging. * * Passing NULL is allowed, but it will only give a meaningful result if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @return The total size. */ size_t talloc_total_size(const void *ptr); /** * @brief Get the number of talloc chunks hanging off a chunk. * * The talloc_total_blocks() function returns the total memory block * count used by this pointer and all child pointers. Mostly useful for * debugging. * * Passing NULL is allowed, but it will only give a meaningful result if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @return The total size. */ size_t talloc_total_blocks(const void *ptr); #ifdef DOXYGEN /** * @brief Duplicate a memory area into a talloc chunk. * * The function is equivalent to: * * @code * ptr = talloc_size(ctx, size); * if (ptr) memcpy(ptr, p, size); * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The memory chunk you want to duplicate. * * @param[in] size Number of char's that you want copy. * * @return The allocated memory chunk. 
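 *
 * Example (an illustrative sketch; ctx and st are hypothetical):
 * @code
 * struct stat st;
 * struct stat *copy;
 * ...
 * copy = talloc_memdup(ctx, &st, sizeof(st));
 * @endcode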
* * @see talloc_size() */ void *talloc_memdup(const void *t, const void *p, size_t size); #else #define talloc_memdup(t, p, size) _talloc_memdup(t, p, size, __location__) void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name); #endif #ifdef DOXYGEN /** * @brief Assign a type to a talloc chunk. * * This macro allows you to force the name of a pointer to be of a particular * type. This can be used in conjunction with talloc_get_type() to do type * checking on void* pointers. * * It is equivalent to this: * * @code * talloc_set_name_const(ptr, #type) * @endcode * * @param[in] ptr The talloc chunk to assign the type to. * * @param[in] type The type to assign. */ void talloc_set_type(const char *ptr, #type); /** * @brief Get a typed pointer out of a talloc pointer. * * This macro allows you to do type checking on talloc pointers. It is * particularly useful for void* private pointers. It is equivalent to * this: * * @code * (type *)talloc_check_name(ptr, #type) * @endcode * * @param[in] ptr The talloc pointer to check. * * @param[in] type The type to check against. * * @return The properly casted pointer given by ptr, NULL on error. */ type *talloc_get_type(const void *ptr, #type); #else #define talloc_set_type(ptr, type) talloc_set_name_const(ptr, #type) #define talloc_get_type(ptr, type) (type *)talloc_check_name(ptr, #type) #endif #ifdef DOXYGEN /** * @brief Safely turn a void pointer into a typed pointer. * * This macro is used together with talloc(mem_ctx, struct foo). If you had to * assing the talloc chunk pointer to some void pointer variable, * talloc_get_type_abort() is the recommended way to get the convert the void * pointer back to a typed pointer. * * @param[in] ptr The void pointer to convert. * * @param[in] type The type that this chunk contains * * @return The same value as ptr, type-checked and properly cast. */ void *talloc_get_type_abort(const void *ptr, #type); #else #define talloc_get_type_abort(ptr, type) (type *)_talloc_get_type_abort(ptr, #type, __location__) void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location); #endif /** * @brief Find a parent context by name. * * Find a parent memory context of the current context that has the given * name. This can be very useful in complex programs where it may be * difficult to pass all information down to the level you need, but you * know the structure you want is a parent of another context. * * @param[in] ctx The talloc chunk to start from. * * @param[in] name The name of the parent we look for. * * @return The memory context we are looking for, NULL if not * found. */ void *talloc_find_parent_byname(const void *ctx, const char *name); #ifdef DOXYGEN /** * @brief Find a parent context by type. * * Find a parent memory context of the current context that has the given * name. This can be very useful in complex programs where it may be * difficult to pass all information down to the level you need, but you * know the structure you want is a parent of another context. * * Like talloc_find_parent_byname() but takes a type, making it typesafe. * * @param[in] ptr The talloc chunk to start from. * * @param[in] type The type of the parent to look for. * * @return The memory context we are looking for, NULL if not * found. */ void *talloc_find_parent_bytype(const void *ptr, #type); #else #define talloc_find_parent_bytype(ptr, type) (type *)talloc_find_parent_byname(ptr, #type) #endif /** * @brief Allocate a talloc pool. 
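 *
 * Example (an illustrative sketch of the request/response pattern described
 * below; struct request is hypothetical):
 * @code
 * TALLOC_CTX *pool = talloc_pool(NULL, 8192);
 * struct request *req = talloc(pool, struct request); // no malloc(3) call
 * ...
 * talloc_free(pool); // releases the whole pool at once
 * @endcode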
* * A talloc pool is a pure optimization for specific situations. In the * release process for Samba 3.2 we found out that we had become considerably * slower than Samba 3.0 was. Profiling showed that malloc(3) was a large CPU * consumer in benchmarks. For Samba 3.2 we have internally converted many * static buffers to dynamically allocated ones, so malloc(3) being beaten * more was no surprise. But it made us slower. * * talloc_pool() is an optimization to call malloc(3) a lot less for the use * pattern Samba has: The SMB protocol is mainly a request/response protocol * where we have to allocate a certain amount of memory per request and free * that after the SMB reply is sent to the client. * * talloc_pool() creates a talloc chunk that you can use as a talloc parent * exactly as you would use any other ::TALLOC_CTX. The difference is that * when you talloc a child of this pool, no malloc(3) is done. Instead, talloc * just increments a pointer inside the talloc_pool. This also works * recursively. If you use the child of the talloc pool as a parent for * grand-children, their memory is also taken from the talloc pool. * * If there is not enough memory in the pool to allocate the new child, * it will create a new talloc chunk as if the parent was a normal talloc * context. * * If you talloc_free() children of a talloc pool, the memory is not given * back to the system. Instead, free(3) is only called if the talloc_pool() * itself is released with talloc_free(). * * The downside of a talloc pool is that if you talloc_move() a child of a * talloc pool to a talloc parent outside the pool, the whole pool memory is * not free(3)'ed until that moved chunk is also talloc_free()ed. * * @param[in] context The talloc context to hang the result off. * * @param[in] size Size of the talloc pool. * * @return The allocated talloc pool, NULL on error. */ void *talloc_pool(const void *context, size_t size); #ifdef DOXYGEN /** * @brief Allocate a talloc object as/with an additional pool. * * This is like talloc_pool(), but's it's more flexible * and allows an object to be a pool for its children. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @param[in] num_subobjects The expected number of subobjects, which will * be allocated within the pool. This allocates * space for talloc_chunk headers. * * @param[in] total_subobjects_size The size that all subobjects can use in total. * * * @return The allocated talloc object, NULL on error. */ void *talloc_pooled_object(const void *ctx, #type, unsigned num_subobjects, size_t total_subobjects_size); #else #define talloc_pooled_object(_ctx, _type, \ _num_subobjects, \ _total_subobjects_size) \ (_type *)_talloc_pooled_object((_ctx), sizeof(_type), #_type, \ (_num_subobjects), \ (_total_subobjects_size)) void *_talloc_pooled_object(const void *ctx, size_t type_size, const char *type_name, unsigned num_subobjects, size_t total_subobjects_size); #endif /** * @brief Free a talloc chunk and NULL out the pointer. * * TALLOC_FREE() frees a pointer and sets it to NULL. Use this if you want * immediate feedback (i.e. crash) if you use a pointer after having free'ed * it. * * @param[in] ctx The chunk to be freed. */ #define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0) /* @} ******************************************************************/ /** * \defgroup talloc_ref The talloc reference function. 
* @ingroup talloc * * This module contains the definitions around talloc references * * @{ */ /** * @brief Increase the reference count of a talloc chunk. * * The talloc_increase_ref_count(ptr) function is exactly equivalent to: * * @code * talloc_reference(NULL, ptr); * @endcode * * You can use either syntax, depending on which you think is clearer in * your code. * * @param[in] ptr The pointer to increase the reference count. * * @return 0 on success, -1 on error. */ int talloc_increase_ref_count(const void *ptr); /** * @brief Get the number of references to a talloc chunk. * * @param[in] ptr The pointer to retrieve the reference count from. * * @return The number of references. */ size_t talloc_reference_count(const void *ptr); #ifdef DOXYGEN /** * @brief Create an additional talloc parent to a pointer. * * The talloc_reference() function makes "context" an additional parent of * ptr. Each additional reference consumes around 48 bytes of memory on intel * x86 platforms. * * If ptr is NULL, then the function is a no-op, and simply returns NULL. * * After creating a reference you can free it in one of the following ways: * * - you can talloc_free() any parent of the original pointer. That * will reduce the number of parents of this pointer by 1, and will * cause this pointer to be freed if it runs out of parents. * * - you can talloc_free() the pointer itself if it has at maximum one * parent. This behaviour has been changed since the release of version * 2.0. Further informations in the description of "talloc_free". * * For more control on which parent to remove, see talloc_unlink() * @param[in] ctx The additional parent. * * @param[in] ptr The pointer you want to create an additional parent for. * * @return The original pointer 'ptr', NULL if talloc ran out of * memory in creating the reference. * * Example: * @code * unsigned int *a, *b, *c; * a = talloc(NULL, unsigned int); * b = talloc(NULL, unsigned int); * c = talloc(a, unsigned int); * // b also serves as a parent of c. * talloc_reference(b, c); * @endcode * * @see talloc_unlink() */ void *talloc_reference(const void *ctx, const void *ptr); #else #define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference_loc((ctx),(ptr), __location__) void *_talloc_reference_loc(const void *context, const void *ptr, const char *location); #endif /** * @brief Remove a specific parent from a talloc chunk. * * The function removes a specific parent from ptr. The context passed must * either be a context used in talloc_reference() with this pointer, or must be * a direct parent of ptr. * * You can just use talloc_free() instead of talloc_unlink() if there * is at maximum one parent. This behaviour has been changed since the * release of version 2.0. Further informations in the description of * "talloc_free". * * @param[in] context The talloc parent to remove. * * @param[in] ptr The talloc ptr you want to remove the parent from. * * @return 0 on success, -1 on error. * * @note If the parent has already been removed using talloc_free() then * this function will fail and will return -1. Likewise, if ptr is NULL, * then the function will make no modifications and return -1. * * Example: * @code * unsigned int *a, *b, *c; * a = talloc(NULL, unsigned int); * b = talloc(NULL, unsigned int); * c = talloc(a, unsigned int); * // b also serves as a parent of c. * talloc_reference(b, c); * talloc_unlink(b, c); * @endcode */ int talloc_unlink(const void *context, void *ptr); /** * @brief Provide a talloc context that is freed at program exit. 
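 *
 * Example (an illustrative sketch):
 * @code
 * void *global_ctx = talloc_autofree_context();
 * char *prog_name = talloc_strdup(global_ctx, "myprog"); // freed at exit
 * @endcode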
* * This is a handy utility function that returns a talloc context * which will be automatically freed on program exit. This can be used * to reduce the noise in memory leak reports. * * Never use this in code that might be used in objects loaded with * dlopen and unloaded with dlclose. talloc_autofree_context() * internally uses atexit(3). Some platforms like modern Linux handles * this fine, but for example FreeBSD does not deal well with dlopen() * and atexit() used simultaneously: dlclose() does not clean up the * list of atexit-handlers, so when the program exits the code that * was registered from within talloc_autofree_context() is gone, the * program crashes at exit. * * @return A talloc context, NULL on error. */ void *talloc_autofree_context(void); /** * @brief Get the size of a talloc chunk. * * This function lets you know the amount of memory allocated so far by * this context. It does NOT account for subcontext memory. * This can be used to calculate the size of an array. * * @param[in] ctx The talloc chunk. * * @return The size of the talloc chunk. */ size_t talloc_get_size(const void *ctx); /** * @brief Show the parentage of a context. * * @param[in] context The talloc context to look at. * * @param[in] file The output to use, a file, stdout or stderr. */ void talloc_show_parents(const void *context, FILE *file); /** * @brief Check if a context is parent of a talloc chunk. * * This checks if context is referenced in the talloc hierarchy above ptr. * * @param[in] context The assumed talloc context. * * @param[in] ptr The talloc chunk to check. * * @return Return 1 if this is the case, 0 if not. */ int talloc_is_parent(const void *context, const void *ptr); /** * @brief Change the parent context of a talloc pointer. * * The function changes the parent context of a talloc pointer. It is typically * used when the context that the pointer is currently a child of is going to be * freed and you wish to keep the memory for a longer time. * * The difference between talloc_reparent() and talloc_steal() is that * talloc_reparent() can specify which parent you wish to change. This is * useful when a pointer has multiple parents via references. * * @param[in] old_parent * @param[in] new_parent * @param[in] ptr * * @return Return the pointer you passed. It does not have any * failure modes. */ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr); /* @} ******************************************************************/ /** * @defgroup talloc_array The talloc array functions * @ingroup talloc * * Talloc contains some handy helpers for handling Arrays conveniently * * @{ */ #ifdef DOXYGEN /** * @brief Allocate an array. * * The macro is equivalent to: * * @code * (type *)talloc_size(ctx, sizeof(type) * count); * @endcode * * except that it provides integer overflow protection for the multiply, * returning NULL if the multiply overflows. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @param[in] count The number of 'type' elements you want to allocate. * * @return The allocated result, properly cast to 'type *', NULL on * error. 
* * Example: * @code * unsigned int *a, *b; * a = talloc_zero(NULL, unsigned int); * b = talloc_array(a, unsigned int, 100); * @endcode * * @see talloc() * @see talloc_zero_array() */ void *talloc_array(const void *ctx, #type, unsigned count); #else #define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type) void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Allocate an array. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] size The size of an array element. * * @param[in] count The number of elements you want to allocate. * * @return The allocated result, NULL on error. */ void *talloc_array_size(const void *ctx, size_t size, unsigned count); #else #define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__) #endif #ifdef DOXYGEN /** * @brief Allocate an array into a typed pointer. * * The macro should be used when you have a pointer to an array and want to * allocate memory of an array to point at with this pointer. When compiling * with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_array_size() * and talloc_get_name() will return the current location in the source file * and not the type. * * @param[in] ctx The talloc context to hang the result off. * * @param[in] ptr The pointer you want to assign the result to. * * @param[in] count The number of elements you want to allocate. * * @return The allocated memory chunk, properly casted. NULL on * error. */ void *talloc_array_ptrtype(const void *ctx, const void *ptr, unsigned count); #else #define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count) #endif #ifdef DOXYGEN /** * @brief Get the number of elements in a talloc'ed array. * * A talloc chunk carries its own size, so for talloc'ed arrays it is not * necessary to store the number of elements explicitly. * * @param[in] ctx The allocated array. * * @return The number of elements in ctx. */ size_t talloc_array_length(const void *ctx); #else #define talloc_array_length(ctx) (talloc_get_size(ctx)/sizeof(*ctx)) #endif #ifdef DOXYGEN /** * @brief Allocate a zero-initialized array * * @param[in] ctx The talloc context to hang the result off. * * @param[in] type The type that we want to allocate. * * @param[in] count The number of "type" elements you want to allocate. * * @return The allocated result casted to "type *", NULL on error. * * The talloc_zero_array() macro is equivalent to: * * @code * ptr = talloc_array(ctx, type, count); * if (ptr) memset(ptr, sizeof(type) * count); * @endcode */ void *talloc_zero_array(const void *ctx, #type, unsigned count); #else #define talloc_zero_array(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type) void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Change the size of a talloc array. * * The macro changes the size of a talloc pointer. The 'count' argument is the * number of elements of type 'type' that you want the resulting pointer to * hold. * * talloc_realloc() has the following equivalences: * * @code * talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type); * talloc_realloc(ctx, NULL, type, N) ==> talloc_array(ctx, type, N); * talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr); * @endcode * * The "context" argument is only used if "ptr" is NULL, otherwise it is * ignored. 
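 *
 * Example (an illustrative sketch of growing an array; ctx is hypothetical):
 * @code
 * int *numbers = talloc_array(ctx, int, 10);
 * ...
 * numbers = talloc_realloc(ctx, numbers, int, 20);
 * @endcode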
* * @param[in] ctx The parent context used if ptr is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] type The type of the array element inside ptr. * * @param[in] count The intended number of array elements. * * @return The new array, NULL on error. The call will fail either * due to a lack of memory, or because the pointer has more * than one parent (see talloc_reference()). */ void *talloc_realloc(const void *ctx, void *ptr, #type, size_t count); #else #define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type) void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name); #endif #ifdef DOXYGEN /** * @brief Untyped realloc to change the size of a talloc array. * * The macro is useful when the type is not known so the typesafe * talloc_realloc() cannot be used. * * @param[in] ctx The parent context used if 'ptr' is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] size The new chunk size. * * @return The new array, NULL on error. */ void *talloc_realloc_size(const void *ctx, void *ptr, size_t size); #else #define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__) void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name); #endif /** * @brief Provide a function version of talloc_realloc_size. * * This is a non-macro version of talloc_realloc(), which is useful as * libraries sometimes want a ralloc function pointer. A realloc() * implementation encapsulates the functionality of malloc(), free() and * realloc() in one call, which is why it is useful to be able to pass around * a single function pointer. * * @param[in] context The parent context used if ptr is NULL. * * @param[in] ptr The chunk to be resized. * * @param[in] size The new chunk size. * * @return The new chunk, NULL on error. */ void *talloc_realloc_fn(const void *context, void *ptr, size_t size); /* @} ******************************************************************/ /** * @defgroup talloc_string The talloc string functions. * @ingroup talloc * * talloc string allocation and manipulation functions. * @{ */ /** * @brief Duplicate a string into a talloc chunk. * * This function is equivalent to: * * @code * ptr = talloc_size(ctx, strlen(p)+1); * if (ptr) memcpy(ptr, p, strlen(p)+1); * @endcode * * This functions sets the name of the new pointer to the passed * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The string you want to duplicate. * * @return The duplicated string, NULL on error. */ char *talloc_strdup(const void *t, const char *p); /** * @brief Append a string to given string. * * The destination string is reallocated to take * strlen(s) + strlen(a) + 1 characters. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination to append to. * * @param[in] a The string you want to append. * * @return The concatenated strings, NULL on error. * * @see talloc_strdup() * @see talloc_strdup_append_buffer() */ char *talloc_strdup_append(char *s, const char *a); /** * @brief Append a string to a given buffer. * * This is a more efficient version of talloc_strdup_append(). It determines the * length of the destination string by the size of the talloc context. 
* * Use this very carefully as it produces a different result than * talloc_strdup_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_strdup_append(str_a, ", hello"); * char *buf = talloc_strdup_append_buffer(str_b, ", hello"); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination buffer to append to. * * @param[in] a The string you want to append. * * @return The concatenated strings, NULL on error. * * @see talloc_strdup() * @see talloc_strdup_append() * @see talloc_array_length() */ char *talloc_strdup_append_buffer(char *s, const char *a); /** * @brief Duplicate a length-limited string into a talloc chunk. * * This function is the talloc equivalent of the C library function strndup(3). * * This functions sets the name of the new pointer to the passed string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] p The string you want to duplicate. * * @param[in] n The maximum string length to duplicate. * * @return The duplicated string, NULL on error. */ char *talloc_strndup(const void *t, const char *p, size_t n); /** * @brief Append at most n characters of a string to given string. * * The destination string is reallocated to take * strlen(s) + strnlen(a, n) + 1 characters. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination string to append to. * * @param[in] a The source string you want to append. * * @param[in] n The number of characters you want to append from the * string. * * @return The concatenated strings, NULL on error. * * @see talloc_strndup() * @see talloc_strndup_append_buffer() */ char *talloc_strndup_append(char *s, const char *a, size_t n); /** * @brief Append at most n characters of a string to given buffer * * This is a more efficient version of talloc_strndup_append(). It determines * the length of the destination string by the size of the talloc context. * * Use this very carefully as it produces a different result than * talloc_strndup_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_strndup_append(str_a, ", hello", 7); * char *buf = talloc_strndup_append_buffer(str_b, ", hello", 7); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The destination buffer to append to. * * @param[in] a The source string you want to append. * * @param[in] n The number of characters you want to append from the * string. * * @return The concatenated strings, NULL on error. * * @see talloc_strndup() * @see talloc_strndup_append() * @see talloc_array_length() */ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n); /** * @brief Format a string given a va_list. 
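 *
 * Example (an illustrative sketch of a typical varargs wrapper; my_printf is
 * a hypothetical name):
 * @code
 * char *my_printf(TALLOC_CTX *ctx, const char *fmt, ...)
 * {
 *     va_list ap;
 *     char *result;
 *
 *     va_start(ap, fmt);
 *     result = talloc_vasprintf(ctx, fmt, ap);
 *     va_end(ap);
 *     return result;
 * }
 * @endcode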
* * This function is the talloc equivalent of the C library function * vasprintf(3). * * This functions sets the name of the new pointer to the new string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. */ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string given a va_list and append it to the given destination * string. * * @param[in] s The destination string to append to. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_vasprintf() */ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string given a va_list and append it to the given destination * buffer. * * @param[in] s The destination buffer to append to. * * @param[in] fmt The format string. * * @param[in] ap The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_vasprintf() */ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0); /** * @brief Format a string. * * This function is the talloc equivalent of the C library function asprintf(3). * * This functions sets the name of the new pointer to the new string. This is * equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * @param[in] t The talloc context to hang the result off. * * @param[in] fmt The format string. * * @param[in] ... The parameters used to fill fmt. * * @return The formatted string, NULL on error. */ char *talloc_asprintf(const void *t, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /** * @brief Append a formatted string to another string. * * This function appends the given formatted string to the given string. Use * this variant when the string in the current talloc buffer may have been * truncated in length. * * This functions sets the name of the new pointer to the new * string. This is equivalent to: * * @code * talloc_set_name_const(ptr, ptr) * @endcode * * If s == NULL then new context is created. * * @param[in] s The string to append to. * * @param[in] fmt The format string. * * @param[in] ... The parameters used to fill fmt. * * @return The formatted string, NULL on error. */ char *talloc_asprintf_append(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /** * @brief Append a formatted string to another string. * * This is a more efficient version of talloc_asprintf_append(). It determines * the length of the destination string by the size of the talloc context. * * Use this very carefully as it produces a different result than * talloc_asprintf_append() when a zero character is in the middle of the * destination string. * * @code * char *str_a = talloc_strdup(NULL, "hello world"); * char *str_b = talloc_strdup(NULL, "hello world"); * str_a[5] = str_b[5] = '\0' * * char *app = talloc_asprintf_append(str_a, "%s", ", hello"); * char *buf = talloc_strdup_append_buffer(str_b, "%s", ", hello"); * * printf("%s\n", app); // hello, hello (app = "hello, hello") * printf("%s\n", buf); // hello (buf = "hello\0world, hello") * @endcode * * If s == NULL then new context is created. * * @param[in] s The string to append to * * @param[in] fmt The format string. * * @param[in] ... 
The parameters used to fill fmt. * * @return The formatted string, NULL on error. * * @see talloc_asprintf() * @see talloc_asprintf_append() */ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); /* @} ******************************************************************/ /** * @defgroup talloc_debug The talloc debugging support functions * @ingroup talloc * * To aid memory debugging, talloc contains routines to inspect the currently * allocated memory hierarchy. * * @{ */ /** * @brief Walk a complete talloc hierarchy. * * This provides a more flexible reports than talloc_report(). It * will recursively call the callback for the entire tree of memory * referenced by the pointer. References in the tree are passed with * is_ref = 1 and the pointer that is referenced. * * You can pass NULL for the pointer, in which case a report is * printed for the top level memory context, but only if * talloc_enable_leak_report() or talloc_enable_leak_report_full() * has been called. * * The recursion is stopped when depth >= max_depth. * max_depth = -1 means only stop at leaf nodes. * * @param[in] ptr The talloc chunk. * * @param[in] depth Internal parameter to control recursion. Call with 0. * * @param[in] max_depth Maximum recursion level. * * @param[in] callback Function to be called on every chunk. * * @param[in] private_data Private pointer passed to callback. */ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth, void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *private_data), void *private_data); /** * @brief Print a talloc hierarchy. * * This provides a more flexible reports than talloc_report(). It * will let you specify the depth and max_depth. * * @param[in] ptr The talloc chunk. * * @param[in] depth Internal parameter to control recursion. Call with 0. * * @param[in] max_depth Maximum recursion level. * * @param[in] f The file handle to print to. */ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f); /** * @brief Print a summary report of all memory used by ptr. * * This provides a more detailed report than talloc_report(). It will * recursively print the entire tree of memory referenced by the * pointer. References in the tree are shown by giving the name of the * pointer that is referenced. * * You can pass NULL for the pointer, in which case a report is printed * for the top level memory context, but only if * talloc_enable_leak_report() or talloc_enable_leak_report_full() has * been called. * * @param[in] ptr The talloc chunk. * * @param[in] f The file handle to print to. * * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * fprintf(stderr, "Dumping memory tree for a:\n"); * talloc_report_full(a, stderr); * @endcode * * @see talloc_report() */ void talloc_report_full(const void *ptr, FILE *f); /** * @brief Print a summary report of all memory used by ptr. * * This function prints a summary report of all memory used by ptr. One line of * report is printed for each immediate child of ptr, showing the total memory * and number of blocks used by that child. * * You can pass NULL for the pointer, in which case a report is printed * for the top level memory context, but only if talloc_enable_leak_report() * or talloc_enable_leak_report_full() has been called. * * @param[in] ptr The talloc chunk. * * @param[in] f The file handle to print to. 
* * Example: * @code * unsigned int *a, *b; * a = talloc(NULL, unsigned int); * b = talloc(a, unsigned int); * fprintf(stderr, "Summary of memory tree for a:\n"); * talloc_report(a, stderr); * @endcode * * @see talloc_report_full() */ void talloc_report(const void *ptr, FILE *f); /** * @brief Enable tracking the use of NULL memory contexts. * * This enables tracking of the NULL memory context without enabling leak * reporting on exit. Useful for when you want to do your own leak * reporting call via talloc_report_null_full(); */ void talloc_enable_null_tracking(void); /** * @brief Enable tracking the use of NULL memory contexts. * * This enables tracking of the NULL memory context without enabling leak * reporting on exit. Useful for when you want to do your own leak * reporting call via talloc_report_null_full(); */ void talloc_enable_null_tracking_no_autofree(void); /** * @brief Disable tracking of the NULL memory context. * * This disables tracking of the NULL memory context. */ void talloc_disable_null_tracking(void); /** * @brief Enable leak report when a program exits. * * This enables calling of talloc_report(NULL, stderr) when the program * exits. In Samba4 this is enabled by using the --leak-report command * line option. * * For it to be useful, this function must be called before any other * talloc function as it establishes a "null context" that acts as the * top of the tree. If you don't call this function first then passing * NULL to talloc_report() or talloc_report_full() won't give you the * full tree printout. * * Here is a typical talloc report: * * @code * talloc report on 'null_context' (total 267 bytes in 15 blocks) * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * iconv(UTF8,CP850) contains 42 bytes in 2 blocks * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks * iconv(CP850,UTF8) contains 42 bytes in 2 blocks * iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks * iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks * @endcode */ void talloc_enable_leak_report(void); /** * @brief Enable full leak report when a program exits. * * This enables calling of talloc_report_full(NULL, stderr) when the * program exits. In Samba4 this is enabled by using the * --leak-report-full command line option. * * For it to be useful, this function must be called before any other * talloc function as it establishes a "null context" that acts as the * top of the tree. If you don't call this function first then passing * NULL to talloc_report() or talloc_report_full() won't give you the * full tree printout. * * Here is a typical full report: * * @code * full talloc report on 'root' (total 18 bytes in 8 blocks) * p1 contains 18 bytes in 7 blocks (ref 0) * r1 contains 13 bytes in 2 blocks (ref 0) * reference to: p2 * p2 contains 1 bytes in 1 blocks (ref 1) * x3 contains 1 bytes in 1 blocks (ref 0) * x2 contains 1 bytes in 1 blocks (ref 0) * x1 contains 1 bytes in 1 blocks (ref 0) * @endcode */ void talloc_enable_leak_report_full(void); /** * @brief Set a custom "abort" function that is called on serious error. * * The default "abort" function is abort(). * * The "abort" function is called when: * *
 *  - talloc_get_type_abort() fails
 *  - the provided pointer is not a valid talloc context
 *  - when the context meta data are invalid
 *  - when access after free is detected
 *
* * Example: * * @code * void my_abort(const char *reason) * { * fprintf(stderr, "talloc abort: %s\n", reason); * abort(); * } * * talloc_set_abort_fn(my_abort); * @endcode * * @param[in] abort_fn The new "abort" function. * * @see talloc_set_log_fn() * @see talloc_get_type() */ void talloc_set_abort_fn(void (*abort_fn)(const char *reason)); /** * @brief Set a logging function. * * @param[in] log_fn The logging function. * * @see talloc_set_log_stderr() * @see talloc_set_abort_fn() */ void talloc_set_log_fn(void (*log_fn)(const char *message)); /** * @brief Set stderr as the output for logs. * * @see talloc_set_log_fn() * @see talloc_set_abort_fn() */ void talloc_set_log_stderr(void); /** * @brief Set a max memory limit for the current context hierarchy * This affects all children of this context and constrain any * allocation in the hierarchy to never exceed the limit set. * The limit can be removed by setting 0 (unlimited) as the * max_size by calling the funciton again on the sam context. * Memory limits can also be nested, meaning a hild can have * a stricter memory limit than a parent. * Memory limits are enforced only at memory allocation time. * Stealing a context into a 'limited' hierarchy properly * updates memory usage but does *not* cause failure if the * move causes the new parent to exceed its limits. However * any further allocation on that hierarchy will then fail. * * @param[in] ctx The talloc context to set the limit on * @param[in] max_size The (new) max_size */ int talloc_set_memlimit(const void *ctx, size_t max_size); /* @} ******************************************************************/ #if TALLOC_DEPRECATED #define talloc_zero_p(ctx, type) talloc_zero(ctx, type) #define talloc_p(ctx, type) talloc(ctx, type) #define talloc_array_p(ctx, type, count) talloc_array(ctx, type, count) #define talloc_realloc_p(ctx, p, type, count) talloc_realloc(ctx, p, type, count) #define talloc_destroy(ctx) talloc_free(ctx) #define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a)) #endif #ifndef TALLOC_MAX_DEPTH #define TALLOC_MAX_DEPTH 10000 #endif #ifdef __cplusplus } /* end of extern "C" */ #endif #endif pytsk-20190507/test_data/000077500000000000000000000000001346423473500151205ustar00rootroot00000000000000pytsk-20190507/test_data/bogus.raw000066400000000000000000000000141346423473500167450ustar00rootroot00000000000000BOGUS DATA. 
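The talloc_set_memlimit() documentation above carries no usage example. The following is a minimal sketch, assuming the bundled talloc 2.x behaves as described in this header; the pool name, the 1 MiB figure and the include path are illustrative only and not part of pytsk.

  #include <stdio.h>
  #include <talloc.h>   /* bundled talloc; adjust the include path if needed */

  int main(void) {
    /* Parent context; every allocation below hangs off this pool. */
    void *pool = talloc_new(NULL);

    /* Constrain the whole hierarchy under pool to 1 MiB. */
    if (talloc_set_memlimit(pool, 1024 * 1024) != 0) {
      fprintf(stderr, "talloc_set_memlimit failed\n");
    }

    /* Small allocations inside the limit succeed as usual. */
    char *name = talloc_strdup(pool, "pytsk");

    /* An allocation that would push the hierarchy over the limit fails
       at allocation time and returns NULL. */
    char *too_big = talloc_size(pool, 8 * 1024 * 1024);
    if (too_big == NULL) {
      fprintf(stderr, "allocation rejected by the memory limit\n");
    }

    printf("%s\n", name != NULL ? name : "");

    /* Freeing the parent releases the entire hierarchy in one call. */
    talloc_free(pool);
    return 0;
  }

As the documentation above notes, stealing an existing context into the limited hierarchy updates the accounting but does not fail retroactively; only later allocations are rejected.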
pytsk-20190507/test_data/image.raw000066400000000000000000003100001346423473500167070ustar00rootroot00000000000000dK Ĭ¿OS¿ONí €8ñøÚ¸WïIÛœõ´ W6íŃ’ÌÓÐD™®ú QÓUÿΆ«¿OKÿÿ?`øÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿïÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ†«¿O†«¿O†«¿OíACN·¬¿Oµ¬¿Oµ¬¿Oˆ€0†«¿O†«¿O†«¿OÀA0†«¿O†«¿O†«¿O íACN\¬¿O[¬¿O[¬¿Oˆ?ü ¡€CNh¬¿Oµ¬¿Of¬¿Oµ¬¿OˆJü ¡€CN5R¬¿OR¬¿OR¬¿OˆCü ¡CNtµ¬¿O¿¬¿Oµ¬¿OˆNü ¡€CN[¬¿O[¬¿O[¬¿OˆIü ¡ . ..  lost+found  a_directoryÀ passwords.txt¨passwords.txt~.swp .ô.. . .. another_fileÔa_filee.Ä.another_file.swp¨ another_file~.swpxplace,user,password bank,joesmith,superrich alarm system,-,1234 treasure chest,-,1111 uber secret laire,admin,admin This is a text file. We should be able to parse it. This is another file. 
pytsk-20190507/test_data/tsk_volume_system.raw000066400000000000000000055000001346423473500214270ustar00rootroot00000000000000î¦õSƒ$^%-_á Uª&ƒ-à Uª ð?Å•  ÈŒ¹RÿÿSïÈŒ¹R €8å´Yú½1N…–>ª™¿ü2_­­‘$ˆA¦Ÿõ­‹_d£s ÈŒ¹R Å•ÿÿÿÿÿ€ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÈŒ¹RÈŒ¹RÈŒ¹RíAÈŒ¹RÈŒ¹RÈŒ¹R€0ÈŒ¹RÈŒ¹RÈŒ¹R *ÀA0ÈŒ¹RÈŒ¹RÈŒ¹R !"#$%&'() . .. è lost+found .ô..pytsk-20190507/tests/000077500000000000000000000000001346423473500143125ustar00rootroot00000000000000pytsk-20190507/tests/__init__.py000066400000000000000000000011571346423473500164270ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2013, Joachim Metz . # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
pytsk-20190507/tests/fs_info.py000066400000000000000000000115061346423473500163120ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for FS_Info.""" import os import unittest import pytsk3 import test_lib # fls -l ./test_data/image.raw # d/d 11: lost+found 2012-05-25 17:55:50 (CEST) # 2012-05-25 17:55:50 (CEST) 2012-05-25 17:55:50 (CEST) # 0000-00-00 00:00:00 (UTC) 12288 0 0 # d/d 12: a_directory 2012-05-25 17:59:23 (CEST) # 2012-05-25 17:59:24 (CEST) 2012-05-25 17:59:23 (CEST) # 0000-00-00 00:00:00 (UTC) 1024 5000 151107 # r/r 15: passwords.txt 2012-05-25 18:00:53 (CEST) # 2012-05-25 18:00:53 (CEST) 2012-05-25 18:01:03 (CEST) # 0000-00-00 00:00:00 (UTC) 116 5000 151107 # r/- * 0: passwords.txt~ 0000-00-00 00:00:00 (UTC) # 0000-00-00 00:00:00 (UTC) 0000-00-00 00:00:00 (UTC) # 0000-00-00 00:00:00 (UTC) 0 0 0 # d/d 17: $OrphanFiles 0000-00-00 00:00:00 (UTC) # 0000-00-00 00:00:00 (UTC) 0000-00-00 00:00:00 (UTC) # 0000-00-00 00:00:00 (UTC) 0 0 0 class TSKFsInfoTestCase(unittest.TestCase): """FS_Info test case.""" def _testInitialize(self, fs_info): """Test the initialize functionality. Args: fs_info: the FS_Info object. """ self.assertNotEquals(fs_info, None) def _testOpenMeta(self, fs_info): """Test the open meta functionality. Args: fs_info: the FS_Info object. """ self.assertNotEquals(fs_info, None) file_object = fs_info.open_meta(15) self.assertNotEquals(file_object, None) with self.assertRaises(IOError): file_object = fs_info.open_meta(19) class TSKFsInfoTest(TSKFsInfoTestCase): """FS_Info for testing.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._img_info = pytsk3.Img_Info(test_file) def testInitialize(self): """Test the initialize functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testInitialize(fs_info) def testOpenMeta(self): """Test the open meta functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testOpenMeta(fs_info) class TSKFsInfoBogusTest(TSKFsInfoTestCase): """FS_Info for testing that fails.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'bogus.raw') self._img_info = pytsk3.Img_Info(test_file) def testInitialize(self): """Test the initialize functionality.""" with self.assertRaises(IOError): pytsk3.FS_Info(self._img_info, offset=0) class TSKFsInfoFileObjectTest(TSKFsInfoTestCase): """Tests the FS_Info object using an Img_Info file-like object.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size self._img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size) def testInitialize(self): """Test the initialize functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testInitialize(fs_info) def testOpenMeta(self): """Test the open meta functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testOpenMeta(fs_info) class TSKFsInfoFileObjectWithDetectTest(TSKFsInfoTestCase): """Tests the FS_Info object with auto-detect Img_Info.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size self._img_info = test_lib.FileObjectImageInfo( 
self._file_object, self._file_size, image_type=pytsk3.TSK_IMG_TYPE_DETECT) def testInitialize(self): """Test the initialize functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testInitialize(fs_info) def testOpenMeta(self): """Test the open meta functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testOpenMeta(fs_info) class TSKFsInfoFileObjectWithLargeSize(TSKFsInfoTestCase): """Tests the FS_Info object with a large size Img_Info.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') self._file_size = 1024 * 1024 * 1024 * 1024 self._img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size) def testInitialize(self): """Test the initialize functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testInitialize(fs_info) def testOpenMeta(self): """Test the open meta functionality.""" fs_info = pytsk3.FS_Info(self._img_info, offset=0) self._testOpenMeta(fs_info) if __name__ == '__main__': unittest.main() pytsk-20190507/tests/img_info.py000066400000000000000000000131341346423473500164550ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for Img_Info.""" import os import unittest import pytsk3 import test_lib class TSKImgInfoTestCase(unittest.TestCase): """Img_Info test case.""" def _testInitialize(self, img_info): """Test the initialize functionality. Args: img_info: the Img_Info object. """ self.assertNotEquals(img_info, None) def _testGetSize(self, img_info): """Test the get size functionality. Args: img_info: the Img_Info object. """ self.assertNotEquals(img_info, None) self.assertEquals(img_info.get_size(), self._file_size) def _testRead(self, img_info): """Test the read functionality. Args: img_info: the Img_Info object. """ self.assertNotEquals(img_info, None) self.assertEquals(img_info.read(0x5800, 16), b'place,user,passw') self.assertEquals(img_info.read(0x7c00, 16), b'This is another ') # Conforming to the POSIX seek the offset can exceed the file size # but reading will result in no data being returned. self.assertEquals(img_info.read(0x19000, 16), b'') with self.assertRaises(IOError): img_info.read(-1, 16) class TSKImgInfoTest(TSKImgInfoTestCase): """The unit test for the Img_Info object.""" def setUp(self): """Sets up the needed objects used throughout the test.""" self._test_file = os.path.join('test_data', 'image.raw') self._file_size = 102400 def testInitialize(self): """Test the initialize functionality.""" img_info = pytsk3.Img_Info(url=self._test_file) self._testInitialize(img_info) img_info.close() def testGetSize(self): """Test the get size functionality.""" img_info = pytsk3.Img_Info(url=self._test_file) self._testGetSize(img_info) img_info.close() def testRead(self): """Test the read functionality.""" img_info = pytsk3.Img_Info(url=self._test_file) self.assertNotEquals(img_info, None) self.assertEquals(img_info.read(0x5800, 16), b'place,user,passw') self.assertEquals(img_info.read(0x7c00, 16), b'This is another ') # Conforming to the POSIX seek the offset can exceed the file size # but reading will result in no data being returned. Note that the SleuthKit # does not conform to the posix standard and will raise and IO error. 
with self.assertRaises(IOError): img_info.read(0x19000, 16) with self.assertRaises(IOError): img_info.read(-1, 16) img_info.close() class TSKImgInfoFileObjectTest(TSKImgInfoTestCase): """The unit test for the Img_Info object using a file-like object.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size def testInitialize(self): """Test the initialize functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testInitialize(img_info) img_info.close() def testGetSize(self): """Test the get size functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testGetSize(img_info) img_info.close() def testRead(self): """Test the read functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testRead(img_info) img_info.close() class TSKImgInfoFileObjectWithDetectTest(TSKImgInfoTestCase): """The unit test for the Img_Info object using a file-like object with image type: pytsk3.TSK_IMG_TYPE_DETECT.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size def testInitialize(self): """Test the initialize functionality.""" img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size, image_type=pytsk3.TSK_IMG_TYPE_DETECT) self._testInitialize(img_info) img_info.close() def testGetSize(self): """Test the get size functionality.""" img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size, image_type=pytsk3.TSK_IMG_TYPE_DETECT) self._testGetSize(img_info) img_info.close() def testRead(self): """Test the read functionality.""" img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size, image_type=pytsk3.TSK_IMG_TYPE_DETECT) self._testRead(img_info) img_info.close() class TSKImgInfoFileObjectLargeSizeTest(TSKImgInfoTestCase): """The unit test for the Img_Info object using a file-like object with a large size.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'image.raw') self._file_object = open(test_file, 'rb') self._file_size = 1024 * 1024 * 1024 * 1024 def testInitialize(self): """Test the initialize functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testInitialize(img_info) img_info.close() def testGetSize(self): """Test the get size functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testGetSize(img_info) img_info.close() def testRead(self): """Test the read functionality.""" img_info = test_lib.FileObjectImageInfo(self._file_object, self._file_size) self._testRead(img_info) img_info.close() if __name__ == '__main__': unittest.main() pytsk-20190507/tests/test_lib.py000066400000000000000000000031511346423473500164710ustar00rootroot00000000000000"""Shared test case.""" import os import pytsk3 class FileObjectImageInfo(pytsk3.Img_Info): """Img_Info that uses a file-like object.""" def __init__( self, file_object, file_size, image_type=pytsk3.TSK_IMG_TYPE_RAW): """Initializes the image object. Args: file_object: the file-like object (instance of io.FileIO). file_size: the file size. 
image_type: optional SleuthKit image type. The default is RAW (pytsk3.TSK_IMG_TYPE_RAW). Raises: ValueError: if the file-like object is invalid. """ if not file_object: raise ValueError(u'Missing file-like object.') # pytsk3.Img_Info does not let you set attributes after initialization. self._file_object = file_object self._file_size = file_size # Using the old parent class invocation style otherwise some versions # of pylint complain also setting type to RAW to make sure Img_Info # does not do detection. pytsk3.Img_Info.__init__(self, url='', type=image_type) # Note: that the following functions are part of the pytsk3.Img_Info object # interface. def close(self): """Closes the volume IO object.""" self._file_object = None def read(self, offset, size): """Reads a byte string from the image object at the specified offset. Args: offset: offset where to start reading. size: number of bytes to read. Returns: A byte string containing the data read. """ self._file_object.seek(offset, os.SEEK_SET) return self._file_object.read(size) def get_size(self): """Retrieves the size.""" return self._file_size pytsk-20190507/tests/volume_info.py000066400000000000000000000160421346423473500172110ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for Volume_Info.""" import os import unittest import pytsk3 import test_lib # mmls ../test_data/tsk_volume_system.raw # DOS Partition Table # Offset Sector: 0 # Units are in 512-byte sectors # # Slot Start End Length Description # 00: Meta 0000000000 0000000000 0000000001 Primary Table (#0) # 01: ----- 0000000000 0000000000 0000000001 Unallocated # 02: 00:00 0000000001 0000000350 0000000350 Linux (0x83) # 03: Meta 0000000351 0000002879 0000002529 DOS Extended (0x05) # 04: Meta 0000000351 0000000351 0000000001 Extended Table (#1) # 05: ----- 0000000351 0000000351 0000000001 Unallocated # 06: 01:00 0000000352 0000002879 0000002528 Linux (0x83) class TSKVolumeInfoTestCase(unittest.TestCase): """Volume_Info test case.""" maxDiff = None def _testInitialize(self, volume_info): """Test the initialize functionality. Args: volume_info: the Volume_Info object. """ self.assertNotEquals(volume_info, None) def _testIterate(self, volume_info): """Test the iterate functionality. Args: volume_info: the Volume_Info object. 
""" self.assertNotEquals(volume_info, None) self.assertNotEquals(getattr(volume_info, 'info', None), None) self.assertEquals(str(volume_info.info.vstype), 'TSK_VS_TYPE_DOS') parts = [] for part in volume_info: part_string = ( u'{0:02d}: {1:010d} {2:010d} {3:010d} {4:s}\n').format( part.addr, part.start, part.start + part.len - 1, part.len, part.desc.decode('utf-8')) parts.append(part_string) self.assertEquals(len(parts), 7) expected_parts_string = ( u'00: 0000000000 0000000000 0000000001 Primary Table (#0)\n' u'01: 0000000000 0000000000 0000000001 Unallocated\n' u'02: 0000000001 0000000350 0000000350 Linux (0x83)\n' u'03: 0000000351 0000002879 0000002529 DOS Extended (0x05)\n' u'04: 0000000351 0000000351 0000000001 Extended Table (#1)\n' u'05: 0000000351 0000000351 0000000001 Unallocated\n' u'06: 0000000352 0000002879 0000002528 Linux (0x83)\n') self.assertEquals(u''.join(parts), expected_parts_string) class TSKVolumeInfoTest(TSKVolumeInfoTestCase): """Volume_Info for testing.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'tsk_volume_system.raw') self._img_info = pytsk3.Img_Info(test_file) def testInitialize(self): """Test the initialize functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testInitialize(volume_info) def testIterate(self): """Test the iterate functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testIterate(volume_info) class TSKVolumeInfoBogusTest(TSKVolumeInfoTestCase): """Volume_Info for testing that fails.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'bogus.raw') self._img_info = pytsk3.Img_Info(test_file) def testInitialize(self): """Test the initialize functionality.""" with self.assertRaises(IOError): pytsk3.Volume_Info(self._img_info) class TSKVolumeInfoFileObjectTest(TSKVolumeInfoTestCase): """Tests the Volume_Info object using an Img_Info file-like object.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'tsk_volume_system.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size self._img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size) def testInitialize(self): """Test the initialize functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testInitialize(volume_info) def testIterate(self): """Test the iterate functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testIterate(volume_info) class TSKVolumeInfoFileObjectWithDetectTest(TSKVolumeInfoTestCase): """Tests the Volume_Info object with auto-detect Img_Info.""" def setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'tsk_volume_system.raw') self._file_object = open(test_file, 'rb') stat_info = os.stat(test_file) self._file_size = stat_info.st_size self._img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size, image_type=pytsk3.TSK_IMG_TYPE_DETECT) def testInitialize(self): """Test the initialize functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testInitialize(volume_info) def testIterate(self): """Test the iterate functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testIterate(volume_info) class TSKVolumeInfoFileObjectWithLargeSize(TSKVolumeInfoTestCase): """Tests the Volume_Info object with a large size Img_Info.""" def 
setUp(self): """Sets up the needed objects used throughout the test.""" test_file = os.path.join('test_data', 'tsk_volume_system.raw') self._file_object = open(test_file, 'rb') self._file_size = 1024 * 1024 * 1024 * 1024 self._img_info = test_lib.FileObjectImageInfo( self._file_object, self._file_size) def testInitialize(self): """Test the initialize functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self._testInitialize(volume_info) def testIterate(self): """Test the iterate functionality.""" volume_info = pytsk3.Volume_Info(self._img_info) self.assertNotEquals(volume_info, None) self.assertNotEquals(getattr(volume_info, 'info', None), None) self.assertEquals(str(volume_info.info.vstype), 'TSK_VS_TYPE_DOS') parts = [] for part in volume_info: part_string = ( u'{0:02d}: {1:010d} {2:010d} {3:010d} {4:s}\n').format( part.addr, part.start, part.start + part.len - 1, part.len, part.desc.decode('utf-8')) parts.append(part_string) # Note that due to the size the SleuthKit will add a non-existing part: # 07: 0000002880 2147483647 2147480768 Unallocated self.assertEquals(len(parts), 8) expected_parts_string = ( u'00: 0000000000 0000000000 0000000001 Primary Table (#0)\n' u'01: 0000000000 0000000000 0000000001 Unallocated\n' u'02: 0000000001 0000000350 0000000350 Linux (0x83)\n' u'03: 0000000351 0000002879 0000002529 DOS Extended (0x05)\n' u'04: 0000000351 0000000351 0000000001 Extended Table (#1)\n' u'05: 0000000351 0000000351 0000000001 Unallocated\n' u'06: 0000000352 0000002879 0000002528 Linux (0x83)\n' u'07: 0000002880 2147483647 2147480768 Unallocated\n') self.assertEquals(u''.join(parts), expected_parts_string) if __name__ == '__main__': unittest.main() pytsk-20190507/tox.ini000066400000000000000000000001701346423473500144610ustar00rootroot00000000000000[tox] envlist = py27, py34 [testenv] pip_pre = True deps = pip >= 7.0.0 pytest commands = python run_tests.py pytsk-20190507/tsk3.c000066400000000000000000000455151346423473500142120ustar00rootroot00000000000000/* SleuthKit functions. * * Copyright 2010, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "tsk3.h" #if defined( TSK_MULTITHREAD_LIB ) extern void tsk_init_lock(tsk_lock_t * lock); extern void tsk_deinit_lock(tsk_lock_t * lock); #endif /* Prototypes for IMG_INFO hooks * Note that IMG_INFO_read is called by the SleuthKit the Img_Info_read * is its equivalent called by the pytsk3 when no proxy object is defined. */ ssize_t IMG_INFO_read(TSK_IMG_INFO *self, TSK_OFF_T off, char *buf, size_t len); void IMG_INFO_close(TSK_IMG_INFO *self); /* This macro is used to receive the object reference from a member of the type. 
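 *
 * This is the usual container_of() idiom: given the address of a member
 * field embedded inside a structure, the member's offset is subtracted to
 * recover a pointer to the enclosing object.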
*/ #define GET_Object_from_member(type, object, member) \ (type)(((char *)object) - (unsigned long)(&((type)0)->member)) /* Img_Info destructor */ static int Img_Info_dest(Img_Info self) { if(self == NULL) { return -1; } tsk_img_close((TSK_IMG_INFO *) self->img); if(self->img_is_internal != 0) { #if defined( TSK_MULTITHREAD_LIB ) tsk_deinit_lock(&(self->img->base.cache_lock)); #endif // If img is internal talloc will free it. } self->img = NULL; return 0; } /* Img_Info constructor */ static Img_Info Img_Info_Con(Img_Info self, char *urn, TSK_IMG_TYPE_ENUM type) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(urn != NULL && urn[0] != 0) { #ifdef TSK_VERSION_NUM self->img = (Extended_TSK_IMG_INFO *) tsk_img_open_utf8(1, (const char **) &urn, type, 0); #else self->img = (Extended_TSK_IMG_INFO *) tsk_img_open_utf8(1, (const char **) &urn, type); #endif self->img_is_internal = 0; } else { // Initialise the img struct with the correct callbacks: self->img = talloc_zero(self, Extended_TSK_IMG_INFO); self->img_is_internal = 1; self->img->container = self; #if defined( TSK_MULTITHREAD_LIB ) tsk_init_lock(&(self->img->base.cache_lock)); #endif self->img->base.read = IMG_INFO_read; self->img->base.close = IMG_INFO_close; self->img->base.size = CALL(self, get_size); #ifdef TSK_VERSION_NUM self->img->base.sector_size = 512; #endif #if defined( TSK_VERSION_NUM ) && ( TSK_VERSION_NUM >= 0x040103ff ) self->img->base.itype = TSK_IMG_TYPE_EXTERNAL; #else self->img->base.itype = TSK_IMG_TYPE_RAW_SING; #endif } if(self->img == NULL) { RaiseError(EIOError, "Unable to open image: %s", tsk_error_get()); tsk_error_reset(); return NULL; } self->img_is_open = 1; talloc_set_destructor((void *) self, (int(*)(void *)) &Img_Info_dest); return self; } uint64_t Img_Info_read(Img_Info self, TSK_OFF_T off, OUT char *buf, size_t len) { ssize_t read_count = 0; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return 0; } if(self->img_is_open == 0) { RaiseError(EIOError, "Invalid Img_Info not opened."); return 0; } if(off < 0) { RaiseError(EIOError, "Invalid offset value out of bounds."); return 0; } if(buf == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: buf."); return 0; } read_count = CALL((TSK_IMG_INFO *) self->img, read, off, buf, len); if(read_count < 0) { RaiseError(EIOError, "Unable to read image: %s", tsk_error_get()); tsk_error_reset(); return 0; } return read_count; } void Img_Info_close(Img_Info self) { if(self != NULL) { self->img_is_open = 0; } } uint64_t Img_Info_get_size(Img_Info self) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return 0; } if(self->img != NULL) { return ((TSK_IMG_INFO *) self->img)->size; } return (uint64_t) -1; } VIRTUAL(Img_Info, Object) { VMETHOD(Con) = Img_Info_Con; VMETHOD(read) = Img_Info_read; VMETHOD(close) = Img_Info_close; VMETHOD(get_size) = Img_Info_get_size; } END_VIRTUAL void IMG_INFO_close(TSK_IMG_INFO *img) { Extended_TSK_IMG_INFO *self = (Extended_TSK_IMG_INFO *) img; CALL(self->container, close); }; ssize_t IMG_INFO_read(TSK_IMG_INFO *img, TSK_OFF_T off, char *buf, size_t len) { Extended_TSK_IMG_INFO *self = (Extended_TSK_IMG_INFO *) img; if(len == 0) { return 0; } return (ssize_t) CALL(self->container, read, (uint64_t) off, buf, len); } /* FS_Info destructor */ int FS_Info_dest(FS_Info self) { if(self == NULL) { return -1; } tsk_fs_close(self->info); self->info = NULL; self->extended_img_info = NULL; return 0; } /* FS_Info constructor */ static 
FS_Info FS_Info_Con(FS_Info self, Img_Info img, TSK_OFF_T offset, TSK_FS_TYPE_ENUM type) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(img == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: img."); return NULL; } self->extended_img_info = img->img; self->info = tsk_fs_open_img((TSK_IMG_INFO *) self->extended_img_info, offset, type); if(!self->info) { RaiseError(EIOError, "Unable to open the image as a filesystem at offset: 0x%08" PRIxOFF " with error: %s", offset, tsk_error_get()); tsk_error_reset(); return NULL; } // Make sure that the filesystem is properly closed when we get freed talloc_set_destructor((void *) self, (int(*)(void *)) &FS_Info_dest); return self; } static Directory FS_Info_open_dir(FS_Info self, ZString path, TSK_INUM_T inode) { Directory object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(Directory, Directory, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. if(CONSTRUCT_INITIALIZE(Directory, Directory, Con, object, self, path, inode) == NULL) { goto on_error; } } return object; on_error: if(object != NULL) { talloc_free(object); } return NULL; }; static File FS_Info_open(FS_Info self, ZString path) { TSK_FS_FILE *info = NULL; File object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } info = tsk_fs_file_open(self->info, NULL, path); if(info == NULL) { RaiseError(EIOError, "Unable to open file: %s", tsk_error_get()); tsk_error_reset(); goto on_error; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(File, File, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. if(CONSTRUCT_INITIALIZE(File, File, Con, object, self, info) == NULL) { goto on_error; } // Tell the File object to manage info. object->info_is_internal = 1; } return object; on_error: if(object != NULL) { talloc_free(object); } if(info != NULL) { tsk_fs_file_close(info); } return NULL; }; static File FS_Info_open_meta(FS_Info self, TSK_INUM_T inode) { TSK_FS_FILE *info = NULL; File object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } info = tsk_fs_file_open_meta(self->info, NULL, inode); if(info == NULL) { RaiseError(EIOError, "Unable to open file: %s", tsk_error_get()); tsk_error_reset(); goto on_error; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(File, File, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. if(CONSTRUCT_INITIALIZE(File, File, Con, object, self, info) == NULL) { goto on_error; } // Tell the File object to manage info. 
object->info_is_internal = 1; } return object; on_error: if(object != NULL) { talloc_free(object); } if(info != NULL) { tsk_fs_file_close(info); } return NULL; } static void FS_Info_exit(FS_Info self PYTSK3_ATTRIBUTE_UNUSED) { PYTSK3_UNREFERENCED_PARAMETER(self) exit(0); }; VIRTUAL(FS_Info, Object) { VMETHOD(Con) = FS_Info_Con; VMETHOD(open_dir) = FS_Info_open_dir; VMETHOD(open) = FS_Info_open; VMETHOD(open_meta) = FS_Info_open_meta; VMETHOD(exit) = FS_Info_exit; } END_VIRTUAL /* Directory destructor */ static int Directory_dest(Directory self) { if(self == NULL) { return -1; } tsk_fs_dir_close(self->info); self->info = NULL; return 0; } /* Directory constructor */ static Directory Directory_Con(Directory self, FS_Info fs, ZString path, TSK_INUM_T inode) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(fs == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: fs."); return NULL; } if(path == NULL) { self->info = tsk_fs_dir_open_meta(fs->info, inode); } else { self->info = tsk_fs_dir_open(fs->info, path); } if(self->info == NULL) { RaiseError(EIOError, "Unable to open directory: %s", tsk_error_get()); tsk_error_reset(); return NULL; } self->current = 0; self->size = tsk_fs_dir_getsize(self->info); self->fs = fs; // TODO: is this still applicable? // Add a reference to them to ensure they dont get freed until we do. // talloc_reference(self, fs); talloc_set_destructor((void *) self, (int(*)(void *)) &Directory_dest); return self; } static File Directory_next(Directory self) { TSK_FS_FILE *info = NULL; File object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if((self->current < 0) || ((uint64_t) self->current > (uint64_t) self->size)) { RaiseError(EInvalidParameter, "Invalid parameter: current."); return NULL; } if((uint64_t) self->current == (uint64_t) self->size) { return NULL; } info = tsk_fs_dir_get(self->info, self->current); if(info == NULL) { RaiseError(EIOError, "Error opening File: %s", tsk_error_get()); tsk_error_reset(); goto on_error; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(File, File, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. if(CONSTRUCT_INITIALIZE(File, File, Con, object, self->fs, info) == NULL) { goto on_error; } // Tell the File object to manage info. object->info_is_internal = 1; } self->current++; return object; on_error: if(object != NULL) { talloc_free(object); } if(info != NULL) { tsk_fs_file_close(info); } return NULL; }; static void Directory_iter(Directory self) { self->current = 0; }; VIRTUAL(Directory, Object) { VMETHOD(Con) = Directory_Con; VMETHOD(iternext) = Directory_next; VMETHOD(__iter__) = Directory_iter; } END_VIRTUAL /* File destructor */ static int File_dest(File self) { if(self == NULL) { return -1; } if(self->info_is_internal != 0) { // Here internal refers to the File object managing info // not that info was allocated by talloc. tsk_fs_file_close(self->info); } self->info = NULL; return 0; } /* File constructor */ static File File_Con(File self, FS_Info fs, TSK_FS_FILE *info) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(fs == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: fs."); return NULL; } if(info == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: info."); return NULL; } self->fs = fs; self->info = info; // Get the total number of attributes. 
self->max_attr = tsk_fs_file_attr_getsize(info); talloc_set_destructor((void *) self, (int(*)(void *)) &File_dest); return self; }; static uint64_t File_read_random(File self, TSK_OFF_T offset, OUT char *buff, int len, TSK_FS_ATTR_TYPE_ENUM type, int id, TSK_FS_FILE_READ_FLAG_ENUM flags) { ssize_t result; if((id < -1) || (id > 0xffff)) { RaiseError(EInvalidParameter, "id parameter is invalid."); return 0; }; if(id == -1) { result = tsk_fs_file_read(self->info, offset, buff, len, flags); } else { result = tsk_fs_file_read_type(self->info, type, (uint16_t) id, offset, buff, len, flags); }; if(result < 0) { RaiseError(EIOError, "Read error: %s", tsk_error_get()); tsk_error_reset(); return 0; }; return result; }; static Directory File_as_directory(File self) { Directory object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(self->info == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self->info."); return NULL; } #if defined( TSK_VERSION_NUM ) && ( TSK_VERSION_NUM >= 0x040402ff ) if(self->info->meta == NULL || !(TSK_FS_IS_DIR_META(self->info->meta->type))) { #else if(self->info->meta == NULL || self->info->meta->type != TSK_FS_META_TYPE_DIR) { #endif RaiseError(EIOError, "Not a directory"); return NULL; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(Directory, Directory, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. if(CONSTRUCT_INITIALIZE(Directory, Directory, Con, object, self->fs, NULL, self->info->meta->addr) == NULL) { goto on_error; } } return object; on_error: if(object != NULL) { talloc_free(object); } return NULL; }; static Attribute File_iternext(File self) { TSK_FS_ATTR *attribute = NULL; Attribute object = NULL; if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(self->current_attr < 0 || self->current_attr > self->max_attr) { RaiseError(EInvalidParameter, "Invalid parameter: self->current_attr."); return NULL; } if(self->current_attr == self->max_attr) { return NULL; } // It looks like attribute is managed by the SleuthKit. attribute = (TSK_FS_ATTR *) tsk_fs_file_attr_get_idx(self->info, self->current_attr); if(!attribute) { RaiseError(EIOError, "Error opening File: %s", tsk_error_get()); tsk_error_reset(); return NULL; } // CONSTRUCT_CREATE calls _talloc_memdup to allocate memory for the object. object = CONSTRUCT_CREATE(Attribute, Attribute, NULL); if(object != NULL) { // CONSTRUCT_INITIALIZE calls the constructor function on the object. 
if(CONSTRUCT_INITIALIZE(Attribute, Attribute, Con, object, attribute) == NULL) { goto on_error; } } self->current_attr++; return object; on_error: if(object != NULL) { talloc_free(object); } return NULL; }; static void File_iter__(File self) { self->current_attr = 0; }; VIRTUAL(File, Object) { VMETHOD(Con) = File_Con; VMETHOD(read_random) = File_read_random; VMETHOD(as_directory) = File_as_directory; VMETHOD(iternext) = File_iternext; VMETHOD(__iter__) = File_iter__; } END_VIRTUAL /* Attribute constructor */ static Attribute Attribute_Con(Attribute self, TSK_FS_ATTR *info) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(info == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: info."); return NULL; } self->info = info; return self; } static void Attribute_iter(Attribute self) { self->current = self->info->nrd.run; }; static TSK_FS_ATTR_RUN *Attribute_iternext(Attribute self) { TSK_FS_ATTR_RUN *result = NULL; if(self->current == NULL) { return NULL; } result = self->current; self->current = self->current->next; if(self->current == self->info->nrd.run) { self->current = NULL; } return talloc_memdup(NULL, result, sizeof(*result)); } VIRTUAL(Attribute, Object) { VMETHOD(Con) = Attribute_Con; VMETHOD(iternext) = Attribute_iternext; VMETHOD(__iter__) = Attribute_iter; } END_VIRTUAL /* The following implement the volume system. */ /* Volume_Info destructor */ static int Volume_Info_dest(Volume_Info self) { if(self == NULL) { return -1; } tsk_vs_close(self->info); self->info = NULL; return 0; } /* Volume_Info constructor */ static Volume_Info Volume_Info_Con(Volume_Info self, Img_Info img, TSK_VS_TYPE_ENUM type, TSK_OFF_T offset) { if(self == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: self."); return NULL; } if(img == NULL) { RaiseError(EInvalidParameter, "Invalid parameter: img."); return NULL; } self->info = tsk_vs_open((TSK_IMG_INFO *) img->img, offset, type); if(self->info == NULL) { RaiseError(EIOError, "Error opening Volume_Info: %s", tsk_error_get()); tsk_error_reset(); return NULL; } talloc_set_destructor((void *) self, (int(*)(void *)) &Volume_Info_dest); return self; } static void Volume_Info_iter(Volume_Info self) { self->current = 0; }; static TSK_VS_PART_INFO *Volume_Info_iternext(Volume_Info self) { return (TSK_VS_PART_INFO *)tsk_vs_part_get(self->info, self->current++); }; VIRTUAL(Volume_Info, Object) { VMETHOD(Con) = Volume_Info_Con; VMETHOD(__iter__) = Volume_Info_iter; VMETHOD(iternext) = Volume_Info_iternext; } END_VIRTUAL void tsk_init() { //tsk_verbose++; Img_Info_init((Object)&__Img_Info); FS_Info_init((Object)&__FS_Info); Directory_init((Object)&__Directory); File_init((Object)&__File); Attribute_init((Object)&__Attribute); Volume_Info_init((Object)&__Volume_Info); }; pytsk-20190507/tsk3.h000066400000000000000000000152101346423473500142040ustar00rootroot00000000000000/* SleuthKit functions. * * Copyright 2010, Michael Cohen . * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #if !defined( TSK3_H_ ) #define TSK3_H_ #if defined( HAVE_TSK3_LIBTSK_H ) #include #elif defined( HAVE_TSK_LIBTSK_H ) #include #else #error Missing libtsk header #endif #include "class.h" #include "aff4_errors.h" typedef struct { TSK_IMG_INFO base; struct Img_Info_t *container; } Extended_TSK_IMG_INFO; BIND_STRUCT(Extended_TSK_IMG_INFO); /** Bind the following structs */ BIND_STRUCT(TSK_FS_INFO); BIND_STRUCT(TSK_FS_NAME); BIND_STRUCT(TSK_FS_META); BIND_STRUCT(TSK_FS_DIR); BIND_STRUCT(TSK_FS_FILE); BIND_STRUCT(TSK_FS_BLOCK); BIND_STRUCT(TSK_FS_ATTR); BIND_STRUCT(TSK_FS_ATTR_RUN); BIND_STRUCT(TSK_VS_PART_INFO); BIND_STRUCT(TSK_VS_INFO); /** This is a normal IMG_INFO which takes a filename and passes it to TSK. It just uses the standard TSK image handling code to support EWF, AFF etc. This is usually the first object you would instantiate in order to use the TSK library: img = Img_Info(filename) you would then pass it to an FS_Info object: fs = FS_Info(img) Then open an inode or path f = fs.open_dir(inode = 2) */ CLASS(Img_Info, Object) PRIVATE Extended_TSK_IMG_INFO *img; /* Value to indicate if img is managed internally */ PRIVATE int img_is_internal; /* Value to indicate if img is open */ PRIVATE int img_is_open; /* Open an image using the Sleuthkit. * * DEFAULT(type) = TSK_IMG_TYPE_DETECT; * DEFAULT(url) = ""; */ Img_Info METHOD(Img_Info, Con, ZString url, TSK_IMG_TYPE_ENUM type); /* Read a random buffer from the image */ uint64_t METHOD(Img_Info, read, TSK_OFF_T off, OUT char *buf, size_t len); /* Retrieve the size of the image */ uint64_t METHOD(Img_Info, get_size); /* Closes the image */ void METHOD(Img_Info, close); END_CLASS /** This object handles volumes. */ CLASS(Volume_Info, Object) FOREIGN TSK_VS_INFO *info; int current; /** Open a volume using the Sleuthkit. DEFAULT(offset) = 0; DEFAULT(type) = TSK_VS_TYPE_DETECT; */ Volume_Info METHOD(Volume_Info, Con, Img_Info img, TSK_VS_TYPE_ENUM type, TSK_OFF_T offset); void METHOD(Volume_Info, __iter__); TSK_VS_PART_INFO *METHOD(Volume_Info, iternext); END_CLASS // Forward declerations struct FS_Info_t; struct Directory_t; /** An attribute is associated with a file. In some filesystem (e.g. NTFS) a file may contain many attributes. Attributes can be iterated over to obtain the attribute runs (e.g. to recover block allocation information). */ CLASS(Attribute, Object) FOREIGN TSK_FS_ATTR *info; FOREIGN TSK_FS_ATTR_RUN *current; Attribute METHOD(Attribute, Con, TSK_FS_ATTR *info); void METHOD(Attribute, __iter__); TSK_FS_ATTR_RUN *METHOD(Attribute, iternext); END_CLASS /** This represents a file object. A file has both metadata and data streams. Its usually not useful to instantiate this class by itself - you need to call FS_Info.open() or iterate over a Directory() object. This object may be used to read the content of the file using read_random(). Iterating over this object will return all the attributes for this file. */ CLASS(File, Object) FOREIGN TSK_FS_FILE *info; /* Value to indicate if info is managed internally */ PRIVATE int info_is_internal; PRIVATE struct FS_Info_t *fs; int max_attr; int current_attr; File METHOD(File, Con, struct FS_Info_t *fs, TSK_FS_FILE *info); /** Read a buffer from a random location in the file. DEFAULT(flags) = 0; DEFAULT(type) = TSK_FS_ATTR_TYPE_DEFAULT; DEFAULT(id) = -1; */ uint64_t METHOD(File, read_random, TSK_OFF_T offset, OUT char *buff, int len, TSK_FS_ATTR_TYPE_ENUM type, int id, TSK_FS_FILE_READ_FLAG_ENUM flags); /* Obtain a directory object that represents this inode. 
This may be useful if the file is actually a directory and we want to iterate over its contents. */ struct Directory_t *METHOD(File, as_directory); void METHOD(File, __iter__); Attribute METHOD(File, iternext); END_CLASS /** This represents a Directory within the filesystem. You can iterate over this object to obtain all the File objects contained within this directory: for f in d: print f.info.name.name */ CLASS(Directory, Object) TSK_FS_DIR *info; PRIVATE struct FS_Info_t *fs; /* Total number of files in this directory */ size_t size; /* Current file returned in the next iteration */ int current; /* We can open the directory using a path, its inode number. DEFAULT(path) = NULL; DEFAULT(inode) = 0; */ Directory METHOD(Directory, Con, struct FS_Info_t *fs, \ ZString path, TSK_INUM_T inode); /** An iterator of all files in the present directory. */ void METHOD(Directory, __iter__); File METHOD(Directory, iternext); END_CLASS /** This is used to obtain a filesystem object from an Img_Info object. From this FS_Info we can open files or directories by inode, or path. */ CLASS(FS_Info, Object) FOREIGN TSK_FS_INFO *info; PRIVATE Extended_TSK_IMG_INFO *extended_img_info; /** Open the filesystem stored on image. DEFAULT(type) = TSK_FS_TYPE_DETECT; DEFAULT(offset) = 0; */ FS_Info METHOD(FS_Info, Con, Img_Info img, TSK_OFF_T offset, TSK_FS_TYPE_ENUM type); /** A convenience function to open a directory in this image. DEFAULT(path) = NULL; DEFAULT(inode) = 2; */ Directory METHOD(FS_Info, open_dir, ZString path, TSK_INUM_T inode); /** A convenience function to open a file in this image. */ File METHOD(FS_Info, open, ZString path); // Open a file by inode number File METHOD(FS_Info, open_meta, TSK_INUM_T inode); void METHOD(FS_Info, exit); END_CLASS int *tsk_get_current_error(char **buff); void tsk_init(void); #endif /* !TSK3_H_ */ pytsk-20190507/version.txt000066400000000000000000000000101346423473500153650ustar00rootroot0000000000000020190507
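For orientation, the open-image, open-filesystem, list-directory flow that Img_Info, FS_Info and Directory wrap above can also be driven directly through libtsk. The sketch below uses only libtsk calls already made by tsk3.c; the image path is a placeholder, and the four-argument tsk_img_open_utf8() form assumes a SleuthKit release that defines TSK_VERSION_NUM, mirroring the version check in Img_Info_Con().

  #include <stdio.h>
  #include <tsk/libtsk.h>   /* or <tsk3/libtsk.h> on older SleuthKit releases */

  int main(void) {
    const char *const images[1] = { "test_data/image.raw" };   /* placeholder */

    /* Open the image, letting the SleuthKit detect its format. */
    TSK_IMG_INFO *img = tsk_img_open_utf8(1, images, TSK_IMG_TYPE_DETECT, 0);
    if (img == NULL) {
      fprintf(stderr, "tsk_img_open_utf8: %s\n", tsk_error_get());
      return 1;
    }

    /* Open the file system at offset 0, again with auto-detection. */
    TSK_FS_INFO *fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_DETECT);
    if (fs == NULL) {
      fprintf(stderr, "tsk_fs_open_img: %s\n", tsk_error_get());
      tsk_img_close(img);
      return 1;
    }

    /* List the root directory, mirroring FS_Info.open_dir() and iteration
       over the resulting Directory object. */
    TSK_FS_DIR *dir = tsk_fs_dir_open(fs, "/");
    if (dir != NULL) {
      size_t count = tsk_fs_dir_getsize(dir);
      size_t index = 0;

      for (index = 0; index < count; index++) {
        TSK_FS_FILE *file = tsk_fs_dir_get(dir, index);
        if (file == NULL) {
          continue;
        }
        if (file->name != NULL) {
          printf("%s\n", file->name->name);
        }
        tsk_fs_file_close(file);
      }
      tsk_fs_dir_close(dir);
    }

    tsk_fs_close(fs);
    tsk_img_close(img);
    return 0;
  }

In the Python binding the same sequence is pytsk3.Img_Info(), pytsk3.FS_Info() and FS_Info.open_dir(), as exercised by the unit tests above.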