pax_global_header00006660000000000000000000000064142166441140014515gustar00rootroot0000000000000052 comment=2d8ea2943b876b62bbf2ec5e9d9316c51cf6d40d 389-ds-base-389-ds-base-2.0.15/000077500000000000000000000000001421664411400152765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.cargo/000077500000000000000000000000001421664411400164475ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.cargo/config.in000066400000000000000000000002251421664411400202430ustar00rootroot00000000000000[source.crates-io] registry = "https://github.com/rust-lang/crates.io-index" @rust_vendor_sources@ [source.vendored-sources] directory = "./vendor" 389-ds-base-389-ds-base-2.0.15/.clang-format000066400000000000000000000027361421664411400176610ustar00rootroot00000000000000--- # BasedOnStyle: Mozilla AccessModifierOffset: 0 # ConstructorInitializerIndentWidth: 4 # AlignEscapedNewlinesLeft: true # AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false # AlwaysBreakTemplateDeclarations: false # AlwaysBreakBeforeMultilineStrings: false BreakBeforeBinaryOperators: false BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: false BinPackParameters: false ColumnLimit: 0 ConstructorInitializerAllOnOneLineOrOnePerLine: false # DerivePointerBinding: true # ExperimentalAutoDetectBinPacking: false # IndentCaseLabels: true MaxEmptyLinesToKeep: 2 # NamespaceIndentation: None # ObjCSpaceBeforeProtocolList: false # PenaltyBreakBeforeFirstCallParameter: 19 # PenaltyBreakComment: 60 # PenaltyBreakString: 1000 # PenaltyBreakFirstLessLess: 120 # PenaltyExcessCharacter: 1000000 # PenaltyReturnTypeOnItsOwnLine: 200 # PointerBindsToType: true SpacesBeforeTrailingComments: 2 # Cpp11BracedListStyle: false Standard: Cpp03 IndentWidth: 4 TabWidth: 4 UseTab: Never SpaceBeforeAssignmentOperators: true BreakBeforeBraces: Mozilla IndentFunctionDeclarationAfterType: false 
SpacesInParentheses: false SpacesInAngles: false SpaceInEmptyParentheses: false SpacesInCStyleCastParentheses: false SpaceAfterControlStatementKeyword: true ContinuationIndentWidth: 4 SortIncludes: false AlwaysBreakAfterReturnType: TopLevelDefinitions ... 389-ds-base-389-ds-base-2.0.15/.cvsignore000066400000000000000000000000561421664411400172770ustar00rootroot00000000000000Linux built modules.mk pumpkin.dat .cvsignore 389-ds-base-389-ds-base-2.0.15/.dockerignore000066400000000000000000000000671421664411400177550ustar00rootroot00000000000000.git .gitignore ./src/cockpit/389-console/node_modules 389-ds-base-389-ds-base-2.0.15/.github/000077500000000000000000000000001421664411400166365ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.github/ISSUE_TEMPLATE/000077500000000000000000000000001421664411400210215ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000013321421664411400235120ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: needs triage assignees: '' --- **Issue Description** A clear and concise description of what the bug is. **Package Version and Platform:** - Platform: [e.g. Fedora] - Package and version: [e.g. 389-ds-base-1.4.4.4-20200721git5d41dc5a4.fc32.x86_64] - Browser [e.g. chrome, safari] **Steps to Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected results** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Additional context** Add any other context about the problem here. 
389-ds-base-389-ds-base-2.0.15/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000011351421664411400245460ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: '' labels: needs triage assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. 389-ds-base-389-ds-base-2.0.15/.github/daemon.json000066400000000000000000000000761421664411400207770ustar00rootroot00000000000000{ "ipv6": true, "fixed-cidr-v6": "2001:db8:1::/64" } 389-ds-base-389-ds-base-2.0.15/.github/scripts/000077500000000000000000000000001421664411400203255ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.github/scripts/generate_matrix.py000066400000000000000000000010441421664411400240540ustar00rootroot00000000000000import os import glob import json suites = next(os.walk('dirsrvtests/tests/suites/'))[1] # Filter out snmp as it is an empty directory: suites.remove('snmp') # Run each replication test module separately to speed things up suites.remove('replication') repl_tests = glob.glob('dirsrvtests/tests/suites/replication/*_test.py') suites += [repl_test.replace('dirsrvtests/tests/suites/', '') for repl_test in repl_tests] suites.sort() suites_list = [{ "suite": suite} for suite in suites] matrix = {"include": suites_list} 
print(json.dumps(matrix))389-ds-base-389-ds-base-2.0.15/.github/workflows/000077500000000000000000000000001421664411400206735ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/.github/workflows/compile.yml000066400000000000000000000043121421664411400230460ustar00rootroot00000000000000name: Compile on: - pull_request - push jobs: compile: runs-on: ubuntu-latest strategy: fail-fast: false matrix: name: - Fedora 33 GCC - Fedora 33 GCC Strict - Fedora 33 GCC Static Analyzer - Fedora 33 Clang - Fedora 33 Clang -Weverything include: - name: Fedora 33 GCC image: quay.io/389ds/ci-images:fedora compiler: gcc cpp-compiler: g++ cflags: "-O2 -g" - name: Fedora 33 GCC strict image: quay.io/389ds/ci-images:fedora compiler: gcc cpp-compiler: g++ cflags: "-O2 -g -Wall -Wextra -Wundef -Wpointer-arith -Wfloat-equal \ -Wstrict-prototypes -Wstrict-overflow=5 -Wwrite-strings -Winit-self \ -Wuninitialized -Wno-sign-compare -Wshadow -Wformat-security" - name: Fedora 33 GCC Static Analyzer image: quay.io/389ds/ci-images:fedora compiler: gcc cpp-compiler: g++ cflags: "-O2 -g -fanalyzer" - name: Fedora 33 Clang image: quay.io/389ds/ci-images:fedora compiler: clang cpp-compiler: clang++ cflags: "-O2 -g -Qunused-arguments" - name: Fedora 33 Clang -Weverything image: quay.io/389ds/ci-images:fedora compiler: clang cpp-compiler: clang++ cflags: "-O2 -g -Weverything -Qunused-arguments" container: image: ${{ matrix.image }} steps: - uses: actions/checkout@v2 - name: Checkout and configure run: cd $GITHUB_WORKSPACE && autoreconf -fvi && ./configure env: CC: ${{ matrix.compiler }} CXX: ${{ matrix.cpp-compiler }} CFLAGS: ${{ matrix.cflags || env.CFLAGS }} CXXFLAGS: ${{ matrix.cxxflags || env.CXXFLAGS }} LDFLAGS: ${{ matrix.ldflags || env.LDFLAGS }} - uses: ammaraskar/gcc-problem-matcher@master - name: Build using ${{ matrix.compiler }} run: bash -c "(make V=0 2> >(tee /dev/stderr)) > log.txt" - uses: actions/upload-artifact@v2 with: name: ${{ matrix.name }} path: log.txt 
389-ds-base-389-ds-base-2.0.15/.github/workflows/pytest.yml000066400000000000000000000046171421664411400227560ustar00rootroot00000000000000name: Test on: [push, pull_request] jobs: build: name: Build runs-on: ubuntu-20.04 container: image: quay.io/389ds/ci-images:test outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - name: Checkout uses: actions/checkout@v2 - name: Get a list of all test suites id: set-matrix run: echo "::set-output name=matrix::$(python3 .github/scripts/generate_matrix.py)" - name: Build RPMs run: cd $GITHUB_WORKSPACE && SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms - name: Tar build artifacts run: tar -cvf dist.tar dist/ - name: Upload RPMs uses: actions/upload-artifact@v2 with: name: rpms path: dist.tar test: name: Test runs-on: ubuntu-20.04 needs: build strategy: fail-fast: false matrix: ${{ fromJson(needs.build.outputs.matrix) }} steps: - name: Checkout uses: actions/checkout@v2 - name: Install dependencies run: | sudo apt update -y sudo apt install -y docker.io containerd runc sudo cp .github/daemon.json /etc/docker/daemon.json sudo systemctl unmask docker sudo systemctl start docker - name: Download RPMs uses: actions/download-artifact@master with: name: rpms - name: Extract RPMs run: tar xvf dist.tar - name: Run pytest in a container run: | set -x CID=$(sudo docker run -d -h server.example.com --privileged --rm --shm-size=4gb -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test) sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" sudo docker exec $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml --html=pytest.html -v dirsrvtests/tests/suites/${{ matrix.suite }} - name: Make the results file readable by all if: always() run: | sudo chmod -f -v -R a+r pytest.*ml assets sudo chmod -f -v a+x assets - name: Sanitize filename if: always() run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV - name: Upload pytest test 
results if: always() uses: actions/upload-artifact@v2 with: name: pytest-${{ env.PYTEST_SUITE }} path: | pytest.xml pytest.html assets 389-ds-base-389-ds-base-2.0.15/.gitignore000066400000000000000000000136111421664411400172700ustar00rootroot00000000000000autom4te.cache m4/libtool.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 m4/lt~obsolete.m4 Makefile.in aclocal.m4 ar-lib compile config.guess config.h.in config.h.in~ config.sub configure depcomp install-sh ltmain.sh missing Makefile config.h config.log config.status libtool stamp-h1 *~ *.patch .DS_Store .autotools .cproject .project .settings .cache *.a *.rsa *.dirstamp *.la *.lo *.o *.rso *.pyc *.rej __pycache__ .libs .deps rpmbuild rpm/389-ds-base.spec Makefile config.h config.log config.status dberrstrs.h dbscan dirsrv.pc dsktune infadd ldap-agent ldclt ldif libtool makstrdb migratecred mmldif ns-slapd ns-slapd.properties pwdhash rsearch stamp-h1 benchmark_par_sds benchmark_sds doxyfile.stamp tags test-driver test-suite.log test_libsds test_libsds.log test_libsds.trs test_nuncstans test_nuncstans.log test_nuncstans.trs test_nuncstans_stress_large test_nuncstans_stress_small test_nuncstans_stress_small.log test_nuncstans_stress_small.trs test_slapd test_slapd.log test_slapd.trs ldap/admin/src/dirsrv ldap/admin/src/defaults.inf ldap/admin/src/scripts/80upgradednformat.pl ldap/admin/src/scripts/DSCreate.pm ldap/admin/src/scripts/DSMigration.pm ldap/admin/src/scripts/DSSharedLib ldap/admin/src/scripts/DSUpdate.pm ldap/admin/src/scripts/DSUtil.pm ldap/admin/src/scripts/DialogManager.pm ldap/admin/src/scripts/Migration.pm ldap/admin/src/scripts/Setup.pm ldap/admin/src/scripts/SetupDialogs.pm ldap/admin/src/scripts/bak2db ldap/admin/src/scripts/bak2db.pl ldap/admin/src/scripts/cleanallruv.pl ldap/admin/src/scripts/db2bak ldap/admin/src/scripts/db2bak.pl ldap/admin/src/scripts/db2index ldap/admin/src/scripts/db2index.pl ldap/admin/src/scripts/db2ldif ldap/admin/src/scripts/db2ldif.pl ldap/admin/src/scripts/dbverify 
ldap/admin/src/scripts/dn2rdn ldap/admin/src/scripts/dscreate.map ldap/admin/src/scripts/dsorgentries.map ldap/admin/src/scripts/dsupdate.map ldap/admin/src/scripts/fixup-linkedattrs.pl ldap/admin/src/scripts/fixup-memberof.pl ldap/admin/src/scripts/ldif2db ldap/admin/src/scripts/ldif2db.pl ldap/admin/src/scripts/ldif2ldap ldap/admin/src/scripts/migrate-ds.pl ldap/admin/src/scripts/monitor ldap/admin/src/scripts/ns-accountstatus.pl ldap/admin/src/scripts/ns-activate.pl ldap/admin/src/scripts/ns-inactivate.pl ldap/admin/src/scripts/ns-newpwpolicy.pl ldap/admin/src/scripts/remove-ds.pl ldap/admin/src/scripts/repl-monitor.pl ldap/admin/src/scripts/restart-dirsrv ldap/admin/src/scripts/restoreconfig ldap/admin/src/scripts/saveconfig ldap/admin/src/scripts/schema-reload.pl ldap/admin/src/scripts/setup-ds.pl ldap/admin/src/scripts/setup-ds.res ldap/admin/src/scripts/start-dirsrv ldap/admin/src/scripts/stop-dirsrv ldap/admin/src/scripts/suffix2instance ldap/admin/src/scripts/syntax-validate.pl ldap/admin/src/scripts/template-bak2db ldap/admin/src/scripts/template-bak2db.pl ldap/admin/src/scripts/template-cleanallruv.pl ldap/admin/src/scripts/template-db2bak ldap/admin/src/scripts/template-db2bak.pl ldap/admin/src/scripts/template-db2index ldap/admin/src/scripts/template-db2index.pl ldap/admin/src/scripts/template-db2ldif ldap/admin/src/scripts/template-db2ldif.pl ldap/admin/src/scripts/template-dbverify ldap/admin/src/scripts/template-dn2rdn ldap/admin/src/scripts/template-fixup-linkedattrs.pl ldap/admin/src/scripts/template-fixup-memberof.pl ldap/admin/src/scripts/template-fixup-memberuid.pl ldap/admin/src/scripts/template-ldif2db ldap/admin/src/scripts/template-ldif2db.pl ldap/admin/src/scripts/template-ldif2ldap ldap/admin/src/scripts/template-monitor ldap/admin/src/scripts/template-ns-accountstatus.pl ldap/admin/src/scripts/template-ns-activate.pl ldap/admin/src/scripts/template-ns-inactivate.pl ldap/admin/src/scripts/template-ns-newpwpolicy.pl 
ldap/admin/src/scripts/template-restart-slapd ldap/admin/src/scripts/template-restoreconfig ldap/admin/src/scripts/template-saveconfig ldap/admin/src/scripts/template-schema-reload.pl ldap/admin/src/scripts/template-start-slapd ldap/admin/src/scripts/template-stop-slapd ldap/admin/src/scripts/template-suffix2instance ldap/admin/src/scripts/template-syntax-validate.pl ldap/admin/src/scripts/template-upgradednformat ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl ldap/admin/src/scripts/template-verify-db.pl ldap/admin/src/scripts/template-vlvindex ldap/admin/src/scripts/upgradedb ldap/admin/src/scripts/upgradednformat ldap/admin/src/scripts/usn-tombstone-cleanup.pl ldap/admin/src/scripts/verify-db.pl ldap/admin/src/scripts/vlvindex ldap/admin/src/scripts/91reindex.pl ldap/admin/src/scripts/dbmon.sh ldap/admin/src/scripts/ds_selinux_enabled ldap/admin/src/scripts/ds_selinux_port_query ldap/admin/src/scripts/readnsstate ldap/admin/src/scripts/status-dirsrv ldap/admin/src/slapd.inf ldap/admin/src/template-initconfig ldap/ldif/template-baseacis.ldif ldap/ldif/template-bitwise.ldif ldap/ldif/template-country.ldif ldap/ldif/template-dnaplugin.ldif ldap/ldif/template-domain.ldif ldap/ldif/template-dse.ldif ldap/ldif/template-ldapi-autobind.ldif ldap/ldif/template-ldapi-default.ldif ldap/ldif/template-ldapi.ldif ldap/ldif/template-locality.ldif ldap/ldif/template-org.ldif ldap/ldif/template-orgunit.ldif ldap/ldif/template-pampta.ldif ldap/ldif/template-sasl.ldif ldap/ldif/template-state.ldif ldap/ldif/template-suffix-db.ldif ldap/ldif/template-dse-minimal.ldif ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl ldap/servers/snmp/ldap-agent.conf src/pkgconfig/libsds.pc src/pkgconfig/nunc-stans.pc src/pkgconfig/svrcore.pc wrappers/cl-dump wrappers/dbscan wrappers/dirsrv wrappers/dirsrv-snmp wrappers/dsktune wrappers/infadd wrappers/ldap-agent wrappers/ldclt wrappers/ldif wrappers/migratecred wrappers/mmldif wrappers/pwdhash wrappers/repl-monitor wrappers/rsearch 
wrappers/ds_systemd_ask_password_acl docs/slapi.doxy man/man3/ html/ .pytest_cache/ src/lib389/dist/ src/lib389/man/ src/libsds/target/ src/librslapd/target/ dist venv .idea src/cockpit/389-console/cockpit_dist/ src/cockpit/389-console/node_modules/ vendor vendor.tar.gz .history .vscode/launch.json .cargo/config 389-ds-base-389-ds-base-2.0.15/LICENSE000066400000000000000000000023561421664411400163110ustar00rootroot00000000000000Copyright (C) 2015 Red Hat See files 'LICENSE.GPLv3+', 'LICENSE.openssl', and 'LICENSE.mit' for more information. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Additional permission under GPLv3 section 7: If you modify this Program, or any covered work, by linking or combining it with OpenSSL, or a modified version of OpenSSL licensed under the OpenSSL license (https://www.openssl.org/source/license.html), the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts that are licensed under the OpenSSL license as well as that of the covered work. 389-ds-base-389-ds-base-2.0.15/LICENSE.GPLv3+000066400000000000000000001045131421664411400172540ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. 
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. 
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 389-ds-base-389-ds-base-2.0.15/LICENSE.mit000066400000000000000000000025731421664411400171020ustar00rootroot00000000000000/* Copyright (c) 2013 Marek Majkowski Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Original location: https://github.com/majek/csiphash/ Solution inspired by code from: Samuel Neves (supercop/crypto_auth/siphash24/little) djb (supercop/crypto_auth/siphash24/little2) Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c) */ 389-ds-base-389-ds-base-2.0.15/LICENSE.openldap000066400000000000000000000042461421664411400201120ustar00rootroot00000000000000The OpenLDAP Public License Version 2.8, 17 August 2003 Redistribution and use of this software and associated documentation ("Software"), with or without modification, are permitted provided that the following conditions are met: 1. Redistributions in source form must retain copyright statements and notices, 2. Redistributions in binary form must reproduce applicable copyright statements and notices, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution, and 3. Redistributions must contain a verbatim copy of this document. The OpenLDAP Foundation may revise this license from time to time. Each revision is distinguished by a version number. You may use this Software under terms of this license revision or under the terms of any subsequent revision of the license. THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The names of the authors and copyright holders must not be used in advertising or otherwise to promote the sale, use or other dealing in this Software without specific, written prior permission. Title to copyright in this Software shall at all times remain with copyright holders. OpenLDAP is a registered trademark of the OpenLDAP Foundation. Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, California, USA. All Rights Reserved. Permission to copy and distribute verbatim copies of this document is granted. 389-ds-base-389-ds-base-2.0.15/LICENSE.openssl000066400000000000000000000010251421664411400177630ustar00rootroot00000000000000Additional permission under GPLv3 section 7: If you modify this Program, or any covered work, by linking or combining it with OpenSSL, or a modified version of OpenSSL licensed under the OpenSSL license (https://www.openssl.org/source/license.html), the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts that are licensed under the OpenSSL license as well as that of the covered work. 
389-ds-base-389-ds-base-2.0.15/Makefile.am000066400000000000000000002263471421664411400173500ustar00rootroot00000000000000# look for included m4 files in the ./m4/ directory ACLOCAL_AMFLAGS = -I m4 NULLSTRING := SPACE := $(NULLSTRING) # the space is between the ) and the # COLON := $(NULLSTRING):# a colon QUOTE := $(NULLSTRING)"# a double quote" #------------------------ # Compiler Flags #------------------------ # # First, we setup the definitions from configure.ac # PYTHON := python3 if DEBUG # This allows sccache to work correctly with C files. BUILDNUM := "\"0000.000.0000\"" else BUILDNUM := $(shell $(srcdir)/buildnum.py) endif NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM))) DEBUG_DEFINES = @debug_defs@ DEBUG_CFLAGS = @debug_cflags@ DEBUG_CXXFLAGS = @debug_cxxflags@ GCCSEC_CFLAGS = @gccsec_cflags@ if CLANG_ENABLE ASAN_CFLAGS = @asan_cflags@ else if enable_asan ASAN_CFLAGS = @asan_cflags@ -lasan else ASAN_CFLAGS = @asan_cflags@ endif endif MSAN_CFLAGS = @msan_cflags@ TSAN_CFLAGS = @tsan_cflags@ UBSAN_CFLAGS = @ubsan_cflags@ if CFI_ENABLE # https://clang.llvm.org/docs/ControlFlowIntegrity.html#available-schemes # vcall is "forward edge" cfi which is what gives a lot of benefit security wise. CFI_CFLAGS = -flto=thin -fsanitize=cfi-cast-strict,cfi-vcall -fvisibility=hidden # Settings we could use in the future # -fsanitize=cfi-icall,cfi-nvcall,cfi-derived-cast,cfi-unrelated-cast,cfi-mfcall else CFI_CFLAGS = endif SYSTEMD_DEFINES = @systemd_defs@ CMOCKA_INCLUDES = $(CMOCKA_CFLAGS) PROFILING_DEFINES = @profiling_defs@ SYSTEMTAP_DEFINES = @systemtap_defs@ NSPR_INCLUDES = $(NSPR_CFLAGS) # Rust inclusions. 
if RUST_ENABLE # Rust enabled RUST_ON = 1 CARGO_FLAGS = @cargo_defs@ if CLANG_ENABLE RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ RUSTC_LINK_FLAGS = -C link-arg=-fuse-ld=lld else RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ RUSTC_LINK_FLAGS = endif RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil RUST_DEFINES = -DRUST_ENABLE if RUST_ENABLE_OFFLINE RUST_OFFLINE = --locked --offline else RUST_OFFLINE = endif else # Rust disabled RUST_ON = 0 CARGO_FLAGS = RUSTC_FLAGS = RUST_LDFLAGS = RUST_DEFINES = endif if CLANG_ENABLE CLANG_ON = 1 CLANG_LDFLAGS = -latomic -fuse-ld=lld EXPORT_LDFLAGS = else CLANG_ON = 0 CLANG_LDFLAGS = if DEBUG EXPORT_LDFLAGS = -rdynamic endif endif REWRITERS_INCLUDES = -I$(srcdir)/src/rewriters/ SVRCORE_INCLUDES = -I$(srcdir)/src/svrcore/src/ if CLANG_ENABLE # clang complains about the -U. DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" else # the -U undefines these symbols - should use the corresponding DS_ ones instead - see configure.ac DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" \ -UPACKAGE_VERSION -UPACKAGE_TARNAME -UPACKAGE_STRING -UPACKAGE_BUGREPORT endif DS_INCLUDES = -I$(srcdir)/ldap/include -I$(srcdir)/ldap/servers/slapd -I$(srcdir)/include -I. if enable_asan ASAN_ON = 1 SANITIZER = ASAN else ASAN_ON = 0 endif if enable_msan MSAN_ON = 1 SANITIZER = MSAN else MSAN_ON = 0 endif if enable_tsan TSAN_ON = 1 SANITIZER = TSAN else TSAN_ON = 0 endif if enable_ubsan UBSAN_ON = 1 SANITIZER = UBSAN else UBSAN_ON = 0 endif if with_systemd WITH_SYSTEMD = 1 else WITH_SYSTEMD = 0 endif # these paths are dependent on the settings of prefix and exec_prefix which may be specified # at make time. So we cannot use AC_DEFINE in the configure.ac because that would set the # values prior to their being defined. 
Defining them here ensures that they are properly # expanded before use. See create_instance.h for more details. The quoting ensures that # the values are quoted for the shell command, and the value expands to a quoted string # value in the header file e.g. # #define LOCALSTATEDIR "/var" # without the quotes, it would be # #define LOCALSTATEDIR /var # which would be an error PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfdir)\"" \ -DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \ -DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \ -DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \ -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \ -DLOCALRUNDIR="\"$(localrundir)\"" # Now that we have all our defines in place, setup the CPPFLAGS # These flags are the "must have" for all components AM_CPPFLAGS = $(DEBUG_DEFINES) $(PROFILING_DEFINES) $(SYSTEMTAP_DEFINES) $(RUST_DEFINES) AM_CFLAGS = $(DEBUG_CFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) AM_CXXFLAGS = $(DEBUG_CXXFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) # Flags for Directory Server # WARNING: This needs a clean up, because slap.h is a horrible mess and is publically exposed! DSPLUGIN_CPPFLAGS = $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES) @openldap_inc@ $(NSS_CFLAGS) $(NSPR_INCLUDES) $(SYSTEMD_CFLAGS) # This should give access to internal headers only for tests!!! 
DSINTERNAL_CPPFLAGS = -I$(srcdir)/include/ldaputil # Flags for Datastructure Library #------------------------ # Linker Flags #------------------------ CMOCKA_LINKS = $(CMOCKA_LIBS) PROFILING_LINKS = @profiling_links@ NSPR_LINK = $(NSPR_LIBS) NSS_LINK = $(NSS_LIBS) # with recent versions of openldap - if you link with both ldap_r and ldap, the # shared lib _fini for one will stomp on the other, and the program will crash LDAPSDK_LINK_NOTHR = @openldap_lib@ -lldap@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ LDAPSDK_LINK = @openldap_lib@ -lldap_r@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ ldaplib = @ldaplib@ ldaplib_defs = @ldaplib_defs@ DB_LINK = @db_lib@ -ldb-@db_libver@ DB_IMPL = libback-ldbm.la SASL_LINK = $(SASL_LIBS) NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@ PAM_LINK = -lpam EVENT_LINK = $(EVENT_LIBS) PW_CRACK_LINK = -lcrack LIBSOCKET=@LIBSOCKET@ LIBNSL=@LIBNSL@ LIBDL=@LIBDL@ LIBCSTD=@LIBCSTD@ LIBCRUN=@LIBCRUN@ THREADLIB=@THREADLIB@ LIBCRYPT=@LIBCRYPT@ # We need to make sure that libpthread is linked before libc on HP-UX. if HPUX AM_LDFLAGS = -lpthread else #AM_LDFLAGS = -Wl,-z,defs AM_LDFLAGS = $(PW_CRACK_LINK) $(RUST_LDFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CLANG_LDFLAGS) $(EXPORT_LDFLAGS) endif #end hpux # https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info # So, libtool library versions are described by three integers: # # current # # The most recent interface number that this library implements. # revision # # The implementation number of the current interface. # age # # The difference between the newest and oldest interfaces that this library implements. In other words, the library implements all the interface numbers in the range from number current - age to current. # # Here are a set of rules to help you update your library version information: # # Start with version information of ‘0:0:0’ for each libtool library. 
# Update the version information only immediately before a public release of your software. More frequent updates are unnecessary, and only guarantee that the current interface number gets larger faster. # If the library source code has changed at all since the last update, then increment revision (‘c:r:a’ becomes ‘c:r+1:a’). # If any interfaces have been added, removed, or changed since the last update, increment current, and set revision to 0. # If any interfaces have been added since the last public release, then increment age. # If any interfaces have been removed or changed since the last public release, then set age to 0. SLAPD_LDFLAGS = -version-info 1:0:1 #------------------------ # Generated Sources #------------------------ BUILT_SOURCES = dberrstrs.h \ $(POLICY_FC) if RUST_ENABLE BUILT_SOURCES += rust-slapi-private.h rust-nsslapd-private.h endif if enable_posix_winsync LIBPOSIX_WINSYNC_PLUGIN = libposix-winsync-plugin.la endif CLEANFILES = dberrstrs.h ns-slapd.properties \ ldap/admin/src/template-initconfig \ ldap/ldif/template-baseacis.ldif ldap/ldif/template-bitwise.ldif ldap/ldif/template-country.ldif \ ldap/ldif/template-dnaplugin.ldif ldap/ldif/template-domain.ldif ldap/ldif/template-dse.ldif \ ldap/ldif/template-dse-minimal.ldif \ ldap/ldif/template-ldapi-autobind.ldif ldap/ldif/template-ldapi-default.ldif \ ldap/ldif/template-ldapi.ldif ldap/ldif/template-locality.ldif ldap/ldif/template-org.ldif \ ldap/ldif/template-orgunit.ldif ldap/ldif/template-pampta.ldif ldap/ldif/template-sasl.ldif \ ldap/ldif/template-state.ldif ldap/ldif/template-suffix-db.ldif \ doxyfile.stamp \ $(NULL) if RUST_ENABLE CLEANFILES += rust-slapi-private.h endif clean-local: -rm -rf dist -rm -rf $(abs_top_builddir)/html -rm -rf $(abs_top_builddir)/man/man3 if RUST_ENABLE -rm -rf $(abs_top_builddir)/rs endif dberrstrs.h: Makefile $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py $(srcdir)/ldap/servers/slapd/back-ldbm/dbimpl.h $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py -i 
$(srcdir)/ldap/servers/slapd/back-ldbm -o . #------------------------ # Install Paths #------------------------ prefixdir = @prefixdir@ configdir = $(sysconfdir)@configdir@ sampledatadir = $(datadir)@sampledatadir@ systemschemadir = $(datadir)@systemschemadir@ propertydir = $(datadir)@propertydir@ schemadir = $(sysconfdir)@schemadir@ serverdir = $(libdir)/@serverdir@ serverplugindir = $(libdir)@serverplugindir@ taskdir = $(datadir)@scripttemplatedir@ systemdsystemunitdir = @with_systemdsystemunitdir@ systemdsystemunitdropindir = @with_systemdsystemunitdir@/$(PACKAGE_NAME)@.service.d systemdsystemconfdir = @with_systemdsystemconfdir@ systemdgroupname = @with_systemdgroupname@ initdir = @initdir@ initconfigdir = $(sysconfdir)@initconfigdir@ instconfigdir = @instconfigdir@ perldir = $(libdir)@perldir@ pythondir = $(libdir)@pythondir@ infdir = $(datadir)@infdir@ mibdir = $(datadir)@mibdir@ updatedir = $(datadir)@updatedir@ pkgconfigdir = $(libdir)/pkgconfig serverincdir = $(includedir)/@serverincdir@ gdbautoloaddir = $(prefixdir)/share/gdb/auto-load$(sbindir) cockpitdir = $(prefixdir)/share/cockpit@cockpitdir@ metainfodir = $(prefixdir)/share/metainfo/389-console tmpfiles_d = @tmpfiles_d@ # This has to be hardcoded to /lib - $libdir changes between lib/lib64, but # sysctl.d is always in /lib. sysctldir = @prefixdir@/lib/sysctl.d defaultuser=@defaultuser@ defaultgroup=@defaultgroup@ #------------------------ # Build Products #------------------------ sbin_PROGRAMS = ns-slapd ldap-agent bin_PROGRAMS = dbscan \ ldclt \ pwdhash # ---------------------------------------------------------------------------------------- # This odd looking definition is to keep the libraries in ORDER that they are needed. rsds # is needed by sds, which is needed by ns. 
So we have a blank LTLIB, then append in order # based on defines # ---------------------------------------------------------------------------------------- server_LTLIBRARIES = libslapd.la libldaputil.la libns-dshttpd.la librewriters.la lib_LTLIBRARIES = libsvrcore.la # this is how to add optional plugins if enable_pam_passthru LIBPAM_PASSTHRU_PLUGIN = libpam-passthru-plugin.la enable_pam_passthru = 1 endif if enable_dna LIBDNA_PLUGIN = libdna-plugin.la enable_dna = 1 endif if enable_bitwise LIBBITWISE_PLUGIN = libbitwise-plugin.la enable_bitwise = 1 endif if enable_acctpolicy LIBACCTPOLICY_PLUGIN = libacctpolicy-plugin.la LIBACCTPOLICY_SCHEMA = $(srcdir)/ldap/schema/60acctpolicy.ldif enable_acctpolicy = 1 endif serverplugin_LTLIBRARIES = libacl-plugin.la \ libaddn-plugin.la \ libattr-unique-plugin.la \ libautomember-plugin.la libback-ldbm.la libchainingdb-plugin.la \ libcollation-plugin.la libcos-plugin.la libderef-plugin.la \ libpbe-plugin.la libdistrib-plugin.la \ liblinkedattrs-plugin.la libmanagedentries-plugin.la \ libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \ libcontentsync-plugin.la \ libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \ libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ libviews-plugin.la libschemareload-plugin.la libusn-plugin.la \ libacctusability-plugin.la librootdn-access-plugin.la \ libwhoami-plugin.la $(LIBACCTPOLICY_PLUGIN) \ $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \ $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN) if RUST_ENABLE serverplugin_LTLIBRARIES += libentryuuid-plugin.la libentryuuid-syntax-plugin.la \ libpwdchan-plugin.la endif noinst_LIBRARIES = libavl.a dist_noinst_HEADERS = \ include/i18n.h \ include/netsite.h \ include/base/crit.h \ include/base/dbtbase.h \ include/base/ereport.h \ include/base/file.h \ include/base/fsmutex.h \ include/base/plist.h \ include/base/pool.h \ include/base/shexp.h \ include/base/systems.h \ include/base/systhr.h 
\ include/base/util.h \ include/ldaputil/cert.h \ include/ldaputil/certmap.h \ include/ldaputil/dbconf.h \ include/ldaputil/encode.h \ include/ldaputil/errors.h \ include/ldaputil/init.h \ include/ldaputil/ldapauth.h \ include/ldaputil/ldaputil.h \ include/libaccess/aclerror.h \ include/libaccess/acleval.h \ include/libaccess/aclglobal.h \ include/libaccess/acl.h \ include/libaccess/aclproto.h \ include/libaccess/aclstruct.h \ include/libaccess/attrec.h \ include/libaccess/authdb.h \ include/libaccess/dbtlibaccess.h \ include/libaccess/dnfstruct.h \ include/libaccess/ipfstruct.h \ include/libaccess/las.h \ include/libaccess/nsautherr.h \ include/libaccess/nsauth.h \ include/libaccess/nserror.h \ include/libaccess/symbols.h \ include/libaccess/userauth.h \ include/libaccess/usi.h \ include/libaccess/usrcache.h \ include/libadmin/dbtlibadmin.h \ include/libadmin/libadmin.h \ include/public/netsite.h \ include/public/nsapi.h \ include/public/base/systems.h \ include/public/nsacl/aclapi.h \ include/public/nsacl/acldef.h \ include/public/nsacl/nserrdef.h \ include/public/nsacl/plistdef.h \ ldap/include/avl.h \ ldap/include/dblayer.h \ ldap/include/disptmpl.h \ ldap/include/ldaprot.h \ ldap/include/portable.h \ ldap/include/regex.h \ ldap/include/srchpref.h \ ldap/include/sysexits-compat.h \ ldap/servers/plugins/addn/addn.h \ ldap/servers/plugins/collation/config.h \ ldap/servers/plugins/collation/collate.h \ ldap/servers/plugins/collation/orfilter.h \ ldap/servers/plugins/chainingdb/cb.h \ ldap/servers/plugins/deref/deref.h \ ldap/servers/plugins/acctpolicy/acctpolicy.h \ ldap/servers/plugins/posix-winsync/posix-wsp-ident.h \ ldap/servers/plugins/posix-winsync/posix-group-func.h \ ldap/servers/plugins/roles/roles_cache.h \ ldap/servers/plugins/usn/usn.h \ ldap/servers/plugins/pwdstorage/pwdstorage.h \ ldap/servers/plugins/pwdstorage/md5.h \ ldap/servers/plugins/acl/acl.h \ ldap/servers/plugins/linkedattrs/linked_attrs.h \ 
ldap/servers/plugins/rootdn_access/rootdn_access.h \ ldap/servers/plugins/acct_usability/acct_usability.h \ ldap/servers/plugins/retrocl/retrocl.h \ ldap/servers/plugins/uiduniq/plugin-utils.h \ ldap/servers/plugins/memberof/memberof.h \ ldap/servers/plugins/replication/cl5_api.h \ ldap/servers/plugins/replication/llist.h \ ldap/servers/plugins/replication/repl_shared.h \ ldap/servers/plugins/replication/csnpl.h \ ldap/servers/plugins/replication/cl5.h \ ldap/servers/plugins/replication/repl-session-plugin.h \ ldap/servers/plugins/replication/windows_prot_private.h \ ldap/servers/plugins/replication/repl_helper.h \ ldap/servers/plugins/replication/repl5.h \ ldap/servers/plugins/replication/cl5_test.h \ ldap/servers/plugins/replication/repl5_ruv.h \ ldap/servers/plugins/replication/cl5_clcache.h \ ldap/servers/plugins/replication/cl_crypt.h \ ldap/servers/plugins/replication/urp.h \ ldap/servers/plugins/replication/winsync-plugin.h \ ldap/servers/plugins/replication/windowsrepl.h \ ldap/servers/plugins/replication/repl5_prot_private.h \ ldap/servers/plugins/pam_passthru/pam_passthru.h \ ldap/servers/plugins/syntaxes/syntax.h \ ldap/servers/plugins/cos/cos_cache.h \ ldap/servers/plugins/sync/sync.h \ ldap/servers/plugins/passthru/passthru.h \ ldap/servers/plugins/rever/rever.h \ ldap/servers/plugins/automember/automember.h \ ldap/servers/plugins/mep/mep.h \ ldap/servers/slapd/agtmmap.h \ ldap/servers/slapd/auth.h \ ldap/servers/slapd/csngen.h \ ldap/servers/slapd/disconnect_errors.h \ ldap/servers/slapd/disconnect_error_strings.h \ ldap/servers/slapd/fe.h \ ldap/servers/slapd/filter.h \ ldap/servers/slapd/getopt_ext.h \ ldap/servers/slapd/getsocketpeer.h \ ldap/servers/slapd/intrinsics.h \ ldap/servers/slapd/log.h \ ldap/servers/slapd/openldapber.h \ ldap/servers/slapd/pblock_v3.h \ ldap/servers/slapd/poll_using_select.h \ ldap/servers/slapd/prerrstrs.h \ ldap/servers/slapd/protect_db.h \ ldap/servers/slapd/proto-slap.h \ ldap/servers/slapd/pw.h \ 
ldap/servers/slapd/pw_verify.h \ ldap/servers/slapd/secerrstrs.h \ ldap/servers/slapd/slap.h \ ldap/servers/slapd/slapi_pal.h \ ldap/servers/slapd/slapi-plugin-compat4.h \ ldap/servers/slapd/slapi-plugin.h \ ldap/servers/slapd/slapi-private.h \ ldap/servers/slapd/snmp_collator.h \ ldap/servers/slapd/sslerrstrs.h \ ldap/servers/slapd/statechange.h \ ldap/servers/slapd/uuid.h \ ldap/servers/slapd/vattr_spi.h \ ldap/servers/slapd/views.h \ ldap/servers/slapd/back-ldbm/attrcrypt.h \ ldap/servers/slapd/back-ldbm/back-ldbm.h \ ldap/servers/slapd/back-ldbm/dbimpl.h \ ldap/servers/slapd/back-ldbm/dblayer.h \ ldap/servers/slapd/back-ldbm/import.h \ ldap/servers/slapd/back-ldbm/ldbm_config.h \ ldap/servers/slapd/back-ldbm/proto-back-ldbm.h \ ldap/servers/slapd/back-ldbm/vlv_key.h \ ldap/servers/slapd/back-ldbm/vlv_srch.h \ ldap/servers/slapd/tools/ldaptool.h \ ldap/servers/slapd/tools/ldaptool-sasl.h \ ldap/servers/slapd/tools/ldclt/ldap-private.h \ ldap/servers/slapd/tools/ldclt/ldclt.h \ ldap/servers/slapd/tools/ldclt/port.h \ ldap/servers/slapd/tools/ldclt/remote.h \ ldap/servers/slapd/tools/ldclt/scalab01.h \ ldap/servers/slapd/tools/ldclt/utils.h \ ldap/servers/snmp/ldap-agent.h \ ldap/systools/pio.h \ lib/base/lexer_pvt.h \ lib/base/plist_pvt.h \ lib/ldaputil/ldaputili.h \ lib/libaccess/access_plhash.h \ lib/libaccess/aclcache.h \ lib/libaccess/aclpriv.h \ lib/libaccess/aclscan.h \ lib/libaccess/acl.tab.h \ lib/libaccess/aclutil.h \ lib/libaccess/lasdns.h \ lib/libaccess/las.h \ lib/libaccess/lasip.h \ lib/libaccess/ldapauth.h \ lib/libaccess/oneeval.h \ lib/libaccess/parse.h \ lib/libaccess/permhash.h \ lib/libsi18n/getstrmem.h \ lib/libsi18n/gsslapd.h \ lib/libsi18n/reshash.h \ lib/libsi18n/txtfile.h if ENABLE_CMOCKA dist_noinst_HEADERS += \ test/test_slapd.h endif dist_noinst_DATA = \ $(srcdir)/buildnum.py \ $(srcdir)/ldap/admin/src/*.in \ $(srcdir)/ldap/admin/src/scripts/*.py \ $(srcdir)/ldap/admin/src/scripts/ds-replcheck \ $(srcdir)/ldap/ldif/*.in \ 
$(srcdir)/ldap/ldif/*.ldif \ $(srcdir)/ldap/schema/*.ldif \ $(srcdir)/ldap/schema/slapd-collations.conf \ $(srcdir)/ldap/servers/snmp/ldap-agent.conf \ $(srcdir)/ldap/servers/snmp/redhat-directory.mib \ $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py \ $(srcdir)/lib/ldaputil/certmap.conf \ $(srcdir)/m4 \ $(srcdir)/rpm/389-ds-base.spec.in \ $(srcdir)/rpm/389-ds-base-devel.README \ $(srcdir)/rpm/389-ds-base-git.sh \ $(srcdir)/README.md \ $(srcdir)/LICENSE \ $(srcdir)/LICENSE.* \ $(srcdir)/VERSION.sh \ $(srcdir)/wrappers/*.in \ $(srcdir)/dirsrvtests \ $(srcdir)/src/lib389/setup.py \ $(srcdir)/src/lib389 #------------------------ # Installed Files #------------------------ config_DATA = $(srcdir)/lib/ldaputil/certmap.conf \ $(srcdir)/ldap/schema/slapd-collations.conf \ ldap/servers/snmp/ldap-agent.conf # the schema files in this list are either not # standard schema, not tested, or not compatible # with the default schema e.g. there is # considerable overlap of 60changelog.ldif and 01common.ldif # and 60inetmail.ldif and 50ns-mail.ldif among others sampledata_DATA = $(srcdir)/ldap/ldif/Ace.ldif \ $(srcdir)/ldap/ldif/European.ldif \ $(srcdir)/ldap/ldif/Eurosuffix.ldif \ $(srcdir)/ldap/ldif/Example.ldif \ $(srcdir)/ldap/ldif/Example-roles.ldif \ $(srcdir)/ldap/ldif/Example-views.ldif \ $(srcdir)/ldap/ldif/template.ldif \ ldap/ldif/template-dse.ldif \ ldap/ldif/template-dse-minimal.ldif \ ldap/ldif/template-suffix-db.ldif \ ldap/ldif/template-ldapi.ldif \ ldap/ldif/template-ldapi-default.ldif \ ldap/ldif/template-ldapi-autobind.ldif \ ldap/ldif/template-org.ldif \ ldap/ldif/template-domain.ldif \ ldap/ldif/template-state.ldif \ ldap/ldif/template-locality.ldif \ ldap/ldif/template-country.ldif \ ldap/ldif/template-orgunit.ldif \ ldap/ldif/template-baseacis.ldif \ ldap/ldif/template-sasl.ldif \ $(srcdir)/ldap/schema/10rfc2307compat.ldif \ $(srcdir)/ldap/schema/10rfc2307bis.ldif \ $(srcdir)/ldap/schema/60changelog.ldif \ $(srcdir)/ldap/schema/60inetmail.ldif \ 
$(srcdir)/ldap/schema/60krb5kdc.ldif \ $(srcdir)/ldap/schema/60kerberos.ldif \ $(srcdir)/ldap/schema/60nis.ldif \ $(srcdir)/ldap/schema/60qmail.ldif \ $(srcdir)/ldap/schema/60radius.ldif \ $(srcdir)/ldap/schema/60rfc4876.ldif \ $(srcdir)/ldap/schema/60samba.ldif \ $(srcdir)/ldap/schema/60sendmail.ldif \ $(srcdir)/ldap/schema/dsee.schema \ $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-FamilyNames \ $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-GivenNames \ $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-OrgUnits \ $(LIBPRESENCE_SCHEMA) systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \ $(srcdir)/ldap/schema/01core389.ldif \ $(srcdir)/ldap/schema/02common.ldif \ $(srcdir)/ldap/schema/05rfc2927.ldif \ $(srcdir)/ldap/schema/05rfc4523.ldif \ $(srcdir)/ldap/schema/05rfc4524.ldif \ $(srcdir)/ldap/schema/06inetorgperson.ldif \ $(srcdir)/ldap/schema/10automember-plugin.ldif \ $(srcdir)/ldap/schema/10dna-plugin.ldif \ $(srcdir)/ldap/schema/10mep-plugin.ldif \ $(srcdir)/ldap/schema/10rfc2307compat.ldif \ $(srcdir)/ldap/schema/20subscriber.ldif \ $(srcdir)/ldap/schema/25java-object.ldif \ $(srcdir)/ldap/schema/28pilot.ldif \ $(srcdir)/ldap/schema/30ns-common.ldif \ $(srcdir)/ldap/schema/50ns-admin.ldif \ $(srcdir)/ldap/schema/50ns-certificate.ldif \ $(srcdir)/ldap/schema/50ns-directory.ldif \ $(srcdir)/ldap/schema/50ns-mail.ldif \ $(srcdir)/ldap/schema/50ns-value.ldif \ $(srcdir)/ldap/schema/50ns-web.ldif \ $(srcdir)/ldap/schema/60pam-plugin.ldif \ $(srcdir)/ldap/schema/60posix-winsync-plugin.ldif \ $(srcdir)/ldap/schema/60autofs.ldif \ $(srcdir)/ldap/schema/60eduperson.ldif \ $(srcdir)/ldap/schema/60mozilla.ldif \ $(srcdir)/ldap/schema/60pureftpd.ldif \ $(srcdir)/ldap/schema/60rfc2739.ldif \ $(srcdir)/ldap/schema/60rfc3712.ldif \ $(srcdir)/ldap/schema/60sabayon.ldif \ $(srcdir)/ldap/schema/60samba3.ldif \ $(srcdir)/ldap/schema/60sudo.ldif \ $(srcdir)/ldap/schema/60trust.ldif \ $(srcdir)/ldap/schema/60nss-ldap.ldif \ $(LIBACCTPOLICY_SCHEMA) if RUST_ENABLE systemschema_DATA += 
$(srcdir)/ldap/schema/03entryuuid.ldif endif schema_DATA = $(srcdir)/ldap/schema/99user.ldif libexec_SCRIPTS = if SYSTEMD libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh endif if ENABLE_COCKPIT install-data-hook: if [ "$(srcdir)" != "." ]; then cp -r $(srcdir)/src/cockpit src ; fi mkdir -p src/cockpit/389-console/cockpit_dist/ mkdir -p $(DESTDIR)$(cockpitdir) rsync -rupE src/cockpit/389-console/cockpit_dist/ $(DESTDIR)$(cockpitdir) mkdir -p $(DESTDIR)$(metainfodir) rsync -up src/cockpit/389-console/org.port389.cockpit_console.metainfo.xml $(DESTDIR)$(metainfodir)/org.port389.cockpit_console.metainfo.xml endif sbin_SCRIPTS = bin_SCRIPTS = # For scripts that are "as is". dist_bin_SCRIPTS = ldap/admin/src/scripts/ds-replcheck \ ldap/admin/src/scripts/ds-logpipe.py dist_bin_SCRIPTS += ldap/admin/src/logconv.pl python_DATA = ldap/admin/src/scripts/failedbinds.py \ ldap/admin/src/scripts/logregex.py gdbautoload_DATA = ldap/admin/src/scripts/ns-slapd-gdb.py dist_sysctl_DATA = ldap/admin/src/70-dirsrv.conf if SYSTEMD # yes, that is an @ in the filename . . . 
systemdsystemunit_DATA = wrappers/$(PACKAGE_NAME)@.service \ wrappers/$(systemdgroupname) \ wrappers/$(PACKAGE_NAME)-snmp.service if with_sanitizer systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/xsan.conf else systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/custom.conf endif else if INITDDIR init_SCRIPTS = wrappers/$(PACKAGE_NAME) \ wrappers/$(PACKAGE_NAME)-snmp endif endif if INITDDIR initconfig_DATA = ldap/admin/src/$(PACKAGE_NAME) endif inf_DATA = ldap/admin/src/slapd.inf \ ldap/admin/src/defaults.inf mib_DATA = ldap/servers/snmp/redhat-directory.mib pkgconfig_DATA = src/pkgconfig/dirsrv.pc \ src/pkgconfig/svrcore.pc #------------------------ # header files #------------------------ serverinc_HEADERS = ldap/servers/plugins/replication/repl-session-plugin.h \ ldap/servers/slapd/slapi_pal.h \ ldap/servers/slapd/slapi-plugin.h \ ldap/servers/plugins/replication/winsync-plugin.h include_HEADERS = src/svrcore/src/svrcore.h #------------------------ # man pages #------------------------ dist_man_MANS = man/man1/dbscan.1 \ man/man1/ds-logpipe.py.1 \ man/man1/ds-replcheck.1 \ man/man1/ldap-agent.1 \ man/man1/ldclt.1 \ man/man1/logconv.pl.1 \ man/man1/pwdhash.1 \ man/man5/99user.ldif.5 \ man/man8/ns-slapd.8 \ man/man5/certmap.conf.5 \ man/man5/dirsrv.5 \ man/man5/dirsrv.systemd.5 \ man/man5/slapd-collations.conf.5 #//////////////////////////////////////////////////////////////// # # Static Server Libraries # #//////////////////////////////////////////////////////////////// #------------------------ # libavl #------------------------ libavl_a_SOURCES = ldap/libraries/libavl/avl.c libavl_a_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) #------------------------ # libldaputil #------------------------ libldaputil_la_SOURCES = lib/ldaputil/cert.c \ lib/ldaputil/certmap.c \ lib/ldaputil/dbconf.c \ lib/ldaputil/encode.c \ lib/ldaputil/errors.c \ lib/ldaputil/init.c \ lib/ldaputil/ldapauth.c \ lib/ldaputil/vtable.c libldaputil_la_CPPFLAGS 
= $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) -I$(srcdir)/lib/ldaputil libldaputil_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) libldaputil_la_LDFLAGS = $(AM_LDFLAGS) #//////////////////////////////////////////////////////////////// # # Dynamic Server Libraries # #//////////////////////////////////////////////////////////////// #------------------------ # librewriters #------------------------ librewriters_la_SOURCES = \ src/rewriters/adfilter.c librewriters_la_LDFLAGS = $(AM_LDFLAGS) librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS) librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) #------------------------ # libsvrcore #------------------------ libsvrcore_la_SOURCES = \ src/svrcore/src/alt.c \ src/svrcore/src/cache.c \ src/svrcore/src/errors.c \ src/svrcore/src/file.c \ src/svrcore/src/ntgetpin.c \ src/svrcore/src/ntresource.h \ src/svrcore/src/pin.c \ src/svrcore/src/pk11.c \ src/svrcore/src/std.c \ src/svrcore/src/systemd-ask-pass.c \ src/svrcore/src/std-systemd.c \ src/svrcore/src/user.c libsvrcore_la_LDFLAGS = $(AM_LDFLAGS) libsvrcore_la_CPPFLAGS = $(AM_CPPFLAGS) $(SVRCORE_INCLUDES) $(DSPLUGIN_CPPFLAGS) libsvrcore_la_LIBADD = $(NSS_LINK) $(NSPR_LINK) if RUST_ENABLE noinst_LTLIBRARIES = librslapd.la librnsslapd.la libentryuuid.la libentryuuid_syntax.la \ libpwdchan.la ### Why does this exist? # # Both cargo and autotools are really opinionated. It's really hard to make this work. 
:( # # https://people.gnome.org/~federico/blog/librsvg-build-infrastructure.html # https://gitlab.gnome.org/GNOME/librsvg/blob/master/Makefile.am ### Rust lib slapd components RSLAPD_LIB = @abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a librslapd_la_SOURCES = \ src/librslapd/Cargo.toml \ src/librslapd/build.rs \ src/librslapd/src/cache.rs \ src/librslapd/src/lib.rs librslapd_la_EXTRA = src/librslapd/Cargo.lock @abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES) RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ CARGO_TARGET_DIR=$(abs_top_builddir)/rs/rslapd \ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS) # The header needs the lib build first. rust-slapi-private.h: @abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a # Build rust ns-slapd components as a library. RNSSLAPD_LIB = @abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a librnsslapd_la_SOURCES = \ src/librnsslapd/Cargo.toml \ src/librnsslapd/build.rs \ src/librnsslapd/src/lib.rs librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock @abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES) RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ CARGO_TARGET_DIR=$(abs_top_builddir)/rs/rnsslapd \ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \ $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS) # The header needs the lib build first. 
rust-nsslapd-private.h: @abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a

libslapi_r_plugin_SOURCES = \
	src/slapi_r_plugin/src/backend.rs \
	src/slapi_r_plugin/src/ber.rs \
	src/slapi_r_plugin/src/charray.rs \
	src/slapi_r_plugin/src/constants.rs \
	src/slapi_r_plugin/src/dn.rs \
	src/slapi_r_plugin/src/entry.rs \
	src/slapi_r_plugin/src/error.rs \
	src/slapi_r_plugin/src/log.rs \
	src/slapi_r_plugin/src/macros.rs \
	src/slapi_r_plugin/src/pblock.rs \
	src/slapi_r_plugin/src/plugin.rs \
	src/slapi_r_plugin/src/search.rs \
	src/slapi_r_plugin/src/syntax_plugin.rs \
	src/slapi_r_plugin/src/task.rs \
	src/slapi_r_plugin/src/value.rs \
	src/slapi_r_plugin/src/lib.rs

# Build rust ns-slapd components as a library.
ENTRYUUID_LIB = @abs_top_builddir@/rs/entryuuid/@rust_target_dir@/libentryuuid.a

libentryuuid_la_SOURCES = \
	src/plugins/entryuuid/Cargo.toml \
	src/plugins/entryuuid/src/lib.rs \
	$(libslapi_r_plugin_SOURCES)

# FIX: was "src/plugin/entryuuid/Cargo.lock" (missing "s"). The lock file
# lives under src/plugins/ like the sources above; the bad path broke
# "make dist" because EXTRA_DIST expands $(libentryuuid_la_EXTRA).
libentryuuid_la_EXTRA = src/plugins/entryuuid/Cargo.lock

@abs_top_builddir@/rs/entryuuid/@rust_target_dir@/libentryuuid.a: $(libentryuuid_la_SOURCES) libslapd.la libentryuuid.la
	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/entryuuid \
	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid/Cargo.toml \
		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
	cp $(ENTRYUUID_LIB) @abs_top_builddir@/.libs/libentryuuid.a

ENTRYUUID_SYNTAX_LIB = @abs_top_builddir@/rs/entryuuid_syntax/@rust_target_dir@/libentryuuid_syntax.a

libentryuuid_syntax_la_SOURCES = \
	src/plugins/entryuuid_syntax/Cargo.toml \
	src/plugins/entryuuid_syntax/src/lib.rs \
	$(libslapi_r_plugin_SOURCES)

# FIX: was "src/plugin/entryuuid_syntax/Cargo.lock" (missing "s"), see above.
libentryuuid_syntax_la_EXTRA = src/plugins/entryuuid_syntax/Cargo.lock

@abs_top_builddir@/rs/entryuuid_syntax/@rust_target_dir@/libentryuuid_syntax.a: $(libentryuuid_syntax_la_SOURCES) libslapd.la libentryuuid_syntax.la
	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/entryuuid_syntax \
	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid_syntax/Cargo.toml \
		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
	cp $(ENTRYUUID_SYNTAX_LIB) @abs_top_builddir@/.libs/libentryuuid_syntax.a

# == pwdchan
PWDCHAN_LIB = @abs_top_builddir@/rs/pwdchan/@rust_target_dir@/libpwdchan.a

libpwdchan_la_SOURCES = \
	src/plugins/pwdchan/Cargo.toml \
	src/plugins/pwdchan/src/lib.rs \
	$(libslapi_r_plugin_SOURCES)

# FIX: was "src/plugin/pwdchan/Cargo.lock" (missing "s"), see above.
libpwdchan_la_EXTRA = src/plugins/pwdchan/Cargo.lock

@abs_top_builddir@/rs/pwdchan/@rust_target_dir@/libpwdchan.a: $(libpwdchan_la_SOURCES) libslapd.la libpwdchan.la
	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/pwdchan \
	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/pwdchan/Cargo.toml \
		$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
	cp $(PWDCHAN_LIB) @abs_top_builddir@/.libs/libpwdchan.a
# == end pwdchan

EXTRA_DIST = $(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \
	$(libentryuuid_la_SOURCES) $(libentryuuid_la_EXTRA) \
	$(libentryuuid_syntax_la_SOURCES) $(libentryuuid_syntax_la_EXTRA) \
	$(libpwdchan_la_SOURCES) $(libpwdchan_la_EXTRA) \
	$(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA)

## Run rust tests
# cargo does not support offline tests :(
if RUST_ENABLE_OFFLINE
else
if enable_asan
# Distro rust tends not to have proper asan support with clang
else
check-local:
	for thing in "librslapd" "librnsslapd" ; do \
		echo \
			LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \
			RUSTFLAGS="$(RUSTC_FLAGS)" \
			RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
			CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \
			SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
			SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
			cargo test $(RUST_OFFLINE) \
			--manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \
LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ RUSTFLAGS="$(RUSTC_FLAGS)" \ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ cargo test $(RUST_OFFLINE) \ --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ done # Plugin tests are a little different for thing in "plugins/pwdchan" ; do \ echo \ LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ RUSTFLAGS="$(RUSTC_FLAGS)" \ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ cargo test $(RUST_OFFLINE) --features=slapi_r_plugin/test_log_direct \ --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ RUSTFLAGS="$(RUSTC_FLAGS)" \ RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ cargo test $(RUST_OFFLINE) --features=slapi_r_plugin/test_log_direct \ --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ done endif endif # End if RUST_ENABLE endif #------------------------ # libns-dshttpd #------------------------ libns_dshttpd_la_SOURCES = lib/libaccess/access_plhash.cpp \ lib/libaccess/acl.tab.cpp \ lib/libaccess/acl.yy.cpp \ lib/libaccess/aclcache.cpp \ lib/libaccess/aclerror.cpp \ lib/libaccess/acleval.cpp \ lib/libaccess/aclflush.cpp \ lib/libaccess/aclspace.cpp \ lib/libaccess/acltools.cpp \ lib/libaccess/aclutil.cpp \ lib/libaccess/authdb.cpp \ lib/libaccess/lasdns.cpp \ lib/libaccess/lasgroup.cpp \ lib/libaccess/lasip.cpp \ lib/libaccess/lastod.cpp \ lib/libaccess/lasuser.cpp \ lib/libaccess/method.cpp \ lib/libaccess/nseframe.cpp \ lib/libaccess/nsautherr.cpp \ lib/libaccess/oneeval.cpp \ lib/libaccess/register.cpp \ lib/libaccess/symbols.cpp \ lib/libaccess/usi.cpp \ 
lib/libaccess/usrcache.cpp \ lib/libadmin/error.c \ lib/libadmin/template.c \ lib/libadmin/util.c \ lib/base/crit.cpp \ lib/base/dnsdmain.cpp \ lib/base/ereport.cpp \ lib/base/file.cpp \ lib/base/fsmutex.cpp \ lib/base/nscperror.c \ lib/base/plist.cpp \ lib/base/pool.cpp \ lib/base/shexp.cpp \ lib/base/system.cpp \ lib/base/systhr.cpp \ lib/base/util.cpp \ lib/libsi18n/getstrprop.c \ lib/libsi18n/reshash.c \ lib/libsi18n/txtfile.c libns_dshttpd_la_CPPFLAGS = -I$(srcdir)/include/base $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -I$(srcdir)/lib/ldaputil libns_dshttpd_la_LIBADD = libslapd.la libldaputil.la $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) libns_dshttpd_la_LDFLAGS = $(AM_LDFLAGS) #------------------------ # libslapd #------------------------ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ ldap/servers/slapd/agtmmap.c \ ldap/servers/slapd/apibroker.c \ ldap/servers/slapd/attr.c \ ldap/servers/slapd/attrlist.c \ ldap/servers/slapd/attrsyntax.c \ ldap/servers/slapd/auditlog.c \ ldap/servers/slapd/ava.c \ ldap/servers/slapd/backend.c \ ldap/servers/slapd/backend_manager.c \ ldap/servers/slapd/bitset.c \ ldap/servers/slapd/bulk_import.c \ ldap/servers/slapd/charray.c \ ldap/servers/slapd/ch_malloc.c \ ldap/servers/slapd/computed.c \ ldap/servers/slapd/control.c \ ldap/servers/slapd/configdse.c \ ldap/servers/slapd/counters.c \ ldap/servers/slapd/csn.c \ ldap/servers/slapd/csngen.c \ ldap/servers/slapd/csnset.c \ ldap/servers/slapd/defbackend.c \ ldap/servers/slapd/delete.c \ ldap/servers/slapd/dl.c \ ldap/servers/slapd/dn.c \ ldap/servers/slapd/dse.c \ ldap/servers/slapd/dynalib.c \ ldap/servers/slapd/entry.c \ ldap/servers/slapd/entrywsi.c \ ldap/servers/slapd/errormap.c \ ldap/servers/slapd/eventq.c \ ldap/servers/slapd/eventq-deprecated.c \ ldap/servers/slapd/factory.c \ ldap/servers/slapd/features.c \ ldap/servers/slapd/fileio.c \ ldap/servers/slapd/filter.c \ ldap/servers/slapd/filtercmp.c \ ldap/servers/slapd/filterentry.c \ 
ldap/servers/slapd/generation.c \ ldap/servers/slapd/getfilelist.c \ ldap/servers/slapd/ldapi.c \ ldap/servers/slapd/ldaputil.c \ ldap/servers/slapd/lenstr.c \ ldap/servers/slapd/libglobs.c \ ldap/servers/slapd/localhost.c \ ldap/servers/slapd/log.c \ ldap/servers/slapd/mapping_tree.c \ ldap/servers/slapd/match.c \ ldap/servers/slapd/modify.c \ ldap/servers/slapd/modrdn.c \ ldap/servers/slapd/modutil.c \ ldap/servers/slapd/object.c \ ldap/servers/slapd/objset.c \ ldap/servers/slapd/operation.c \ ldap/servers/slapd/opshared.c \ ldap/servers/slapd/pagedresults.c \ ldap/servers/slapd/pblock.c \ ldap/servers/slapd/plugin.c \ ldap/servers/slapd/plugin_acl.c \ ldap/servers/slapd/plugin_mmr.c \ ldap/servers/slapd/plugin_internal_op.c \ ldap/servers/slapd/plugin_mr.c \ ldap/servers/slapd/plugin_role.c \ ldap/servers/slapd/plugin_syntax.c \ ldap/servers/slapd/protect_db.c \ ldap/servers/slapd/proxyauth.c \ ldap/servers/slapd/pw.c \ ldap/servers/slapd/pw_retry.c \ ldap/servers/slapd/rdn.c \ ldap/servers/slapd/referral.c \ ldap/servers/slapd/regex.c \ ldap/servers/slapd/resourcelimit.c \ ldap/servers/slapd/result.c \ ldap/servers/slapd/rewriters.c \ ldap/servers/slapd/sasl_map.c \ ldap/servers/slapd/schema.c \ ldap/servers/slapd/schemaparse.c \ ldap/servers/slapd/security_wrappers.c \ ldap/servers/slapd/slapd_plhash.c \ ldap/servers/slapd/slapi_counter.c \ ldap/servers/slapd/slapi2runtime.c \ ldap/servers/slapd/snmp_collator.c \ ldap/servers/slapd/sort.c \ ldap/servers/slapd/ssl.c \ ldap/servers/slapd/str2filter.c \ ldap/servers/slapd/subentry.c \ ldap/servers/slapd/task.c \ ldap/servers/slapd/time.c \ ldap/servers/slapd/thread_data.c \ ldap/servers/slapd/uniqueid.c \ ldap/servers/slapd/uniqueidgen.c \ ldap/servers/slapd/upgrade.c \ ldap/servers/slapd/utf8.c \ ldap/servers/slapd/utf8compare.c \ ldap/servers/slapd/util.c \ ldap/servers/slapd/uuid.c \ ldap/servers/slapd/value.c \ ldap/servers/slapd/valueset.c \ ldap/servers/slapd/vattr.c \ ldap/servers/slapd/slapi_pal.c \ 
src/libsds/external/csiphash/csiphash.c \ $(GETSOCKETPEER) \ $(libavl_a_SOURCES) libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) @db_inc@ $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SVRCORE_INCLUDES) libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LIBS) libsvrcore.la # If asan is enabled, it creates special libcrypt interceptors. However, they are # detected by the first load of libasan at runtime, and what is in the linked lib # so we need libcrypt to be present as soon as libasan is loaded for the interceptors # to function. Since ns-slapd links libslapd, this is pulled at startup, which allows # pwdstorage to be asan checked with libcrypt. if enable_asan libslapd_la_LIBADD += $(LIBCRYPT) endif libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS) if RUST_ENABLE libslapd_la_LIBADD += $(RSLAPD_LIB) libslapd_la_LDFLAGS += -lssl -lcrypto endif #//////////////////////////////////////////////////////////////// # # Plugins # #//////////////////////////////////////////////////////////////// #------------------------ # libback-ldbm #------------------------ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \ ldap/servers/slapd/back-ldbm/archive.c \ ldap/servers/slapd/back-ldbm/backentry.c \ ldap/servers/slapd/back-ldbm/cache.c \ ldap/servers/slapd/back-ldbm/cleanup.c \ ldap/servers/slapd/back-ldbm/close.c \ ldap/servers/slapd/back-ldbm/dbimpl.c \ ldap/servers/slapd/back-ldbm/dblayer.c \ ldap/servers/slapd/back-ldbm/dbsize.c \ ldap/servers/slapd/back-ldbm/dn2entry.c \ ldap/servers/slapd/back-ldbm/entrystore.c \ ldap/servers/slapd/back-ldbm/filterindex.c \ ldap/servers/slapd/back-ldbm/findentry.c \ ldap/servers/slapd/back-ldbm/haschildren.c \ ldap/servers/slapd/back-ldbm/id2entry.c \ ldap/servers/slapd/back-ldbm/idl.c \ ldap/servers/slapd/back-ldbm/idl_shim.c \ ldap/servers/slapd/back-ldbm/idl_new.c \ ldap/servers/slapd/back-ldbm/idl_set.c \ 
ldap/servers/slapd/back-ldbm/idl_common.c \ ldap/servers/slapd/back-ldbm/import.c \ ldap/servers/slapd/back-ldbm/index.c \ ldap/servers/slapd/back-ldbm/init.c \ ldap/servers/slapd/back-ldbm/instance.c \ ldap/servers/slapd/back-ldbm/ldbm_abandon.c \ ldap/servers/slapd/back-ldbm/ldbm_add.c \ ldap/servers/slapd/back-ldbm/ldbm_attr.c \ ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c \ ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c \ ldap/servers/slapd/back-ldbm/ldbm_bind.c \ ldap/servers/slapd/back-ldbm/ldbm_compare.c \ ldap/servers/slapd/back-ldbm/ldbm_config.c \ ldap/servers/slapd/back-ldbm/ldbm_delete.c \ ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c \ ldap/servers/slapd/back-ldbm/ldbm_index_config.c \ ldap/servers/slapd/back-ldbm/ldbm_instance_config.c \ ldap/servers/slapd/back-ldbm/ldbm_modify.c \ ldap/servers/slapd/back-ldbm/ldbm_modrdn.c \ ldap/servers/slapd/back-ldbm/ldbm_search.c \ ldap/servers/slapd/back-ldbm/ldbm_unbind.c \ ldap/servers/slapd/back-ldbm/ldbm_usn.c \ ldap/servers/slapd/back-ldbm/ldif2ldbm.c \ ldap/servers/slapd/back-ldbm/dbverify.c \ ldap/servers/slapd/back-ldbm/matchrule.c \ ldap/servers/slapd/back-ldbm/misc.c \ ldap/servers/slapd/back-ldbm/nextid.c \ ldap/servers/slapd/back-ldbm/parents.c \ ldap/servers/slapd/back-ldbm/rmdb.c \ ldap/servers/slapd/back-ldbm/seq.c \ ldap/servers/slapd/back-ldbm/sort.c \ ldap/servers/slapd/back-ldbm/start.c \ ldap/servers/slapd/back-ldbm/uniqueid2entry.c \ ldap/servers/slapd/back-ldbm/vlv.c \ ldap/servers/slapd/back-ldbm/vlv_key.c \ ldap/servers/slapd/back-ldbm/vlv_srch.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_perfctrs.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c \ 
ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c \ ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c libback_ldbm_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @db_inc@ libback_ldbm_la_DEPENDENCIES = libslapd.la libback_ldbm_la_LIBADD = libslapd.la $(DB_LINK) $(LDAPSDK_LINK) $(NSPR_LINK) libback_ldbm_la_LDFLAGS = -avoid-version #------------------------ # libacctpolicy-plugin #------------------------ libacctpolicy_plugin_la_SOURCES = ldap/servers/plugins/acctpolicy/acct_config.c \ ldap/servers/plugins/acctpolicy/acct_init.c \ ldap/servers/plugins/acctpolicy/acct_plugin.c \ ldap/servers/plugins/acctpolicy/acct_util.c libacctpolicy_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libacctpolicy_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libacctpolicy_plugin_la_DEPENDENCIES = libslapd.la libacctpolicy_plugin_la_LDFLAGS = -avoid-version #------------------------ # libacctusability-plugin #------------------------ libacctusability_plugin_la_SOURCES = ldap/servers/plugins/acct_usability/acct_usability.c libacctusability_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libacctusability_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libacctusability_plugin_la_DEPENDENCIES = libslapd.la libacctusability_plugin_la_LDFLAGS = -avoid-version #------------------------ # libacl-plugin #------------------------ libacl_plugin_la_SOURCES = ldap/servers/plugins/acl/acl.c \ ldap/servers/plugins/acl/acl_ext.c \ ldap/servers/plugins/acl/aclanom.c \ ldap/servers/plugins/acl/acleffectiverights.c \ ldap/servers/plugins/acl/aclgroup.c \ ldap/servers/plugins/acl/aclinit.c \ ldap/servers/plugins/acl/acllas.c \ ldap/servers/plugins/acl/acllist.c \ ldap/servers/plugins/acl/aclparse.c \ ldap/servers/plugins/acl/aclplugin.c \ ldap/servers/plugins/acl/aclutil.c libacl_plugin_la_CPPFLAGS = -I$(srcdir)/include/libaccess $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) 
libacl_plugin_la_DEPENDENCIES = libslapd.la libns-dshttpd.la libacl_plugin_la_LIBADD = libslapd.la libns-dshttpd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(LIBCSTD) $(LIBCRUN) libacl_plugin_la_LDFLAGS = -avoid-version # libacl_plugin_la_LINK = $(CXXLINK) -avoid-version #------------------------ # libaddn-plugin #------------------------ libaddn_plugin_la_SOURCES = ldap/servers/plugins/addn/addn.c libaddn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libaddn_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libaddn_plugin_la_DEPENDENCIES = libslapd.la libaddn_plugin_la_LDFLAGS = -avoid-version #------------------------ # librootdn-access-plugin #------------------------ # librootdn_access_plugin_la_SOURCES = ldap/servers/plugins/rootdn_access/rootdn_access.c librootdn_access_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) librootdn_access_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) librootdn_access_plugin_la_DEPENDENCIES = libslapd.la librootdn_access_plugin_la_LDFLAGS = -avoid-version #------------------------ # libautomember-plugin #------------------------ libautomember_plugin_la_SOURCES = ldap/servers/plugins/automember/automember.c libautomember_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libautomember_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libautomember_plugin_la_DEPENDENCIES = libslapd.la libautomember_plugin_la_LDFLAGS = -avoid-version #------------------------ # libattr-unique-plugin #------------------------ libattr_unique_plugin_la_SOURCES = ldap/servers/plugins/uiduniq/7bit.c \ ldap/servers/plugins/uiduniq/uid.c \ ldap/servers/plugins/uiduniq/utils.c libattr_unique_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libattr_unique_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libattr_unique_plugin_la_DEPENDENCIES = libslapd.la libattr_unique_plugin_la_LDFLAGS = -avoid-version #------------------------ # libbitwise-plugin #------------------------ libbitwise_plugin_la_SOURCES = ldap/servers/plugins/bitwise/bitwise.c 
libbitwise_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libbitwise_plugin_la_LIBADD = libslapd.la libbitwise_plugin_la_DEPENDENCIES = libslapd.la libbitwise_plugin_la_LDFLAGS = -avoid-version #------------------------ # libchainingdb-plugin #------------------------ libchainingdb_plugin_la_SOURCES = ldap/servers/plugins/chainingdb/cb_abandon.c \ ldap/servers/plugins/chainingdb/cb_acl.c \ ldap/servers/plugins/chainingdb/cb_add.c \ ldap/servers/plugins/chainingdb/cb_bind.c \ ldap/servers/plugins/chainingdb/cb_cleanup.c \ ldap/servers/plugins/chainingdb/cb_close.c \ ldap/servers/plugins/chainingdb/cb_compare.c \ ldap/servers/plugins/chainingdb/cb_config.c \ ldap/servers/plugins/chainingdb/cb_conn_stateless.c \ ldap/servers/plugins/chainingdb/cb_controls.c \ ldap/servers/plugins/chainingdb/cb_debug.c \ ldap/servers/plugins/chainingdb/cb_delete.c \ ldap/servers/plugins/chainingdb/cb_init.c \ ldap/servers/plugins/chainingdb/cb_instance.c \ ldap/servers/plugins/chainingdb/cb_modify.c \ ldap/servers/plugins/chainingdb/cb_modrdn.c \ ldap/servers/plugins/chainingdb/cb_monitor.c \ ldap/servers/plugins/chainingdb/cb_schema.c \ ldap/servers/plugins/chainingdb/cb_search.c \ ldap/servers/plugins/chainingdb/cb_start.c \ ldap/servers/plugins/chainingdb/cb_temp.c \ ldap/servers/plugins/chainingdb/cb_test.c \ ldap/servers/plugins/chainingdb/cb_unbind.c \ ldap/servers/plugins/chainingdb/cb_utils.c libchainingdb_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libchainingdb_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libchainingdb_plugin_la_DEPENDENCIES = libslapd.la libchainingdb_plugin_la_LDFLAGS = -avoid-version #------------------------ # libcollation-plugin #------------------------ libcollation_plugin_la_SOURCES = ldap/servers/plugins/collation/collate.c \ ldap/servers/plugins/collation/config.c \ ldap/servers/plugins/collation/orfilter.c libcollation_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) 
libcollation_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(ICU_LIBS) $(LIBCSTD) $(LIBCRUN) libcollation_plugin_la_DEPENDENCIES = libslapd.la libcollation_plugin_la_LDFLAGS = -avoid-version # libcollation_plugin_la_LINK = $(CXXLINK) -avoid-version #------------------------ # libcos-plugin #------------------------ libcos_plugin_la_SOURCES = ldap/servers/plugins/cos/cos.c \ ldap/servers/plugins/cos/cos_cache.c libcos_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libcos_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libcos_plugin_la_DEPENDENCIES = libslapd.la libcos_plugin_la_LDFLAGS = -avoid-version #------------------------ # libderef-plugin #----------------------- libderef_plugin_la_SOURCES = ldap/servers/plugins/deref/deref.c libderef_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libderef_plugin_la_DEPENDENCIES = libslapd.la libderef_plugin_la_LDFLAGS = -avoid-version if RUST_ENABLE #------------------------ # libentryuuid-syntax-plugin #----------------------- libentryuuid_syntax_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c libentryuuid_syntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid_syntax libentryuuid_syntax_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_SYNTAX_LIB) libentryuuid_syntax_plugin_la_LDFLAGS = -avoid-version #------------------------ # libentryuuid-plugin #----------------------- libentryuuid_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c libentryuuid_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid libentryuuid_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_LIB) libentryuuid_plugin_la_LDFLAGS = -avoid-version #------------------------ # libpwdchan-plugin #----------------------- libpwdchan_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c libpwdchan_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lpwdchan libpwdchan_plugin_la_DEPENDENCIES = libslapd.la 
$(PWDCHAN_LIB) libpwdchan_plugin_la_LDFLAGS = -avoid-version endif #------------------------ # libpbe-plugin #----------------------- libpbe_plugin_la_SOURCES = ldap/servers/plugins/rever/pbe.c \ ldap/servers/plugins/rever/rever.c libpbe_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SVRCORE_INCLUDES) libpbe_plugin_la_LIBADD = libslapd.la libsvrcore.la $(NSS_LINK) libpbe_plugin_la_DEPENDENCIES = libslapd.la libpbe_plugin_la_LDFLAGS = -avoid-version #------------------------ # libdistrib-plugin #------------------------ libdistrib_plugin_la_SOURCES = ldap/servers/plugins/distrib/distrib.c libdistrib_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libdistrib_plugin_la_LIBADD = libslapd.la libdistrib_plugin_la_DEPENDENCIES = libslapd.la libdistrib_plugin_la_LDFLAGS = -avoid-version #------------------------ # libdna-plugin #------------------------ libdna_plugin_la_SOURCES = ldap/servers/plugins/dna/dna.c libdna_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libdna_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libdna_plugin_la_DEPENDENCIES = libslapd.la libdna_plugin_la_LDFLAGS = -avoid-version #------------------------ # liblinkedattrs-plugin #------------------------ liblinkedattrs_plugin_la_SOURCES = ldap/servers/plugins/linkedattrs/fixup_task.c \ ldap/servers/plugins/linkedattrs/linked_attrs.c liblinkedattrs_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) liblinkedattrs_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) liblinkedattrs_plugin_la_DEPENDENCIES = libslapd.la liblinkedattrs_plugin_la_LDFLAGS = -avoid-version #------------------------ # libmanagedentries-plugin #------------------------ libmanagedentries_plugin_la_SOURCES = ldap/servers/plugins/mep/mep.c libmanagedentries_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libmanagedentries_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libmanagedentries_plugin_la_DEPENDENCIES = libslapd.la libmanagedentries_plugin_la_LDFLAGS = -avoid-version 
#------------------------ # libmemberof-plugin #------------------------ libmemberof_plugin_la_SOURCES= ldap/servers/plugins/memberof/memberof.c \ ldap/servers/plugins/memberof/memberof_config.c libmemberof_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libmemberof_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libmemberof_plugin_la_DEPENDENCIES = libslapd.la libmemberof_plugin_la_LDFLAGS = -avoid-version #------------------------ # libpam-passthru-plugin #------------------------ libpam_passthru_plugin_la_SOURCES = ldap/servers/plugins/pam_passthru/pam_ptconfig.c \ ldap/servers/plugins/pam_passthru/pam_ptdebug.c \ ldap/servers/plugins/pam_passthru/pam_ptimpl.c \ ldap/servers/plugins/pam_passthru/pam_ptpreop.c libpam_passthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libpam_passthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(PAM_LINK) libpam_passthru_plugin_la_DEPENDENCIES = libslapd.la libpam_passthru_plugin_la_LDFLAGS = -avoid-version #------------------------ # libpassthru-plugin #------------------------ libpassthru_plugin_la_SOURCES = ldap/servers/plugins/passthru/ptbind.c \ ldap/servers/plugins/passthru/ptconfig.c \ ldap/servers/plugins/passthru/ptconn.c \ ldap/servers/plugins/passthru/ptdebug.c \ ldap/servers/plugins/passthru/ptpreop.c \ ldap/servers/plugins/passthru/ptutil.c libpassthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libpassthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libpassthru_plugin_la_DEPENDENCIES = libslapd.la libpassthru_plugin_la_LDFLAGS = -avoid-version #------------------------ # libposix-winsync-plugin #------------------------ libposix_winsync_plugin_la_SOURCES = ldap/servers/plugins/posix-winsync/posix-winsync.c \ ldap/servers/plugins/posix-winsync/posix-group-func.c \ ldap/servers/plugins/posix-winsync/posix-group-task.c \ ldap/servers/plugins/posix-winsync/posix-winsync-config.c libposix_winsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) 
$(DSPLUGIN_CPPFLAGS) -DWINSYNC_TEST_POSIX \ -I$(srcdir)/ldap/servers/plugins/replication libposix_winsync_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libposix_winsync_plugin_la_DEPENDENCIES = libslapd.la libposix_winsync_plugin_la_LDFLAGS = -avoid-version #------------------------ # libpwdstorage-plugin #------------------------ libpwdstorage_plugin_la_SOURCES = ldap/servers/plugins/pwdstorage/clear_pwd.c \ ldap/servers/plugins/pwdstorage/crypt_pwd.c \ ldap/servers/plugins/pwdstorage/md5_pwd.c \ ldap/servers/plugins/pwdstorage/md5c.c \ ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.c \ ldap/servers/plugins/pwdstorage/pwd_init.c \ ldap/servers/plugins/pwdstorage/pwd_util.c \ ldap/servers/plugins/pwdstorage/sha_pwd.c \ ldap/servers/plugins/pwdstorage/smd5_pwd.c \ ldap/servers/plugins/pwdstorage/ssha_pwd.c \ ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c \ ldap/servers/plugins/pwdstorage/gost_yescrypt.c \ $(NULLSTRING) libpwdstorage_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libpwdstorage_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) libpwdstorage_plugin_la_DEPENDENCIES = libslapd.la libpwdstorage_plugin_la_LDFLAGS = -avoid-version #------------------------ # libcontentsync-plugin #------------------------ libcontentsync_plugin_la_SOURCES = ldap/servers/plugins/sync/sync_init.c \ ldap/servers/plugins/sync/sync_util.c \ ldap/servers/plugins/sync/sync_refresh.c \ ldap/servers/plugins/sync/sync_persist.c libcontentsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libcontentsync_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) libcontentsync_plugin_la_DEPENDENCIES = libslapd.la libcontentsync_plugin_la_LDFLAGS = -avoid-version #------------------------ # libreferint-plugin #------------------------ libreferint_plugin_la_SOURCES = ldap/servers/plugins/referint/referint.c libreferint_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libreferint_plugin_la_LIBADD = libslapd.la 
$(LDAPSDK_LINK) $(NSPR_LINK) libreferint_plugin_la_DEPENDENCIES = libslapd.la libreferint_plugin_la_LDFLAGS = -avoid-version #------------------------ # libreplication-plugin #------------------------ libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.c \ ldap/servers/plugins/replication/cl5_clcache.c \ ldap/servers/plugins/replication/cl5_config.c \ ldap/servers/plugins/replication/cl5_init.c \ ldap/servers/plugins/replication/cl_crypt.c \ ldap/servers/plugins/replication/csnpl.c \ ldap/servers/plugins/replication/llist.c \ ldap/servers/plugins/replication/repl_connext.c \ ldap/servers/plugins/replication/repl_controls.c \ ldap/servers/plugins/replication/repl_ext.c \ ldap/servers/plugins/replication/repl_extop.c \ ldap/servers/plugins/replication/repl_globals.c \ ldap/servers/plugins/replication/repl_opext.c \ ldap/servers/plugins/replication/repl_session_plugin.c \ ldap/servers/plugins/replication/repl5_agmt.c \ ldap/servers/plugins/replication/repl5_agmtlist.c \ ldap/servers/plugins/replication/repl5_backoff.c \ ldap/servers/plugins/replication/repl5_connection.c \ ldap/servers/plugins/replication/repl5_inc_protocol.c \ ldap/servers/plugins/replication/repl5_init.c \ ldap/servers/plugins/replication/repl5_mtnode_ext.c \ ldap/servers/plugins/replication/repl5_plugins.c \ ldap/servers/plugins/replication/repl5_protocol.c \ ldap/servers/plugins/replication/repl5_protocol_util.c \ ldap/servers/plugins/replication/repl5_replica.c \ ldap/servers/plugins/replication/repl5_replica_config.c \ ldap/servers/plugins/replication/repl5_replica_dnhash.c \ ldap/servers/plugins/replication/repl5_replica_hash.c \ ldap/servers/plugins/replication/repl5_ruv.c \ ldap/servers/plugins/replication/repl5_schedule.c \ ldap/servers/plugins/replication/repl5_tot_protocol.c \ ldap/servers/plugins/replication/repl5_total.c \ ldap/servers/plugins/replication/repl5_updatedn_list.c \ ldap/servers/plugins/replication/replutil.c \ ldap/servers/plugins/replication/urp.c \ 
ldap/servers/plugins/replication/urp_glue.c \ ldap/servers/plugins/replication/urp_tombstone.c \ ldap/servers/plugins/replication/windows_connection.c \ ldap/servers/plugins/replication/windows_inc_protocol.c \ ldap/servers/plugins/replication/windows_private.c \ ldap/servers/plugins/replication/windows_protocol_util.c \ ldap/servers/plugins/replication/windows_tot_protocol.c libreplication_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) @db_inc@ libreplication_plugin_la_LIBADD = libslapd.la libback-ldbm.la $(LDAPSDK_LINK) $(NSS_LINK) $(NSPR_LINK) $(ICU_LIBS) $(DB_LINK) libreplication_plugin_la_DEPENDENCIES = libslapd.la libback-ldbm.la libreplication_plugin_la_LDFLAGS = -avoid-version #------------------------ # libretrocl-plugin #------------------------ libretrocl_plugin_la_SOURCES = ldap/servers/plugins/retrocl/retrocl.c \ ldap/servers/plugins/retrocl/retrocl_cn.c \ ldap/servers/plugins/retrocl/retrocl_create.c \ ldap/servers/plugins/retrocl/retrocl_po.c \ ldap/servers/plugins/retrocl/retrocl_rootdse.c \ ldap/servers/plugins/retrocl/retrocl_trim.c libretrocl_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libretrocl_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libretrocl_plugin_la_DEPENDENCIES = libslapd.la libretrocl_plugin_la_LDFLAGS = -avoid-version #------------------------ # libroles-plugin #------------------------ libroles_plugin_la_SOURCES = ldap/servers/plugins/roles/roles_cache.c \ ldap/servers/plugins/roles/roles_plugin.c libroles_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libroles_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) libroles_plugin_la_DEPENDENCIES = libslapd.la libroles_plugin_la_LDFLAGS = -avoid-version #------------------------ # libschemareload-plugin #------------------------ libschemareload_plugin_la_SOURCES = ldap/servers/plugins/schema_reload/schema_reload.c libschemareload_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libschemareload_plugin_la_LIBADD = 
libslapd.la $(NSPR_LINK) libschemareload_plugin_la_DEPENDENCIES = libslapd.la libschemareload_plugin_la_LDFLAGS = -avoid-version #------------------------ # libstatechange-plugin #------------------------ libstatechange_plugin_la_SOURCES = ldap/servers/plugins/statechange/statechange.c libstatechange_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libstatechange_plugin_la_LIBADD = libslapd.la libstatechange_plugin_la_DEPENDENCIES = libslapd.la libstatechange_plugin_la_LDFLAGS = -avoid-version #------------------------ # libsyntax-plugin #------------------------ libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \ ldap/servers/plugins/syntaxes/bitstring.c \ ldap/servers/plugins/syntaxes/ces.c \ ldap/servers/plugins/syntaxes/cis.c \ ldap/servers/plugins/syntaxes/debug.c \ ldap/servers/plugins/syntaxes/dn.c \ ldap/servers/plugins/syntaxes/deliverymethod.c \ ldap/servers/plugins/syntaxes/facsimile.c \ ldap/servers/plugins/syntaxes/guide.c \ ldap/servers/plugins/syntaxes/int.c \ ldap/servers/plugins/syntaxes/nameoptuid.c \ ldap/servers/plugins/syntaxes/numericstring.c \ ldap/servers/plugins/syntaxes/phonetic.c \ ldap/servers/plugins/syntaxes/sicis.c \ ldap/servers/plugins/syntaxes/string.c \ ldap/servers/plugins/syntaxes/syntax_common.c \ ldap/servers/plugins/syntaxes/tel.c \ ldap/servers/plugins/syntaxes/telex.c \ ldap/servers/plugins/syntaxes/teletex.c \ ldap/servers/plugins/syntaxes/validate.c \ ldap/servers/plugins/syntaxes/validate_task.c \ ldap/servers/plugins/syntaxes/value.c libsyntax_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libsyntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libsyntax_plugin_la_DEPENDENCIES = libslapd.la libsyntax_plugin_la_LDFLAGS = -avoid-version #------------------------ # libusn-plugin #------------------------ libusn_plugin_la_SOURCES = ldap/servers/plugins/usn/usn.c \ ldap/servers/plugins/usn/usn_cleanup.c libusn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) 
libusn_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libusn_plugin_la_DEPENDENCIES = libslapd.la libusn_plugin_la_LDFLAGS = -avoid-version #------------------------ # libviews-plugin #------------------------ libviews_plugin_la_SOURCES = ldap/servers/plugins/views/views.c libviews_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libviews_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libviews_plugin_la_DEPENDENCIES = libslapd.la libviews_plugin_la_LDFLAGS = -avoid-version #------------------------ # libwhoami-plugin #------------------------ libwhoami_plugin_la_SOURCES = ldap/servers/plugins/whoami/whoami.c libwhoami_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) libwhoami_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) libwhoami_plugin_la_DEPENDENCIES = libslapd.la libwhoami_plugin_la_LDFLAGS = -avoid-version #------------------------ #//////////////////////////////////////////////////////////////// # # Programs # #//////////////////////////////////////////////////////////////// #------------------------ # dbscan #------------------------ dbscan_SOURCES = ldap/servers/slapd/tools/dbscan.c dbscan_CPPFLAGS = $(NSPR_INCLUDES) $(AM_CPPFLAGS) dbscan_LDADD = $(NSPR_LINK) $(DB_IMPL) #------------------------ # ldap-agent #------------------------ ldap_agent_SOURCES = ldap/servers/snmp/main.c \ ldap/servers/snmp/ldap-agent.c \ ldap/servers/slapd/agtmmap.c ldap_agent_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @netsnmp_inc@ ldap_agent_LDADD = $(LDAPSDK_LINK_NOTHR) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK) $(THREADLIB) #------------------------ # ldclt #------------------------ ldclt_SOURCES = ldap/servers/slapd/tools/ldaptool-sasl.c \ ldap/servers/slapd/tools/ldclt/data.c \ ldap/servers/slapd/tools/ldclt/ldapfct.c \ ldap/servers/slapd/tools/ldclt/ldclt.c \ ldap/servers/slapd/tools/ldclt/ldcltU.c \ ldap/servers/slapd/tools/ldclt/parser.c \ ldap/servers/slapd/tools/ldclt/port.c \ 
ldap/servers/slapd/tools/ldclt/scalab01.c \ ldap/servers/slapd/tools/ldclt/threadMain.c \ ldap/servers/slapd/tools/ldclt/utils.c \ ldap/servers/slapd/tools/ldclt/version.c \ ldap/servers/slapd/tools/ldclt/workarounds.c ldclt_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/ldap/servers/slapd/tools $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) ldclt_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(LIBDL) $(THREADLIB) #------------------------ # ns-slapd #------------------------ if enable_ldapi GETSOCKETPEER=ldap/servers/slapd/getsocketpeer.c enable_ldapi = 1 endif if enable_autobind enable_autobind = 1 endif if enable_auto_dn_suffix enable_auto_dn_suffix = 1 endif ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \ ldap/servers/slapd/auth.c \ ldap/servers/slapd/bind.c \ ldap/servers/slapd/compare.c \ ldap/servers/slapd/config.c \ ldap/servers/slapd/connection.c \ ldap/servers/slapd/conntable.c \ ldap/servers/slapd/daemon.c \ ldap/servers/slapd/detach.c \ ldap/servers/slapd/extendop.c \ ldap/servers/slapd/fedse.c \ ldap/servers/slapd/fileio.c \ ldap/servers/slapd/getopt_ext.c \ ldap/servers/slapd/globals.c \ ldap/servers/slapd/house.c \ ldap/servers/slapd/init.c \ ldap/servers/slapd/main.c \ ldap/servers/slapd/monitor.c \ ldap/servers/slapd/passwd_extop.c \ ldap/servers/slapd/psearch.c \ ldap/servers/slapd/pw_mgmt.c \ ldap/servers/slapd/pw_verify.c \ ldap/servers/slapd/rootdse.c \ ldap/servers/slapd/sasl_io.c \ ldap/servers/slapd/saslbind.c \ ldap/servers/slapd/search.c \ ldap/servers/slapd/start_tls_extop.c \ ldap/servers/slapd/strdup.c \ ldap/servers/slapd/stubs.c \ ldap/servers/slapd/tempnam.c \ ldap/servers/slapd/unbind.c ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) $(SVRCORE_INCLUDES) $(CFI_CFLAGS) # We need our libraries to come first, then our externals libraries second. 
ns_slapd_LDADD = libslapd.la libldaputil.la libsvrcore.la if RUST_ENABLE ns_slapd_LDADD += $(RNSSLAPD_LIB) endif ns_slapd_LDADD += $(LDAPSDK_LINK) $(NSS_LINK) $(LIBADD_DL) \ $(NSPR_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LIBS) $(EVENT_LINK) if RUST_ENABLE ns_slapd_LDADD += -lssl -lcrypto endif ns_slapd_DEPENDENCIES = libslapd.la libldaputil.la # We need to link ns-slapd with the C++ compiler on HP-UX since we load # some C++ shared libraries (such as icu). if HPUX ns_slapd_LINK = $(CXXLINK) else ns_slapd_LINK = $(LINK) endif #------------------------ # pwdhash #------------------------ pwdhash_SOURCES = ldap/servers/slapd/tools/pwenc.c pwdhash_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) pwdhash_LDADD = libslapd.la libsvrcore.la $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) pwdhash_DEPENDENCIES = libslapd.la #------------------------- # CMOCKA TEST PROGRAMS #------------------------- if ENABLE_CMOCKA check_PROGRAMS = test_slapd # Mark all check programs for testing TESTS = test_slapd test_slapd_SOURCES = test/main.c \ test/libslapd/test.c \ test/libslapd/counters/atomic.c \ test/libslapd/pblock/analytics.c \ test/libslapd/pblock/v3_compat.c \ test/libslapd/schema/filter_validate.c \ test/libslapd/operation/v3_compat.c \ test/libslapd/spal/meminfo.c \ test/plugins/test.c \ test/plugins/pwdstorage/pbkdf2.c # We need to link a lot of plugins for this test. test_slapd_LDADD = libslapd.la \ libpwdstorage-plugin.la \ $(NSS_LINK) $(NSPR_LINK) test_slapd_LDFLAGS = $(AM_CPPFLAGS) $(CMOCKA_LINKS) ### WARNING: Slap.h needs cert.h, which requires the -I/lib/ldaputil!!! ### WARNING: Slap.h pulls ssl.h, which requires nss!!!! 
# We need to pull in plugin header paths too: test_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) \ -I$(srcdir)/ldap/servers/plugins/pwdstorage endif #------------------------ # end cmocka tests #------------------------ # these are for the config files and scripts that we need to generate and replace # the paths and other tokens with the real values set during configure/make # note that we cannot just use AC_OUTPUT to do this for us, since it will do things like this: # LD_LIBRARY_PATH = ${prefix}/lib/dirsrv # i.e. it literally copies in '${prefix}' rather than expanding it out - we want this instead: # LD_LIBRARY_PATH = /usr/lib/dirsrv fixupcmd = sed \ -e 's,@bindir\@,$(bindir),g' \ -e 's,@sbindir\@,$(sbindir),g' \ -e 's,@libdir\@,$(libdir),g' \ -e 's,@libexecdir\@,$(libexecdir),g' \ -e 's,@nss_libdir\@,$(nss_libdir),g' \ -e 's,@ldaptool_bindir\@,$(ldaptool_bindir),g' \ -e 's,@ldaptool_opts\@,$(ldaptool_opts),g' \ -e 's,@plainldif_opts\@,$(plainldif_opts),g' \ -e 's,@db_libdir\@,$(db_libdir),g' \ -e 's,@db_bindir\@,$(db_bindir),g' \ -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ -e 's,@pcre_libdir\@,$(pcre_libdir),g' \ -e 's,@propertydir\@,$(propertydir),g' \ -e 's,@datadir\@,$(datadir),g' \ -e 's,@schemadir\@,$(schemadir),g' \ -e 's,@serverdir\@,$(serverdir),g' \ -e 's,@serverincdir\@,$(serverincdir),g' \ -e 's,@serverplugindir\@,$(serverplugindir),g' \ -e 's,@taskdir\@,$(taskdir),g' \ -e 's,@configdir\@,$(configdir),g' \ -e 's,@sysconfdir\@,$(sysconfdir),g' \ -e 's,@localstatedir\@,$(localstatedir),g' \ -e 's,@localrundir\@,$(localrundir),g' \ -e 's,@infdir\@,$(infdir),g' \ -e 's,@mibdir\@,$(mibdir),g' \ -e 's,@cockpitdir\@,$(cockpitdir),g' \ -e 's,@templatedir\@,$(sampledatadir),g' \ -e 's,@systemschemadir\@,$(systemschemadir),g' \ -e 's,@package_name\@,$(PACKAGE_NAME),g' \ -e 's,@instconfigdir\@,$(instconfigdir),g' \ -e 's,@enable_ldapi\@,$(enable_ldapi),g' \ -e 's,@enable_pam_passthru\@,$(enable_pam_passthru),g' \ -e 
's,@enable_bitwise\@,$(enable_bitwise),g' \ -e 's,@enable_dna\@,$(enable_dna),g' \ -e 's,@enable_autobind\@,$(enable_autobind),g' \ -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \ -e 's,@enable_presence\@,$(enable_presence),g' \ -e 's,@enable_asan\@,$(ASAN_ON),g' \ -e 's,@enable_msan\@,$(MSAN_ON),g' \ -e 's,@enable_tsan\@,$(TSAN_ON),g' \ -e 's,@enable_ubsan\@,$(UBSAN_ON),g' \ -e 's,@SANITIZER\@,$(SANITIZER),g' \ -e 's,@enable_rust\@,@enable_rust@,g' \ -e 's,@ECHO_N\@,$(ECHO_N),g' \ -e 's,@ECHO_C\@,$(ECHO_C),g' \ -e 's,@brand\@,$(brand),g' \ -e 's,@capbrand\@,$(capbrand),g' \ -e 's,@vendor\@,$(vendor),g' \ -e 's,@PACKAGE_NAME\@,$(PACKAGE_NAME),g' \ -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ -e 's,@RPM_VERSION\@,$(RPM_VERSION),g' \ -e 's,@PACKAGE_BASE_VERSION\@,$(PACKAGE_BASE_VERSION),g' \ -e 's,@CONSOLE_VERSION\@,$(CONSOLE_VERSION),g' \ -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ -e 's,@NQBUILD_NUM\@,$(NQBUILDNUM),g' \ -e 's,@perlpath\@,$(perldir),g' \ -e 's,@defaultuser\@,$(defaultuser),g' \ -e 's,@defaultgroup\@,$(defaultgroup),g' \ -e 's,@with_fhs_opt\@,@with_fhs_opt@,g' \ -e 's,@with_selinux\@,@with_selinux@,g' \ -e 's,@with_systemd\@,$(WITH_SYSTEMD),g' \ -e 's,@tmpfiles_d\@,$(tmpfiles_d),g' \ -e 's,@pythonexec\@,@pythonexec@,g' \ -e 's,@sttyexec\@,@sttyexec@,g' \ -e 's,@initconfigdir\@,$(initconfigdir),g' \ -e 's,@updatedir\@,$(updatedir),g' \ -e 's,@ldaplib\@,$(ldaplib),g' \ -e 's,@ldaplib_defs\@,$(ldaplib_defs),g' \ -e 's,@systemdsystemunitdir\@,$(systemdsystemunitdir),g' \ -e 's,@systemdsystemconfdir\@,$(systemdsystemconfdir),g' \ -e 's,@systemdgroupname\@,$(systemdgroupname),g' \ -e 's,@prefixdir\@,$(prefixdir),g' %: %.in mkdir -p $(dir $@) $(fixupcmd) $^ > $@ %/$(PACKAGE_NAME): %/initscript.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ %/$(PACKAGE_NAME): %/base-initconfig.in if [ ! 
-d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi if SYSTEMD $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ else $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ $(fixupcmd) $(srcdir)/ldap/admin/src/initconfig.in >> $@ endif %/template-initconfig: %/template-initconfig.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi if SYSTEMD $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ else $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ endif %/$(PACKAGE_NAME)-snmp: %/ldap-agent-initscript.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ # yes, that is an @ in the filename . . . %/$(PACKAGE_NAME)@.service: %/systemd.template.service.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ %/$(PACKAGE_NAME)@.service.d/custom.conf: %/systemd.template.service.custom.conf.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ if with_sanitizer %/$(PACKAGE_NAME)@.service.d/xsan.conf: %/systemd.template.service.xsan.conf.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ endif %/$(systemdgroupname): %/systemd.group.in if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ %/$(PACKAGE_NAME)-snmp.service: %/systemd-snmp.service.in if [ ! 
-d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi $(fixupcmd) $^ > $@ # if distdir is a git tag, use that for the git archive tag, else # just assume a developer build and use HEAD git-archive: if [ -n "$(SRCDISTDIR)" -a -d "$(SRCDISTDIR)" ] ; then \ srcdistdir=$(SRCDISTDIR) ; \ else \ srcdistdir=`pwd` ; \ fi ; \ cd $(srcdir) ; \ if git show-ref --tags -q $(distdir) ; then \ gittag=$(distdir) ; \ else \ gittag=HEAD ; \ fi ; \ git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2 # Python test tests # How will we update this to python 3? lib389: src/lib389/setup.py cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py build_manpages lib389-install: lib389 cd $(srcdir)/src/lib389; $(PYTHON) setup.py install --skip-build --force if ENABLE_COCKPIT NODE_MODULES_TEST = src/cockpit/389-console/node_modules/webpack WEBPACK_TEST = src/cockpit/389-console/cockpit_dist/index.html # Cockpit UI plugin - we install the dependancies and build the JS sources # and then we use install-data-hook for copying the results on 'make install' $(NODE_MODULES_TEST): cd src/cockpit/389-console; make -f node_modules.mk install $(WEBPACK_TEST): $(NODE_MODULES_TEST) cd src/cockpit/389-console; make -f node_modules.mk build-cockpit-plugin 389-console: $(WEBPACK_TEST) # This requires a built source tree and avoids having to install anything system-wide 389-console-devel-install: cd $(srcdir)/src/cockpit/389-console; \ rm ~/.local/share/cockpit/389-console; \ mkdir -p ~/.local/share/cockpit/; \ ln -s $$(pwd)/dist ~/.local/share/cockpit/389-console 389-console-clean: cd $(srcdir)/src/cockpit/389-console; make -f node_modules.mk clean endif if HAVE_DOXYGEN # The rm in man3 is to remove files like: _home_william_development_389ds_libsds_src_.3 # If there is a way to ignore this in doxygen I'm all ears ... 
doxyfile.stamp: cd $(srcdir); $(DOXYGEN) $(abs_top_builddir)/docs/slapi.doxy rm -f $(abs_top_builddir)/man/man3/_* touch doxyfile.stamp # Add the docs to make all. all-local: doxyfile.stamp endif 389-ds-base-389-ds-base-2.0.15/README.md000066400000000000000000000025531421664411400165620ustar00rootroot00000000000000389 Directory Server ==================== 389 Directory Server is a highly usable, fully featured, reliable and secure LDAP server implementation. It handles many of the largest LDAP deployments in the world. All our code has been extensively tested with sanitisation tools. As well as a rich feature set of fail-over and backup technologies gives administrators confidence their accounts are safe. License ------- The 389 Directory Server is subject to the terms detailed in the license agreement file called LICENSE. Late-breaking news and information on the 389 Directory Server is available on our [wiki page](https://www.port389.org/) Building -------- autoreconf -fiv ./configure --enable-debug --with-openldap --enable-cmocka --enable-asan make make lib389 sudo make install sudo make lib389-install Note: **--enable-asan** is optional, and it should only be used for debugging/development purposes. See also full [building guide](https://www.port389.org/docs/389ds/development/building.html). Testing ------- make check sudo py.test -s 389-ds-base/dirsrvtests/tests/suites/basic/ To debug the make check item's, you'll need libtool to help: libtool --mode=execute gdb /home/william/build/ds/test_slapd More information ---------------- Please see our [contributing guide](https://www.port389.org/docs/389ds/contributing.html). 
389-ds-base-389-ds-base-2.0.15/VERSION.sh000066400000000000000000000044751421664411400167710ustar00rootroot00000000000000# brand is lower case - used for names that don't appear to end users # brand is used for file naming - should contain no spaces brand=389 # capbrand is the properly capitalized brand name that appears to end users # may contain spaces capbrand=389 # vendor is the properly formatted vendor/manufacturer name that appears to end users vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=2 VERSION_MINOR=0 VERSION_MAINT=15 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d) # Set the version and release numbers for local developer RPM builds. We # set these here because we do not want the git commit hash in the RPM # version since it can make RPM upgrades difficult. If we have a git # commit hash, we add it into the release number below. RPM_RELEASE=${VERSION_DATE} RPM_VERSION=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_MAINT} if $(git -C "$srcdir" rev-parse --is-inside-work-tree > /dev/null 2>&1); then # Check if the source is from a git repo # if this is not a git repo, git log will say # fatal: Not a git repository # to stderr and stdout will be empty # this tells git to print the short commit hash from the last commit COMMIT=$(git -C "$srcdir" log -1 --pretty=format:%h 2> /dev/null) if test -n "$COMMIT" ; then VERSION_PREREL=.${VERSION_DATE}git$COMMIT RPM_RELEASE=${RPM_RELEASE}git$COMMIT fi fi # the real version used throughout configure and make # NOTE: because of autoconf/automake harshness, we cannot override the settings # below in C code - there is no way to override the default #defines # for these set with AC_INIT - so configure.ac should AC_DEFINE # DS_PACKAGE_VERSION DS_PACKAGE_TARNAME DS_PACKAGE_BUGREPORT # for use in C code - other code (perl scripts, shell scripts, Makefiles) # can use PACKAGE_VERSION et. al. 
PACKAGE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.${VERSION_MAINT}${VERSION_PREREL} # the name of the source tarball - see make dist PACKAGE_TARNAME=${brand}-ds-base # url for bug reports PACKAGE_BUGREPORT="${PACKAGE_BUGREPORT}enter_bug.cgi?product=$brand" # PACKAGE_STRING="$PACKAGE_TARNAME $PACKAGE_VERSION" # the version of the ds console package that this directory server # is compatible with # console .2 is still compatible with 389 .3 for now CONSOLE_VERSION=$VERSION_MAJOR.2 389-ds-base-389-ds-base-2.0.15/autogen.sh000077500000000000000000000066061421664411400173070ustar00rootroot00000000000000#!/bin/sh # set required versions of tools here # the version is dotted integers like X.Y.Z where # X, Y, and Z are integers # comparisons are done using shell -lt, -gt, etc. # this works if the numbers are zero filled as well # so 06 == 6 # autoconf version required # need 2.69 or later ac_need_maj=2 ac_need_min=69 # automake version required # need 1.13.4 or later am_need_maj=1 am_need_min=13 am_need_rev=4 # libtool version required # need 2.4.2 or later lt_need_maj=2 lt_need_min=4 lt_need_rev=2 # should never have to touch anything below this line unless there is a bug ########################################################################### # input # arg1 - version string in the form "X.Y[.Z]" - the .Z is optional # args remaining - the needed X, Y, and Z to match # output # return 0 - success - the version string is >= the required X.Y.Z # return 1 - failure - the version string is < the required X.Y.Z # NOTE: All input must be integers, otherwise you will see shell errors checkvers() { vers="$1"; shift needmaj="$1"; shift needmin="$1"; shift if [ "$#" != "0" ]; then needrev="$1"; shift fi verslist=`echo $vers | tr '.' 
' '` set $verslist maj=$1; shift min=$1; shift if [ "$#" != "0" ]; then rev=$1; shift fi if [ "$maj" -gt "$needmaj" ] ; then return 0; fi if [ "$maj" -lt "$needmaj" ] ; then return 1; fi # if we got here, maj == needmaj if [ -z "$needmin" ] ; then return 0; fi if [ "$min" -gt "$needmin" ] ; then return 0; fi if [ "$min" -lt "$needmin" ] ; then return 1; fi # if we got here, min == needmin if [ -z "$needrev" ] ; then return 0; fi if [ "$rev" -gt "$needrev" ] ; then return 0; fi if [ "$rev" -lt "$needrev" ] ; then return 1; fi # if we got here, rev == needrev return 0 } # We use GNU sed-isms, so if `gsed' exists, use that instead. sed=sed if command -v gsed >/dev/null then sed=gsed fi # Check autoconf version AC_VERSION=`autoconf --version | $sed '/^autoconf/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` if checkvers "$AC_VERSION" $ac_need_maj $ac_need_min ; then echo Found valid autoconf version $AC_VERSION else echo "You must have autoconf version $ac_need_maj.$ac_need_min or later installed (found version $AC_VERSION)." exit 1 fi # Check automake version AM_VERSION=`automake --version | $sed '/^automake/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` if checkvers "$AM_VERSION" $am_need_maj $am_need_min $am_need_rev ; then echo Found valid automake version $AM_VERSION else echo "You must have automake version $am_need_maj.$am_need_min.$am_need_rev or later installed (found version $AM_VERSION)." exit 1 fi # Check libtool version # NOTE: some libtool versions report a letter at the end e.g. 
on RHEL6 # the version is 2.2.6b - for comparison purposes, just strip off the # letter - note that the shell -lt and -gt comparisons will fail with # test: 6b: integer expression expected if the number to compare # contains a non-digit LT_VERSION=`libtool --version | $sed '/GNU libtool/ {s/^.* \([1-9][0-9a-zA-Z.]*\)$/\1/; s/[a-zA-Z]//g; q}'` if checkvers "$LT_VERSION" $lt_need_maj $lt_need_min $lt_need_rev ; then echo Found valid libtool version $LT_VERSION else echo "You must have libtool version $lt_need_maj.$lt_need_min.$lt_need_rev or later installed (found version $LT_VERSION)." exit 1 fi # Run autoreconf echo "Running autoreconf -fvi" autoreconf -fvi 389-ds-base-389-ds-base-2.0.15/buildnum.py000077500000000000000000000010431421664411400174700ustar00rootroot00000000000000#!/usr/bin/env python3 # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # Generate a build number in the format YYYY.DDD.HHMM import os import time SDE = os.getenv('SOURCE_DATE_EPOCH') if SDE is not None: gmtime_obj = time.gmtime(int(SDE)) else: gmtime_obj = time.gmtime() # Print build number buildnum = time.strftime("%Y.%j.%H%M", gmtime_obj) print(f'\\"{buildnum}\\"', end = '') 389-ds-base-389-ds-base-2.0.15/configure.ac000066400000000000000000000721441421664411400175740ustar00rootroot00000000000000# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. AC_PREREQ(2.59) AC_INIT([dirsrv],[1.0],[http://bugzilla.redhat.com/]) # AC_CONFIG_HEADER must be called right after AC_INIT. AC_CONFIG_HEADERS([config.h]) # include the version information . 
$srcdir/VERSION.sh AC_MSG_NOTICE(This is configure for $PACKAGE_TARNAME $PACKAGE_VERSION) AM_INIT_AUTOMAKE([1.9 foreign subdir-objects dist-bzip2 no-dist-gzip no-define tar-pax]) AC_SUBST([RPM_VERSION]) AC_SUBST([RPM_RELEASE]) AC_SUBST([VERSION_PREREL]) AC_SUBST([CONSOLE_VERSION]) AM_MAINTAINER_MODE AC_CANONICAL_HOST AC_CONFIG_MACRO_DIRS([m4]) # Checks for programs. : ${CXXFLAGS=""} AC_PROG_CXX : ${CFLAGS=""} AC_PROG_CC AM_PROG_CC_C_O AM_PROG_AS AC_PROG_CC_STDC PKG_PROG_PKG_CONFIG # disable static libs by default - we only use a couple AC_DISABLE_STATIC AC_PROG_LIBTOOL # Checks for header files. AC_HEADER_DIRENT AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_CHECK_HEADERS([arpa/inet.h errno.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h syslog.h unistd.h mntent.h sys/sysinfo.h sys/endian.h endian.h]) # These are *required* headers without option. AC_CHECK_HEADERS([inttypes.h], [], AC_MSG_ERROR([unable to locate required header inttypes.h])) AC_CHECK_HEADERS([crack.h], [], AC_MSG_ERROR([unable to locate required header crack.h])) # Checks for typedefs, structures, and compiler characteristics. AC_HEADER_STAT AC_C_CONST AC_HEADER_STDBOOL AC_TYPE_UID_T AC_TYPE_PID_T AC_TYPE_SIZE_T AC_HEADER_TIME AC_STRUCT_TM # Checks for library functions. AC_FUNC_CHOWN AC_FUNC_CLOSEDIR_VOID AC_FUNC_ERROR_AT_LINE AC_FUNC_FORK AC_FUNC_LSTAT AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK AC_FUNC_MALLOC AC_FUNC_MEMCMP AC_FUNC_MMAP AC_TYPE_SIGNAL AC_FUNC_STAT AC_FUNC_STRERROR_R AC_FUNC_STRFTIME AC_FUNC_VPRINTF AC_CHECK_FUNCS([endpwent ftruncate getcwd getaddrinfo inet_pton inet_ntop localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp strpbrk strrchr strstr strtol tzset]) # These functions are *required* without option. AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symbol clock_gettime])) # This will detect if we need to add the LIBADD_DL value for us. 
LT_LIB_DLLOAD # Optional rust component support. AC_MSG_CHECKING(for --enable-rust-offline) AC_ARG_ENABLE(rust_offline, AS_HELP_STRING([--enable-rust-offline], [Enable rust building offline. you MUST have run vendor! (default: no)]), [], [ enable_rust_offline=no ]) AC_MSG_RESULT($enable_rust_offline) AM_CONDITIONAL([RUST_ENABLE_OFFLINE],[test "$enable_rust_offline" = yes]) AS_IF([test "$enable_rust_offline" = yes], [rust_vendor_sources="replace-with = \"vendored-sources\""], [rust_vendor_sources=""]) AC_SUBST([rust_vendor_sources]) AC_MSG_CHECKING(for --enable-rust) AC_ARG_ENABLE(rust, AS_HELP_STRING([--enable-rust], [Enable rust language features (default: no)]), [], [ enable_rust=no ]) AC_MSG_RESULT($enable_rust) if test "$enable_rust" = yes -o "$enable_rust_offline" = yes; then AC_CHECK_PROG(CARGO, [cargo], [yes], [no]) AC_CHECK_PROG(RUSTC, [rustc], [yes], [no]) # Since fernet uses the openssl lib. PKG_CHECK_MODULES([OPENSSL], [openssl]) AS_IF([test "$CARGO" != "yes" -o "$RUSTC" != "yes"], [ AC_MSG_FAILURE("Rust based plugins cannot be built cargo=$CARGO rustc=$RUSTC") ]) fi AC_SUBST([enable_rust]) AM_CONDITIONAL([RUST_ENABLE],[test "$enable_rust" = yes -o "$enable_rust_offline" = yes]) # Optional cockpit support (enabled by default) AC_MSG_CHECKING(for --enable-cockpit) AC_ARG_ENABLE(cockpit, AS_HELP_STRING([--enable-cockpit], [Enable cockpit plugin (default: yes)]), [], [ enable_cockpit=yes ]) AC_MSG_RESULT($enable_cockpit) AC_SUBST([enable_cockpit]) AC_SUBST(ENABLE_COCKPIT) AM_CONDITIONAL([ENABLE_COCKPIT],[test "$enable_cockpit" = yes]) AC_DEFINE_UNQUOTED([DS_PACKAGE_TARNAME], "$PACKAGE_TARNAME", [package tarball name]) AC_DEFINE_UNQUOTED([DS_PACKAGE_BUGREPORT], "$PACKAGE_BUGREPORT", [package bug report url]) # define these for automake distdir PACKAGE=$PACKAGE_TARNAME AC_DEFINE_UNQUOTED([PACKAGE], "$PACKAGE", [package tar name]) AC_MSG_CHECKING(for --enable-debug) AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]), 
[], [ enable_debug=no ]) AC_MSG_RESULT($enable_debug) if test "$enable_debug" = yes ; then debug_defs="-DDEBUG -DMCC_DEBUG" debug_cflags="-g3 -O0" debug_cxxflags="-g3 -O0" debug_rust_defs="-C debuginfo=2 -Z macro-backtrace" cargo_defs="" rust_target_dir="debug" AC_DEFINE_UNQUOTED([DS_PACKAGE_VERSION], "$VERSION_MAJOR.$VERSION_MINOR.$VERSION_MAINT DEVELOPER BUILD", [package version]) AC_DEFINE_UNQUOTED([DS_PACKAGE_STRING], "$PACKAGE_TARNAME DEVELOPER BUILD", [package string]) # define these for automake distdir VERSION="DEBUG" AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version]) else debug_defs="" # set the default safe CFLAGS that would be set by AC_PROG_CC otherwise debug_cflags="-g -O2" debug_cxxflags="-g -O2" debug_rust_defs="-C debuginfo=2" cargo_defs="--release" rust_target_dir="release" AC_DEFINE_UNQUOTED([DS_PACKAGE_VERSION], "$PACKAGE_VERSION", [package version]) AC_DEFINE_UNQUOTED([DS_PACKAGE_STRING], "$PACKAGE_TARNAME $PACKAGE_VERSION", [package string]) # define these for automake distdir VERSION=$PACKAGE_VERSION AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version]) fi AC_SUBST([debug_defs]) AC_SUBST([debug_cflags]) AC_SUBST([debug_cxxflags]) AC_SUBST([debug_rust_defs]) AC_SUBST([cargo_defs]) AC_SUBST([rust_target_dir]) AM_CONDITIONAL([DEBUG],[test "$enable_debug" = yes]) AC_MSG_CHECKING(for --enable-asan) AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sanitizer options (default: no)]), [], [ enable_asan=no ]) AC_MSG_RESULT($enable_asan) if test "$enable_asan" = yes ; then asan_cflags="-fsanitize=address -fno-omit-frame-pointer" asan_rust_defs="-Z sanitizer=address" else asan_cflags="" asan_rust_defs="" fi AC_SUBST([asan_cflags]) AC_SUBST([asan_rust_defs]) AM_CONDITIONAL(enable_asan,[test "$enable_asan" = yes]) AC_MSG_CHECKING(for --enable-msan) AC_ARG_ENABLE(msan, AS_HELP_STRING([--enable-msan], [Enable gcc/clang memory sanitizer options (default: no)]), [], [ enable_msan=no ]) AC_MSG_RESULT($enable_msan) if 
test "$enable_msan" = yes ; then msan_cflags="-fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer" msan_rust_defs="-Z sanitizer=memory" else msan_cflags="" msan_rust_defs="" fi AC_SUBST([msan_cflags]) AC_SUBST([msan_rust_defs]) AM_CONDITIONAL(enable_msan,test "$enable_msan" = "yes") AC_MSG_CHECKING(for --enable-tsan) AC_ARG_ENABLE(tsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang thread sanitizer options (default: no)]), [], [ enable_tsan=no ]) AC_MSG_RESULT($enable_tsan) if test "$enable_tsan" = yes ; then tsan_cflags="-fsanitize=thread -fno-omit-frame-pointer" tsan_rust_defs="-Z sanitizer=thread" else tsan_cflags="" tsan_rust_defs="" fi AC_SUBST([tsan_cflags]) AC_SUBST([tsan_rust_defs]) AM_CONDITIONAL(enable_tsan,test "$enable_tsan" = "yes") AC_MSG_CHECKING(for --enable-ubsan) AC_ARG_ENABLE(ubsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang undefined behaviour sanitizer options (default: no)]), [], [ enable_ubsan=no ]) AC_MSG_RESULT($enable_ubsan) if test "$enable_ubsan" = yes ; then ubsan_cflags="-fsanitize=undefined -fno-omit-frame-pointer" ubsan_rust_defs="" else ubsan_cflags="" ubsan_rust_defs="" fi AC_SUBST([ubsan_cflags]) AC_SUBST([ubsan_rust_defs]) AM_CONDITIONAL(enable_ubsan,test "$enable_ubsan" = "yes") AM_CONDITIONAL(with_sanitizer,test "$enable_asan" = "yes" -o "$enable_msan" = "yes" -o "$enable_tsan" = "yes" -o "$enable_ubsan" = "yes") AC_MSG_CHECKING(for --enable-clang) AC_ARG_ENABLE(clang, AS_HELP_STRING([--enable-clang], [Enable clang (default: no)]), [], [ enable_clang=no ]) AC_MSG_RESULT($enable_clang) AM_CONDITIONAL(CLANG_ENABLE,test "$enable_clang" = "yes") AC_MSG_CHECKING(for --enable-cfi) AC_ARG_ENABLE(cfi, AS_HELP_STRING([--enable-cfi], [Enable control flow integrity - requires --enable-clang (default: no)]), [], [ enable_cfi=no ]) AC_MSG_RESULT($enable_cfi) AM_CONDITIONAL(CFI_ENABLE,test "$enable_cfi" = "yes" -a "$enable_clang" = "yes") AM_CONDITIONAL([RPM_HARDEND_CC], [test -f 
/usr/lib/rpm/redhat/redhat-hardened-cc1]) AC_MSG_CHECKING(for --enable-gcc-security) AC_ARG_ENABLE(gcc-security, AS_HELP_STRING([--enable-gcc-security], [Enable gcc secure compilation options (default: no)]), [], [ enable_gcc_security=no ]) AC_MSG_RESULT($enable_gcc_security) if test "$enable_gcc_security" = yes ; then gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security" else # Without this, -fPIC doesn't work on generic fedora builds, --disable-gcc-sec. gccsec_cflags="" fi AM_COND_IF([RPM_HARDEND_CC], [ gccsec_cflags="$gccsec_flags -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1" ], []) AC_SUBST([gccsec_cflags]) # Pull in profiling. AC_MSG_CHECKING(for --enable-profiling) AC_ARG_ENABLE(profiling, AS_HELP_STRING([--enable-profiling], [Enable gcov profiling features (default: no)]), [], [ enable_profiling=no ]) AC_MSG_RESULT($enable_profiling) if test "$enable_profiling" = yes ; then profiling_defs="-fprofile-arcs -ftest-coverage -g3 -O0" profiling_links="-lgcov --coverage" else profiling_defs="" profiling_links="" fi AC_SUBST([profiling_defs]) AC_SUBST([profiling_links]) AC_MSG_CHECKING(for --enable-systemtap) AC_ARG_ENABLE(systemtap, AS_HELP_STRING([--enable-systemtap], [Enable systemtap probe features (default: no)]), [], [ enable_systemtap=no ]) AC_MSG_RESULT($enable_systemtap) if test "$enable_systemtap" = yes ; then systemtap_defs="-DSYSTEMTAP" else systemtap_defs="" fi AC_SUBST([systemtap_defs]) # these enables are for optional or experimental features AC_MSG_CHECKING(for --enable-pam-passthru) AC_ARG_ENABLE(pam-passthru, AS_HELP_STRING([--enable-pam-passthru], [enable the PAM passthrough auth plugin (default: yes)]), [], [ enable_pam_passthru=yes ]) AC_MSG_RESULT($enable_pam_passthru) if test "$enable_pam_passthru" = yes ; then # check for pam header file used by plugins/pass_passthru/pam_ptimpl.c AC_CHECK_HEADER([security/pam_appl.h], [], 
[AC_MSG_ERROR([Missing header file security/pam_appl.h])]) AC_DEFINE([ENABLE_PAM_PASSTHRU], [1], [enable the pam passthru auth plugin]) fi AM_CONDITIONAL(enable_pam_passthru,test "$enable_pam_passthru" = "yes") if test -z "$enable_dna" ; then enable_dna=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-dna) AC_ARG_ENABLE(dna, AS_HELP_STRING([--enable-dna], [enable the Distributed Numeric Assignment (DNA) plugin (default: yes)])) if test "$enable_dna" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_DNA], [1], [enable the dna plugin]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_dna,test "$enable_dna" = "yes") if test -z "$enable_ldapi" ; then enable_ldapi=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-ldapi) AC_ARG_ENABLE(ldapi, AS_HELP_STRING([--enable-ldapi], [enable LDAP over unix domain socket (LDAPI) support (default: yes)])) if test "$enable_ldapi" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_LDAPI], [1], [enable ldapi support in the server]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_ldapi,test "$enable_ldapi" = "yes") if test -z "$enable_autobind" ; then enable_autobind=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-autobind) AC_ARG_ENABLE(autobind, AS_HELP_STRING([--enable-autobind], [enable auto bind over unix domain socket (LDAPI) support (default: no)])) if test "$enable_ldapi" = yes -a "$enable_autobind" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_AUTOBIND], [1], [enable ldapi auto bind support in the server]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_autobind,test "$enable_autobind" = "yes") if test -z "$enable_auto_dn_suffix" ; then enable_auto_dn_suffix=no # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-auto-dn-suffix) AC_ARG_ENABLE(auto-dn-suffix, AS_HELP_STRING([--enable-auto-dn-suffix], [enable auto bind with auto dn suffix over unix domain socket (LDAPI) support (default: no)])) if test "$enable_ldapi" = yes -a 
"$enable_autobind" = yes -a "$enable_auto_dn_suffix" = "yes"; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_AUTO_DN_SUFFIX], [1], [enable ldapi auto bind with auto dn suffix support in the server]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_auto_dn_suffix,test "$enable_auto_dn_suffix" = "yes") if test -z "$enable_bitwise" ; then enable_bitwise=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-bitwise) AC_ARG_ENABLE(bitwise, AS_HELP_STRING([--enable-bitwise], [enable the bitwise matching rule plugin (default: yes)])) if test "$enable_bitwise" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_BITWISE], [1], [enable the bitwise plugin]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_bitwise,test "$enable_bitwise" = "yes") # Can never be enabled. AM_CONDITIONAL(enable_presence,test "$enable_presence" = "yes") if test -z "$enable_acctpolicy" ; then enable_acctpolicy=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-acctpolicy) AC_ARG_ENABLE(acctpolicy, AS_HELP_STRING([--enable-acctpolicy], [enable the account policy plugin (default: yes)])) if test "$enable_acctpolicy" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_ACCTPOLICY], [1], [enable the account policy plugin]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_acctpolicy,test "$enable_acctpolicy" = "yes") if test -z "$enable_posix_winsync" ; then enable_posix_winsync=yes # if not set on cmdline, set default fi AC_MSG_CHECKING(for --enable-posix-winsync) AC_ARG_ENABLE(posix_winsync, AS_HELP_STRING([--enable-posix-winsync], [enable support for POSIX user/group attributes in winsync (default: yes)])) if test "$enable_posix_winsync" = yes ; then AC_MSG_RESULT(yes) AC_DEFINE([ENABLE_POSIX_WINSYNC], [1], [enable support for POSIX user/group attributes in winsync]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL(enable_posix_winsync,test "$enable_posix_winsync" = "yes") # the default prefix - override with --prefix or --with-fhs AC_PREFIX_DEFAULT([/opt/$PACKAGE_NAME]) # If we 
have no prefix specified, we need to fix the prefix variable. # If we don't what happens is $prefixdir ends up as NONE, and then # later configure changes $prefix to $ac_default_prefix underneath us. if test "$prefix" = "NONE"; then prefix=$ac_default_prefix fi m4_include(m4/fhs.m4) # /run directory path AC_ARG_WITH([localrundir], AS_HELP_STRING([--with-localrundir=DIR], [Runtime data directory]), [localrundir=$with_localrundir], [localrundir="/run"]) AC_SUBST([localrundir]) cockpitdir=/389-console # installation paths - by default, we store everything # under the prefix. The with-fhs option will use /usr, # /etc, and /var. The with-fhs-opt option will use the # prefix, but it's sysconfdir and localstatedir will be # /etc/opt, and /var/opt. if test "$with_fhs_opt" = "yes"; then # Override sysconfdir and localstatedir if FHS optional # package was requested. prefixdir=$prefix sysconfdir='/etc/opt' localstatedir='/var/opt' localrundir='/var/opt/run' # relative to datadir sampledatadir=/data # relative to datadir systemschemadir=/schema # relative to datadir scripttemplatedir=/script-templates # relative to datadir updatedir=/updates # relative to libdir serverdir= # relative to includedir serverincdir= # relative to libdir serverplugindir=/plugins # relative to datadir infdir=/inf # relative to datadir mibdir=/mibs # location of property/resource files, relative to datadir propertydir=/properties # relative to libdir perldir=/perl # relative to libdir pythondir=/python else if test "$with_fhs" = "yes"; then ac_default_prefix=/usr prefix=$ac_default_prefix exec_prefix=$prefix dnl as opposed to the default /usr/etc sysconfdir='/etc' dnl as opposed to the default /usr/var localstatedir='/var' localrundir='/run' fi prefixdir=$prefix # relative to datadir sampledatadir=/$PACKAGE_NAME/data # relative to datadir systemschemadir=/$PACKAGE_NAME/schema # relative to datadir scripttemplatedir=/$PACKAGE_NAME/script-templates # relative to datadir updatedir=/$PACKAGE_NAME/updates 
# relative to libdir serverdir=$PACKAGE_NAME # relative to includedir serverincdir=$PACKAGE_NAME # relative to libdir serverplugindir=/$PACKAGE_NAME/plugins # relative to datadir infdir=/$PACKAGE_NAME/inf # relative to datadir mibdir=/$PACKAGE_NAME/mibs # location of property/resource files, relative to datadir propertydir=/$PACKAGE_NAME/properties # relative to libdir perldir=/$PACKAGE_NAME/perl # relative to libdir pythondir=/$PACKAGE_NAME/python fi # if mandir is the default value, override it # otherwise, the user must have set it - just use it if test X"$mandir" = X'${prefix}/man' ; then mandir='$(datadir)/man' fi # Shared paths for all layouts # relative to sysconfdir configdir=/$PACKAGE_NAME/config # relative to sysconfdir schemadir=/$PACKAGE_NAME/schema # default user, group defaultuser=dirsrv defaultgroup=dirsrv AC_MSG_CHECKING(for --with-perldir) AC_ARG_WITH([perldir], AS_HELP_STRING([--with-perldir=PATH], [Directory for perl]) ) if test -n "$with_perldir"; then if test "$with_perldir" = yes ; then AC_MSG_ERROR([You must specify --with-perldir=/full/path/to/perl]) elif test "$with_perldir" = no ; then with_perldir= else AC_MSG_RESULT([$with_perldir]) fi else with_perldir= fi AC_MSG_CHECKING(for --with-pythonexec) AC_ARG_WITH([pythonexec], AS_HELP_STRING([--with-pythonexec=PATH], [Path to executable for python]) ) if test -n "$with_pythonexec"; then if test "$with_pythonexec" = yes ; then AC_MSG_ERROR([You must specify --with-pythonexec=/full/path/to/python]) elif test "$with_pythonexec" = no ; then with_pythonexec=/usr/bin/python3 else AC_MSG_RESULT([$with_pythonexec]) fi else with_pythonexec=/usr/bin/python3 fi AC_SUBST(prefixdir) AC_SUBST(configdir) AC_SUBST(sampledatadir) AC_SUBST(systemschemadir) AC_SUBST(propertydir) AC_SUBST(schemadir) AC_SUBST(serverdir) AC_SUBST(serverincdir) AC_SUBST(serverplugindir) AC_SUBST(scripttemplatedir) AC_SUBST(perldir) AC_SUBST(pythondir) AC_SUBST(infdir) AC_SUBST(mibdir) AC_SUBST(mandir) AC_SUBST(updatedir) 
AC_SUBST(defaultuser) AC_SUBST(defaultgroup) AC_SUBST(cockpitdir) # check for --with-instconfigdir AC_MSG_CHECKING(for --with-instconfigdir) AC_ARG_WITH(instconfigdir, AS_HELP_STRING([--with-instconfigdir=/path], [Base directory for instance specific writable configuration directories (default $sysconfdir/$PACKAGE_NAME)]), [ if test $withval = yes ; then AC_ERROR([Please specify a full path with --with-instconfigdir]) fi instconfigdir="$withval" AC_MSG_RESULT($withval) ], [ dnl this value is expanded out in Makefile.am instconfigdir='$(sysconfdir)/$(PACKAGE_NAME)' AC_MSG_RESULT(no) ]) AC_SUBST(instconfigdir) # WINNT should be true if building on Windows system not using # cygnus, mingw, or the like and using cmd.exe as the shell AM_CONDITIONAL([WINNT], false) # Deal with platform dependent defines # initdir is the location for the SysV init scripts - very heavily platform # dependent and not specified in fhs or lsb # and not used if systemd is used initdir='$(sysconfdir)/rc.d' AC_MSG_CHECKING(for --with-initddir) AC_ARG_WITH(initddir, AS_HELP_STRING([--with-initddir=/path], [Absolute path (not relative like some of the other options) that should contain the SysV init scripts (default '$(sysconfdir)/rc.d')]), [ AC_MSG_RESULT($withval) ], [ AC_MSG_RESULT(no) ]) AM_CONDITIONAL([INITDDIR], [test -n "$with_initddir" -a "$with_initddir" != "no"]) # This will let us change over the python version easier in the future. if test -n "$with_pythonexec"; then pythonexec="$with_pythonexec" else pythonexec='/usr/bin/python3' fi # Default to no atomic queue operations. with_atomic_queue="no" # we use stty in perl scripts to disable password echo # this doesn't work unless the full absolute path of the # stty command is used e.g. 
system("stty -echo") does not # work but system("/bin/stty -echo") does work # since the path of stty may not be the same on all # platforms, we set the default here to /bin/stty and # allow that value to be overridden in the platform # specific section below sttyexec=/bin/stty case $host in *-*-linux*) AC_DEFINE([LINUX], [1], [Linux]) AC_DEFINE([_GNU_SOURCE], [1], [GNU Source]) platform="linux" initdir='$(sysconfdir)/rc.d/init.d' # do arch specific linux stuff here case $host in i*86-*-linux*) AC_DEFINE([CPU_x86], [], [cpu type x86]) ;; x86_64-*-linux*) AC_DEFINE([CPU_x86_64], [1], [cpu type x86_64]) ;; aarch64-*-linux*) AC_DEFINE([CPU_arm], [], [cpu type arm]) ;; arm-*-linux*) AC_DEFINE([CPU_arm], [], [cpu type arm]) ;; ppc64le-*-linux*) ;; ppc64-*-linux*) ;; ppc-*-linux*) ;; s390-*-linux*) ;; s390x-*-linux*) ;; esac # some programs use the native thread library directly THREADLIB=-lpthread AC_SUBST([THREADLIB], [$THREADLIB]) LIBCRYPT=-lcrypt AC_SUBST([LIBCRYPT], [$LIBCRYPT]) AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) ;; *-*-freebsd*) AC_DEFINE([FREEBSD], [1], [FreeBSD]) platform="freebsd" initdir='$(sysconfdir)/rc.d' THREADLIB=-lthr AC_SUBST([THREADLIB], [$THREADLIB]) AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) LIBDL= ;; ia64-hp-hpux*) AC_DEFINE([hpux], [1], [HP-UX]) AC_DEFINE([HPUX], [1], [HP-UX]) AC_DEFINE([HPUX11], [1], [HP-UX 11]) AC_DEFINE([HPUX11_23], [1], [HP-UX 11.23]) AC_DEFINE([CPU_ia64], [], [cpu type ia64]) AC_DEFINE([OS_hpux], [1], [OS HP-UX]) AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) # assume 64 bit platform="hpux" initconfigdir="/$PACKAGE_NAME/config" # HPUX doesn't use /etc for this initdir=/init.d ;; hppa*-hp-hpux*) AC_DEFINE([hpux], [1], [HP-UX]) AC_DEFINE([HPUX], [1], [HP-UX]) AC_DEFINE([HPUX11], [1], [HP-UX 11]) AC_DEFINE([HPUX11_11], [1], [HP-UX 
11.11]) AC_DEFINE([CPU_hppa], [], [cpu type pa-risc]) AC_DEFINE([OS_hpux], [1], [OS HP-UX]) AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) # assume 64 bit initconfigdir="/$PACKAGE_NAME/config" platform="hpux" # HPUX doesn't use /etc for this initdir=/init.d ;; *-*-solaris*) AC_DEFINE([SVR4], [1], [SVR4]) AC_DEFINE([__svr4], [1], [SVR4]) AC_DEFINE([__svr4__], [1], [SVR4]) AC_DEFINE([_SVID_GETTOD], [1], [SVID_GETTOD]) AC_DEFINE([SOLARIS], [1], [SOLARIS]) AC_DEFINE([OS_solaris], [1], [OS SOLARIS]) AC_DEFINE([sunos5], [1], [SunOS5]) AC_DEFINE([OSVERSION], [509], [OS version]) AC_DEFINE([_REENTRANT], [1], [_REENTRANT]) AC_DEFINE([NO_DOMAINNAME], [1], [no getdomainname]) dnl socket nsl and dl are required to link several programs and libdb LIBSOCKET=-lsocket AC_SUBST([LIBSOCKET], [$LIBSOCKET]) LIBNSL=-lnsl AC_SUBST([LIBNSL], [$LIBNSL]) LIBDL=-ldl AC_SUBST([LIBDL], [$LIBDL]) dnl Cstd and Crun are required to link any C++ related code LIBCSTD=-lCstd AC_SUBST([LIBCSTD], [$LIBCSTD]) LIBCRUN=-lCrun AC_SUBST([LIBCRUN], [$LIBCRUN]) platform="solaris" initdir='$(sysconfdir)/init.d' case $host in i?86-*-solaris2.1[[0-9]]*) dnl I dont know why i386 need this explicit AC_DEFINE([HAVE_GETPEERUCRED], [1], [have getpeerucred]) ;; sparc-*-solaris*) dnl includes some assembler stuff in counter.o AC_DEFINE([CPU_sparc], [], [cpu type sparc]) TARGET='SPARC' ;; esac ;; *) platform="" ;; esac ### TO CHECK FOR SSE4.2!!! # gcc -march=native -dM -E - < /dev/null | grep SSE # We can just use the define in GCC instead! 
AC_MSG_CHECKING([for GCC provided 64-bit atomic operations]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ uint64_t t_counter = 0; uint64_t t_oldval = 0; uint64_t t_newval = 1; __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST); return 0; ]])], [ AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [have 64-bit atomic operation functions provided by gcc]) AC_MSG_RESULT([yes]) ], [ AC_MSG_RESULT([no]) ] ) # cmd line overrides default setting above if test -n "$with_initddir" ; then initdir="$with_initddir" fi # sysv init scripts not used when systemd is used AC_SUBST(initdir) AC_SUBST(pythonexec) AC_SUBST(sttyexec) # set default initconfigdir if not already set # value will be set so as to be relative to $(sysconfdir) if test -z "$initconfigdir" ; then if test -d /etc/sysconfig ; then initconfigdir=/sysconfig elif test -d /etc/default ; then initconfigdir=/default else initconfigdir="/$PACKAGE_NAME/config" fi fi AC_SUBST(initconfigdir) # Conditionals for makefile.am AM_CONDITIONAL([HPUX],[test "$platform" = "hpux"]) AM_CONDITIONAL([SOLARIS],[test "$platform" = "solaris"]) AM_CONDITIONAL([FREEBSD],[test "$platform" = "freebsd"]) AM_CONDITIONAL([SPARC],[test "x$TARGET" = xSPARC]) # Check for library dependencies PKG_CHECK_MODULES([EVENT], [libevent]) if $PKG_CONFIG --exists nspr; then PKG_CHECK_MODULES([NSPR], [nspr]) else PKG_CHECK_MODULES([NSPR], [dirsec-nspr]) fi if $PKG_CONFIG --exists nss; then PKG_CHECK_MODULES([NSS], [nss]) nss_libdir=`$PKG_CONFIG --libs-only-L nss | sed -e s/-L// | sed -e s/\ .*$//` else PKG_CHECK_MODULES([NSS], [dirsec-nss]) nss_libdir=`$PKG_CONFIG --libs-only-L dirsec-nss | sed -e s/-L// | sed -e s/\ .*$//` fi AC_SUBST(nss_libdir) m4_include(m4/openldap.m4) m4_include(m4/db.m4) PKG_CHECK_MODULES([SASL], [libsasl2]) 
PKG_CHECK_MODULES([ICU], [icu-i18n >= 60.2]) m4_include(m4/netsnmp.m4) PKG_CHECK_MODULES([KERBEROS], [krb5]) krb5_vendor=`$PKG_CONFIG --variable=vendor krb5` if test "$krb5_vendor" = "MIT"; then AC_DEFINE(HAVE_KRB5, 1, [Define if you have Kerberos V]) save_LIBS="$LIBS" LIBS="$KERBEROS_LIBS" AC_CHECK_FUNCS([krb5_cc_new_unique]) LIBS="$save_LIBS" fi if $PKG_CONFIG --exists pcre; then PKG_CHECK_MODULES([PCRE], [pcre]) pcre_libdir=`$PKG_CONFIG --libs-only-L pcre | sed -e s/-L// | sed -e s/\ .*$//` else PKG_CHECK_MODULES([PCRE], [libpcre]) pcre_libdir=`$PKG_CONFIG --libs-only-L libpcre | sed -e s/-L// | sed -e s/\ .*$//` fi AC_SUBST(pcre_libdir) m4_include(m4/selinux.m4) m4_include(m4/systemd.m4) AC_MSG_CHECKING(whether to enable cmocka unit tests) AC_ARG_ENABLE(cmocka, AS_HELP_STRING([--enable-cmocka], [Enable cmocka unit tests (default: no)])) if test "x$enable_cmocka" = "xyes"; then AC_MSG_RESULT(yes) PKG_CHECK_MODULES([CMOCKA], [cmocka]) AC_DEFINE([ENABLE_CMOCKA], [1], [Enable cmocka unit tests]) else AC_MSG_RESULT(no) fi AM_CONDITIONAL([ENABLE_CMOCKA], [test "x$enable_cmocka" = "xyes"]) m4_include(m4/doxygen.m4) PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'` AC_SUBST(PACKAGE_BASE_VERSION) AM_CONDITIONAL(OPENLDAP,test "$with_openldap" = "yes") # write out paths for binary components AC_SUBST(ldaplib) AC_SUBST(ldaplib_defs) AC_SUBST(ldaptool_bindir) AC_SUBST(ldaptool_opts) AC_SUBST(plainldif_opts) AC_SUBST(brand) AC_SUBST(capbrand) AC_SUBST(vendor) # AC_DEFINE([USE_OLD_UNHASHED], [], [Use old unhashed code]) # Internally we use a macro function slapi_log_err() to call slapi_log_error() # which gives us the option to do performance testing without the presence of # logging. To remove the presence of error logging undefine LDAP_ERROR_LOGGING. AC_DEFINE([LDAP_ERROR_LOGGING], [1], [LDAP error logging flag]) # Build our pkgconfig files # This currently conflicts with %.in: rule in Makefile.am, which should be removed eventually. 
# AC_CONFIG_FILES([ldap/admin/src/defaults.inf]) AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/libsds.pc src/pkgconfig/svrcore.pc]) AC_CONFIG_FILES([Makefile rpm/389-ds-base.spec ]) AC_CONFIG_FILES([.cargo/config]) AC_OUTPUT 389-ds-base-389-ds-base-2.0.15/dirsrvtests/000077500000000000000000000000001421664411400176725ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/README000066400000000000000000000021251421664411400205520ustar00rootroot00000000000000389-ds-base-tests README ================================================= Prerequisites: ------------------------------------------------- Install the python-lib389 packages, or download the source(git clone ssh://git.fedorahosted.org/git/389/lib389.git) and set your PYTHONPATH accordingly Description: ------------------------------------------------- This package includes python-lib389 based python scripts for testing the Directory Server. The following describes the various types of tests available: tickets - These scripts test individual bug fixes suites - These test functinoal areas of the server stress - These tests perform "stress" tests on the server There is also a "create_test.py" script available to construct a template test script for creating new tests. 
Documentation: ------------------------------------------------- See http://www.port389.org for the latest information http://www.port389.org/docs/389ds/FAQ/upstream-test-framework.html http://www.port389.org/docs/389ds/howto/howto-write-lib389.html http://www.port389.org/docs/389ds/howto/howto-run-lib389-jenkins.html 389-ds-base-389-ds-base-2.0.15/dirsrvtests/__init__.py000066400000000000000000000000001421664411400217710ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/conftest.py000066400000000000000000000074051421664411400220770ustar00rootroot00000000000000import subprocess import logging import pytest import shutil import glob import os from lib389.paths import Paths from enum import Enum pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl'] p = Paths() class FIPSState(Enum): ENABLED = 'enabled' DISABLED = 'disabled' NOT_AVAILABLE = 'not_available' def __unicode__(self): return self.value def __str__(self): return self.value def get_rpm_version(pkg): try: result = subprocess.check_output(['rpm', '-q', '--queryformat', '%{VERSION}-%{RELEASE}', pkg]) except: result = b"not installed" return result.decode('utf-8') def is_fips(): # Are we running in FIPS mode? 
if not os.path.exists('/proc/sys/crypto/fips_enabled'): return FIPSState.NOT_AVAILABLE state = None with open('/proc/sys/crypto/fips_enabled', 'r') as f: state = f.readline().strip() if state == '1': return FIPSState.ENABLED else: return FIPSState.DISABLED @pytest.fixture(autouse=True) def _environment(request): if "_metadata" in dir(request.config): for pkg in pkgs: request.config._metadata[pkg] = get_rpm_version(pkg) request.config._metadata['FIPS'] = is_fips() def pytest_cmdline_main(config): logging.basicConfig(level=logging.DEBUG) def pytest_report_header(config): header = "" for pkg in pkgs: header += "%s: %s\n" % (pkg, get_rpm_version(pkg)) header += "FIPS: %s" % is_fips() return header @pytest.fixture(scope="function", autouse=True) def log_test_name_to_journald(request): if p.with_systemd: def log_current_test(): subprocess.Popen("echo $PYTEST_CURRENT_TEST | systemd-cat -t pytest", stdin=subprocess.PIPE, shell=True) log_current_test() request.addfinalizer(log_current_test) return log_test_name_to_journald @pytest.fixture(scope="function", autouse=True) def rotate_xsan_logs(request): # Do we have a pytest-html installed? pytest_html = request.config.pluginmanager.getplugin('html') if pytest_html is not None: # We have it installed, but let's check if we actually use it (--html=report.html) pytest_htmlpath = request.config.getoption('htmlpath') if p.asan_enabled and pytest_htmlpath is not None: # ASAN is enabled and an HTML report was requested, # rotate the ASAN logs so that only relevant logs are attached to the case in the report. 
xsan_logs_dir = f'{p.run_dir}/bak' if not os.path.exists(xsan_logs_dir): os.mkdir(xsan_logs_dir) else: for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'): shutil.move(f, xsan_logs_dir) return rotate_xsan_logs @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): pytest_html = item.config.pluginmanager.getplugin('html') outcome = yield report = outcome.get_result() extra = getattr(report, 'extra', []) if report.when == 'call' and pytest_html is not None: for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'): with open(f) as asan_report: text = asan_report.read() extra.append(pytest_html.extras.text(text, name=os.path.basename(f))) for f in glob.glob(f'{p.log_dir.split("/slapd",1)[0]}/*/*'): if 'rotationinfo' not in f: with open(f) as dirsrv_log: text = dirsrv_log.read() log_name = os.path.basename(f) instance_name = os.path.basename(os.path.dirname(f)).split("slapd-",1)[1] extra.append(pytest_html.extras.text(text, name=f"{instance_name}-{log_name}")) report.extra = extra 389-ds-base-389-ds-base-2.0.15/dirsrvtests/create_test.py000077500000000000000000000265741421664411400225670ustar00rootroot00000000000000#!/usr/bin/python3 # # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import optparse import os import re import sys import uuid from lib389 import topologies """This script generates a template test script that handles the non-interesting parts of a test script: - topology fixture that doesn't exist in in lib389/topologies.py - test function (to be completed by the user), - run-isolated function """ def displayUsage(): """Display the usage""" print ('\nUsage:\ncreate_ticket.py -t|--ticket ' + '-s|--suite ' + '[ i|--instances ' + '[ -m|--suppliers -h|--hubs ' + '-c|--consumers ] -o|--outputfile ]\n') print ('If only "-t" is provided then a single standalone instance is ' + 'created. 
Or you can create a test suite script using ' + '"-s|--suite" instead of using "-t|--ticket". The "-i" option ' + 'can add multiple standalone instances (maximum 99). However, you' + ' can not mix "-i" with the replication options (-m, -h , -c). ' + 'There is a maximum of 99 suppliers, 99 hubs, and 99 consumers.') print('If "-s|--suite" option was chosen, then no topology would be added ' + 'to the test script. You can find predefined fixtures in the lib389/topologies.py ' + 'and use them or write a new one if you have a special case.') exit(1) def writeFinalizer(): """Write the finalizer function - delete/stop each instance""" def writeInstanceOp(action): TEST.write(' map(lambda inst: inst.{}(), topology.all_insts.values())\n'.format(action)) TEST.write('\n def fin():\n') TEST.write(' """If we are debugging just stop the instances, otherwise remove them"""\n\n') TEST.write(' if DEBUGGING:\n') writeInstanceOp('stop') TEST.write(' else:\n') writeInstanceOp('delete') TEST.write('\n request.addfinalizer(fin)') TEST.write('\n\n') def get_existing_topologies(inst, suppliers, hubs, consumers): """Check if the requested topology exists""" setup_text = "" if inst: if inst == 1: i = 'st' setup_text = "Standalone Instance" else: i = 'i{}'.format(inst) setup_text = "{} Standalone Instances".format(inst) else: i = '' if suppliers: ms = 'm{}'.format(suppliers) if len(setup_text) > 0: setup_text += ", " if suppliers == 1: setup_text += "Supplier Instance" else: setup_text += "{} Supplier Instances".format(suppliers) else: ms = '' if hubs: hs = 'h{}'.format(hubs) if len(setup_text) > 0: setup_text += ", " if hubs == 1: setup_text += "Hub Instance" else: setup_text += "{} Hub Instances".format(hubs) else: hs = '' if consumers: cs = 'c{}'.format(consumers) if len(setup_text) > 0: setup_text += ", " if consumers == 1: setup_text += "Consumer Instance" else: setup_text += "{} Consumer Instances".format(consumers) else: cs = '' my_topology = 'topology_{}{}{}{}'.format(i, ms, hs, cs) # 
Returns True in the first element of a list, if topology was found if my_topology in dir(topologies): return [True, my_topology, setup_text] else: return [False, my_topology, setup_text] def check_id_uniqueness(id_value): """Checks if ID is already present in other tests. create_test.py script should exist in the directory with a 'tests' dir. """ tests_dir = os.path.join(os.getcwd(), 'tests') for root, dirs, files in os.walk(tests_dir): for name in files: if name.endswith('.py'): with open(os.path.join(root, name), "r") as cifile: for line in cifile: if re.search(str(id_value), line): return False return True desc = 'Script to generate an initial lib389 test script. ' + \ 'This generates the topology, test, final, and run-isolated functions.' if len(sys.argv) > 0: parser = optparse.OptionParser(description=desc, add_help_option=False) # Script options parser.add_option('-t', '--ticket', dest='ticket', default=None) parser.add_option('-s', '--suite', dest='suite', default=None) parser.add_option('-i', '--instances', dest='inst', default='0') parser.add_option('-m', '--suppliers', dest='suppliers', default='0') parser.add_option('-h', '--hubs', dest='hubs', default='0') parser.add_option('-c', '--consumers', dest='consumers', default='0') parser.add_option('-o', '--outputfile', dest='filename', default=None) # Validate the options try: (args, opts) = parser.parse_args() except: displayUsage() if args.ticket is None and args.suite is None: print('Missing required ticket number/suite name') displayUsage() if args.ticket and args.suite: print('You must choose either "-t|--ticket" or "-s|--suite", ' + 'but not both.') displayUsage() if int(args.suppliers) == 0: if int(args.hubs) > 0 or int(args.consumers) > 0: print('You must use "-m|--suppliers" if you want to have hubs ' + 'and/or consumers') displayUsage() if not args.suppliers.isdigit() or \ int(args.suppliers) > 99 or \ int(args.suppliers) < 0: print('Invalid value for "--suppliers", it must be a number and it can' 
+ ' not be greater than 99') displayUsage() if not args.hubs.isdigit() or int(args.hubs) > 99 or int(args.hubs) < 0: print('Invalid value for "--hubs", it must be a number and it can ' + 'not be greater than 99') displayUsage() if not args.consumers.isdigit() or \ int(args.consumers) > 99 or \ int(args.consumers) < 0: print('Invalid value for "--consumers", it must be a number and it ' + 'can not be greater than 99') displayUsage() if args.inst: if not args.inst.isdigit() or \ int(args.inst) > 99 or \ int(args.inst) < 0: print('Invalid value for "--instances", it must be a number ' + 'greater than 0 and not greater than 99') displayUsage() if int(args.inst) > 0: if int(args.suppliers) > 0 or \ int(args.hubs) > 0 or \ int(args.consumers) > 0: print('You can not mix "--instances" with replication.') displayUsage() # Extract usable values ticket = args.ticket suite = args.suite if args.inst == '0' and args.suppliers == '0' and args.hubs == '0' \ and args.consumers == '0': instances = 1 my_topology = [True, 'topology_st', "Standalone Instance"] else: instances = int(args.inst) suppliers = int(args.suppliers) hubs = int(args.hubs) consumers = int(args.consumers) my_topology = get_existing_topologies(instances, suppliers, hubs, consumers) filename = args.filename setup_text = my_topology[2] # Create/open the new test script file if not filename: if ticket: filename = 'ticket' + ticket + '_test.py' else: filename = suite + '_test.py' try: TEST = open(filename, "w") except IOError: print("Can\'t open file:", filename) exit(1) # Write the imports if my_topology[0]: topology_import = 'from lib389.topologies import {} as topo\n'.format(my_topology[1]) else: topology_import = 'from lib389.topologies import create_topology\n' TEST.write('import logging\nimport pytest\nimport os\n') TEST.write('from lib389._constants import *\n') TEST.write('{}\n'.format(topology_import)) TEST.write('log = logging.getLogger(__name__)\n\n') # Add topology function for non existing (in 
lib389/topologies.py) topologies only if not my_topology[0]: # Write the replication or standalone classes topologies_str = "" if suppliers > 0: topologies_str += " {} suppliers".format(suppliers) if hubs > 0: topologies_str += " {} hubs".format(hubs) if consumers > 0: topologies_str += " {} consumers".format(consumers) if instances > 0: topologies_str += " {} standalone instances".format(instances) # Write the 'topology function' TEST.write('\n@pytest.fixture(scope="module")\n') TEST.write('def topo(request):\n') TEST.write(' """Create a topology with{}"""\n\n'.format(topologies_str)) TEST.write(' topology = create_topology({\n') if suppliers > 0: TEST.write(' ReplicaRole.SUPPLIER: {},\n'.format(suppliers)) if hubs > 0: TEST.write(' ReplicaRole.HUB: {},\n'.format(hubs)) if consumers > 0: TEST.write(' ReplicaRole.CONSUMER: {},\n'.format(consumers)) if instances > 0: TEST.write(' ReplicaRole.STANDALONE: {},\n'.format(instances)) TEST.write(' })\n') TEST.write(' # You can write replica test here. Just uncomment the block and choose instances\n') TEST.write(' # replicas = Replicas(topology.ms["supplier1"])\n') TEST.write(' # replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])\n') writeFinalizer() TEST.write(' return topology\n\n') tc_id = '0' while not check_id_uniqueness(tc_id): tc_id = uuid.uuid4() # Write the test function if ticket: TEST.write('\ndef test_ticket{}(topo):\n'.format(ticket)) else: TEST.write('\ndef test_something(topo):\n') TEST.write(' """Specify a test case purpose or name here\n\n') TEST.write(' :id: {}\n'.format(tc_id)) TEST.write(' :setup: ' + setup_text + '\n') TEST.write(' :steps:\n') TEST.write(' 1. Fill in test case steps here\n') TEST.write(' 2. And indent them like this (RST format requirement)\n') TEST.write(' :expectedresults:\n') TEST.write(' 1. Fill in the result that is expected\n') TEST.write(' 2. 
For each test step\n') TEST.write(' """\n\n') TEST.write(' # If you need any test suite initialization,\n') TEST.write(' # please, write additional fixture for that (including finalizer).\n' ' # Topology for suites are predefined in lib389/topologies.py.\n\n') TEST.write(' # If you need host, port or any other data about instance,\n') TEST.write(' # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid)\n\n\n') # Write the main function TEST.write("if __name__ == '__main__':\n") TEST.write(' # Run isolated\n') TEST.write(' # -s for DEBUG mode\n') TEST.write(' CURRENT_FILE = os.path.realpath(__file__)\n') TEST.write(' pytest.main(["-s", CURRENT_FILE])\n\n') # Done, close things up TEST.close() print('Created: ' + filename) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/pytest.ini000066400000000000000000000002631421664411400217240ustar00rootroot00000000000000[pytest] markers = tier0: mark a test as part of tier0 tier1: mark a test as part of tier1 tier2: mark a test as part of tier2 tier3: mark a test as part of tier3 389-ds-base-389-ds-base-2.0.15/dirsrvtests/requirements.txt000066400000000000000000000000321421664411400231510ustar00rootroot00000000000000pytest pytest-libfaketime 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/000077500000000000000000000000001421664411400210345ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/__init__.py000066400000000000000000000000001421664411400231330ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/000077500000000000000000000000001421664411400217455ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/README000066400000000000000000000004721421664411400226300ustar00rootroot00000000000000DATA DIRECTORY README This directory is used for storing LDIF files used by the dirsrvtests scripts. This directory can be retrieved via getDir() from the DirSrv class. 
Example: data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) ldif_file = data_dir_path + "ticket44444/1000entries.ldif" 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/__init__.py000066400000000000000000000000001421664411400240440ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/basic/000077500000000000000000000000001421664411400230265ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/basic/__init__.py000066400000000000000000000000001421664411400251250ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/basic/dse.ldif.broken000066400000000000000000000067551421664411400257350ustar00rootroot00000000000000dn: objectClass: top aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( read,search,compare) userdn="ldap:///anyone";) creatorsName: cn=server,cn=plugins,cn=config modifiersName: cn=server,cn=plugins,cn=config createTimestamp: 20150204165610Z modifyTimestamp: 20150204165610Z dn: cn=config cn: config objectClass: top objectClass: extensibleObject objectClass: nsslapdConfig nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost nsslapd-tmpdir: /tmp nsslapd-certdir: /etc/dirsrv/slapd-localhost nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak nsslapd-rundir: /var/run/dirsrv nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost nsslapd-accesslog-logging-enabled: on nsslapd-accesslog-maxlogsperdir: 10 nsslapd-accesslog-mode: 600 nsslapd-accesslog-maxlogsize: 100 nsslapd-accesslog-logrotationtime: 1 nsslapd-accesslog-logrotationtimeunit: day nsslapd-accesslog-logrotationsync-enabled: off nsslapd-accesslog-logrotationsynchour: 0 nsslapd-accesslog-logrotationsyncmin: 0 nsslapd-accesslog: /var/log/dirsrv/slapd-localhost/access nsslapd-enquote-sup-oc: off nsslapd-localhost: localhost.localdomain nsslapd-schemacheck: on 
nsslapd-syntaxcheck: on nsslapd-dn-validate-strict: off nsslapd-rewrite-rfc1274: off nsslapd-return-exact-case: on nsslapd-ssl-check-hostname: on nsslapd-validate-cert: warn nsslapd-allow-unauthenticated-binds: off nsslapd-require-secure-binds: off nsslapd-allow-anonymous####-access: on nsslapd-localssf: 71 nsslapd-minssf: 0 nsslapd-port: 389 nsslapd-localuser: nobody nsslapd-errorlog-logging-enabled: on nsslapd-errorlog-mode: 600 nsslapd-errorlog-maxlogsperdir: 2 nsslapd-errorlog-maxlogsize: 100 nsslapd-errorlog-logrotationtime: 1 nsslapd-errorlog-logrotationtimeunit: week nsslapd-errorlog-logrotationsync-enabled: off nsslapd-errorlog-logrotationsynchour: 0 nsslapd-errorlog-logrotationsyncmin: 0 nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit nsslapd-auditlog-mode: 600 nsslapd-auditlog-maxlogsize: 100 nsslapd-auditlog-logrotationtime: 1 nsslapd-auditlog-logrotationtimeunit: day nsslapd-rootdn: cn=dm nsslapd-maxdescriptors: 1024 nsslapd-max-filter-nest-level: 40 nsslapd-ndn-cache-enabled: on nsslapd-sasl-mapping-fallback: off nsslapd-dynamic-plugins: off nsslapd-allow-hashed-passwords: off nsslapd-ldapifilepath: /var/run/slapd-localhost.socket nsslapd-ldapilisten: off nsslapd-ldapiautobind: off nsslapd-ldapimaprootdn: cn=dm nsslapd-ldapimaptoentries: off nsslapd-ldapiuidnumbertype: uidNumber nsslapd-ldapigidnumbertype: gidNumber nsslapd-ldapientrysearchbase: dc=example,dc=com nsslapd-defaultnamingcontext: dc=example,dc=com aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo logyManagement,o=NetscapeRoot";) aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc apeRoot";) aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l dap:///cn=slapd-localhost,cn=389 Directory 
Server,cn=Server Group,cn=localhos t.localdomain,ou=example.com,o=NetscapeRoot";) modifiersName: cn=dm modifyTimestamp: 20150205195242Z nsslapd-auditlog-logging-enabled: on nsslapd-auditlog-logging-hide-unhashed-pw: off nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw== numSubordinates: 10 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/entryuuid/000077500000000000000000000000001421664411400237755ustar00rootroot00000000000000localhost-userRoot-2020_03_30_13_14_47.ldif000066400000000000000000000202661421664411400327020ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/entryuuidversion: 1 # entry-id: 1 dn: dc=example,dc=com objectClass: top objectClass: domain dc: example description: dc=example,dc=com creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015542Z modifyTimestamp: 20200325015542Z nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search , compare)(userdn="ldap:///anyone");) aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa re)(userdn="ldap:///anyone");) # entry-id: 2 dn: cn=389_ds_system,dc=example,dc=com objectClass: top objectClass: nscontainer objectClass: ldapsubentry cn: 389_ds_system creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015542Z modifyTimestamp: 20200325015542Z nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda # entry-id: 3 dn: ou=groups,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: groups aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone") ;) 
aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: ///cn=group_modify,ou=permissions,dc=example,dc=com");) aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi n,ou=permissions,dc=example,dc=com");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda # entry-id: 4 dn: ou=people,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: people aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user read"; allow (read, search, compare)(userdn="ldap:///anyone");) aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// /self");) aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" );) aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= com");) aci: (targetattr="uid || description || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam e || telephoneNumber || 
mobile")(targetfilter="(&(objectClass=nsPerson)(objec tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl e,dc=com");) aci: (targetattr="uid || description || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, dc=com");) aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, ou=permissions,dc=example,dc=com");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda # entry-id: 5 dn: ou=permissions,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: permissions creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda # entry-id: 6 dn: ou=services,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: services aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; acl "Enable anyone service account read"; allow (read, search, compare)(userd n="ldap:///anyone");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325015544Z nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda # entry-id: 7 dn: uid=demo_user,ou=people,dc=example,dc=com 
objectClass: top objectClass: nsPerson objectClass: nsAccount objectClass: nsOrgPerson objectClass: posixAccount uid: demo_user cn: Demo User displayName: Demo User legalName: Demo User Name uidNumber: 99998 gidNumber: 99998 homeDirectory: /var/empty loginShell: /bin/false nsAccountLock: true creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325061615Z nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda entryUUID: 973e1bbf-ba9c-45d4-b01b-ff7371fd9008 # entry-id: 8 dn: cn=demo_group,ou=groups,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: posixGroup objectClass: nsMemberOf cn: demo_group gidNumber: 99999 creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325015544Z nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 # entry-id: 9 dn: cn=group_admin,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: group_admin creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda # entry-id: 10 dn: cn=group_modify,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: group_modify creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda # entry-id: 11 dn: cn=user_admin,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_admin creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda # entry-id: 12 dn: 
cn=user_modify,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_modify creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015546Z modifyTimestamp: 20200325015546Z nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda # entry-id: 13 dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_passwd_reset creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015546Z modifyTimestamp: 20200325015546Z nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda # entry-id: 14 dn: cn=user_private_read,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_private_read creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015547Z modifyTimestamp: 20200325015547Z nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/entryuuid/localhost-userRoot-invalid.ldif000066400000000000000000000202361421664411400320740ustar00rootroot00000000000000version: 1 # entry-id: 1 dn: dc=example,dc=com objectClass: top objectClass: domain dc: example description: dc=example,dc=com creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015542Z modifyTimestamp: 20200325015542Z nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search , compare)(userdn="ldap:///anyone");) aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa re)(userdn="ldap:///anyone");) # entry-id: 2 dn: cn=389_ds_system,dc=example,dc=com objectClass: top objectClass: nscontainer objectClass: 
ldapsubentry cn: 389_ds_system creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015542Z modifyTimestamp: 20200325015542Z nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda # entry-id: 3 dn: ou=groups,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: groups aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone") ;) aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: ///cn=group_modify,ou=permissions,dc=example,dc=com");) aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi n,ou=permissions,dc=example,dc=com");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda # entry-id: 4 dn: ou=people,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: people aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user read"; allow (read, search, compare)(userdn="ldap:///anyone");) aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// /self");) aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= 
"(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" );) aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= com");) aci: (targetattr="uid || description || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl e,dc=com");) aci: (targetattr="uid || description || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, dc=com");) aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, ou=permissions,dc=example,dc=com");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda # entry-id: 5 dn: ou=permissions,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: permissions creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015543Z modifyTimestamp: 20200325015543Z nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda # entry-id: 6 dn: 
ou=services,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: services aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; acl "Enable anyone service account read"; allow (read, search, compare)(userd n="ldap:///anyone");) creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325015544Z nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda # entry-id: 7 dn: uid=demo_user,ou=people,dc=example,dc=com objectClass: top objectClass: nsPerson objectClass: nsAccount objectClass: nsOrgPerson objectClass: posixAccount uid: demo_user cn: Demo User displayName: Demo User legalName: Demo User Name uidNumber: 99998 gidNumber: 99998 homeDirectory: /var/empty loginShell: /bin/false nsAccountLock: true creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325061615Z nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda entryUUID: INVALID_UUID # entry-id: 8 dn: cn=demo_group,ou=groups,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: posixGroup objectClass: nsMemberOf cn: demo_group gidNumber: 99999 creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015544Z modifyTimestamp: 20200325015544Z nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 # entry-id: 9 dn: cn=group_admin,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: group_admin creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda # entry-id: 10 dn: cn=group_modify,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: group_modify 
creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda # entry-id: 11 dn: cn=user_admin,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_admin creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015545Z modifyTimestamp: 20200325015545Z nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda # entry-id: 12 dn: cn=user_modify,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_modify creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015546Z modifyTimestamp: 20200325015546Z nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda # entry-id: 13 dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_passwd_reset creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015546Z modifyTimestamp: 20200325015546Z nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda # entry-id: 14 dn: cn=user_private_read,ou=permissions,dc=example,dc=com objectClass: top objectClass: groupOfNames objectClass: nsMemberOf cn: user_private_read creatorsName: cn=Directory Manager modifiersName: cn=Directory Manager createTimestamp: 20200325015547Z modifyTimestamp: 20200325015547Z nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/longduration/000077500000000000000000000000001421664411400244525ustar00rootroot00000000000000db_protect_long_test_reference_1.4.2.12.json000066400000000000000000001043351421664411400344600ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/longduration{"Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing 
+ OFFLINE _job_db2ldifSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + OK", "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE 
_job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + OK", "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO", "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 
+ OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "OK + KO", "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + OK", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + OK", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE 
_job_db2archive + OFFLINE _job_db2archive": "OK + OK", "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_nothing": "OK + OK", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + OK", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + OK", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "OK + KO", "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + 
OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE 
_job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + 
OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE 
_job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE 
_job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE 
_job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE 
_job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_nothing": "OK + OK", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2archive": "OK + KO", "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_archive2db": "OK + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE 
_job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", 
"Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance 
ONLINE OFFLINE _job_db2archive + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_nothing": "KO + OK", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_nothing": "OK + OK", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK 
+ KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2archive": "OK + KO", "Instance ONLINE ONLINE _job_nothing + ONLINE _job_archive2db": "OK + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE 
_job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO", "Instance 
ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_nothing": "KO + OK", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO", "Instance ONLINE ONLINE 
_job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO", "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO"} 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/000077500000000000000000000000001421664411400243735ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/000077500000000000000000000000001421664411400245335ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/example_com.slapcat.ldif000066400000000000000000000177161421664411400313260ustar00rootroot00000000000000dn: dc=example,dc=com objectClass: dcObject objectClass: organization o: Example Company dc: example structuralObjectClass: organization entryUUID: 67c6a9b8-eafa-1039-882d-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.130368Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=Manager,dc=example,dc=com objectClass: organizationalRole cn: Manager structuralObjectClass: organizationalRole entryUUID: 67c8c932-eafa-1039-882e-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.144283Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: ou=People,dc=example,dc=com objectClass: organizationalUnit ou: People structuralObjectClass: organizationalUnit entryUUID: 67ca92a8-eafa-1039-882f-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.155994Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: ou=Groups,dc=example,dc=com objectClass: organizationalUnit ou: Groups structuralObjectClass: organizationalUnit entryUUID: 67cc2212-eafa-1039-8830-152569770969 creatorsName: 
cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.166219Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=user0,ou=People,dc=example,dc=com objectClass: account objectClass: posixAccount cn: user0 uid: user0 uidNumber: 80000 gidNumber: 80000 homeDirectory: /home/user0 structuralObjectClass: account entryUUID: 67cdfcea-eafa-1039-8831-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.178373Z#000000#000#000000 modifyTimestamp: 20200224023755Z memberOf: cn=group0,ou=groups,dc=example,dc=com memberOf: cn=group1,ou=groups,dc=example,dc=com memberOf: cn=group2,ou=groups,dc=example,dc=com memberOf: cn=group3,ou=groups,dc=example,dc=com memberOf: cn=group4,ou=groups,dc=example,dc=com modifiersName: cn=Manager,dc=example,dc=com dn: cn=user1,ou=People,dc=example,dc=com objectClass: account objectClass: posixAccount cn: user1 uid: user1 uidNumber: 80001 gidNumber: 80001 homeDirectory: /home/user1 structuralObjectClass: account entryUUID: 67d05080-eafa-1039-8832-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.193620Z#000000#000#000000 modifyTimestamp: 20200224023755Z memberOf: cn=group0,ou=groups,dc=example,dc=com memberOf: cn=group1,ou=groups,dc=example,dc=com memberOf: cn=group2,ou=groups,dc=example,dc=com memberOf: cn=group3,ou=groups,dc=example,dc=com memberOf: cn=group4,ou=groups,dc=example,dc=com modifiersName: cn=Manager,dc=example,dc=com dn: cn=user2,ou=People,dc=example,dc=com objectClass: account objectClass: posixAccount cn: user2 uid: user2 uidNumber: 80002 gidNumber: 80002 homeDirectory: /home/user2 structuralObjectClass: account entryUUID: 67d26172-eafa-1039-8833-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.207161Z#000000#000#000000 modifyTimestamp: 20200224023755Z memberOf: 
cn=group0,ou=groups,dc=example,dc=com memberOf: cn=group1,ou=groups,dc=example,dc=com memberOf: cn=group2,ou=groups,dc=example,dc=com memberOf: cn=group3,ou=groups,dc=example,dc=com memberOf: cn=group4,ou=groups,dc=example,dc=com modifiersName: cn=Manager,dc=example,dc=com dn: cn=user3,ou=People,dc=example,dc=com objectClass: account objectClass: posixAccount cn: user3 uid: user3 uidNumber: 80003 gidNumber: 80003 homeDirectory: /home/user3 structuralObjectClass: account entryUUID: 67d460bc-eafa-1039-8834-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.220249Z#000000#000#000000 modifyTimestamp: 20200224023755Z memberOf: cn=group0,ou=groups,dc=example,dc=com memberOf: cn=group1,ou=groups,dc=example,dc=com memberOf: cn=group2,ou=groups,dc=example,dc=com memberOf: cn=group3,ou=groups,dc=example,dc=com memberOf: cn=group4,ou=groups,dc=example,dc=com modifiersName: cn=Manager,dc=example,dc=com dn: cn=user4,ou=People,dc=example,dc=com objectClass: account objectClass: posixAccount cn: user4 uid: user4 uidNumber: 80004 gidNumber: 80004 homeDirectory: /home/user4 structuralObjectClass: account entryUUID: 67d5d2a8-eafa-1039-8835-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.229723Z#000000#000#000000 modifyTimestamp: 20200224023755Z memberOf: cn=group0,ou=groups,dc=example,dc=com memberOf: cn=group1,ou=groups,dc=example,dc=com memberOf: cn=group2,ou=groups,dc=example,dc=com memberOf: cn=group3,ou=groups,dc=example,dc=com memberOf: cn=group4,ou=groups,dc=example,dc=com modifiersName: cn=Manager,dc=example,dc=com dn: cn=group0,ou=Groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group0 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 
90000 structuralObjectClass: groupOfNames entryUUID: 67d6f796-eafa-1039-8836-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.237225Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=group1,ou=Groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group1 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90001 structuralObjectClass: groupOfNames entryUUID: 67da9d2e-eafa-1039-8837-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.261127Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=group2,ou=Groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group2 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90002 structuralObjectClass: groupOfNames entryUUID: 67de2822-eafa-1039-8838-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.284346Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=group3,ou=Groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group3 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90003 structuralObjectClass: groupOfNames entryUUID: 67e1a6aa-eafa-1039-8839-152569770969 
creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.307244Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z dn: cn=group4,ou=Groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group4 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90004 structuralObjectClass: groupOfNames entryUUID: 67e5a50c-eafa-1039-883a-152569770969 creatorsName: cn=Manager,dc=example,dc=com createTimestamp: 20200224023755Z entryCSN: 20200224023755.333416Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=com modifyTimestamp: 20200224023755Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/example_net.slapcat.ldif000066400000000000000000000177161421664411400313360ustar00rootroot00000000000000dn: dc=example,dc=net objectClass: dcObject objectClass: organization o: Example Company dc: example structuralObjectClass: organization entryUUID: 5df457fe-eafb-1039-8857-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.149265Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=Manager,dc=example,dc=net objectClass: organizationalRole cn: Manager structuralObjectClass: organizationalRole entryUUID: 5df55cf8-eafb-1039-8858-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.155945Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: ou=People,dc=example,dc=net objectClass: organizationalUnit ou: People structuralObjectClass: organizationalUnit entryUUID: 5df60342-eafb-1039-8859-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 
20200224024448Z entryCSN: 20200224024448.160202Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: ou=Groups,dc=example,dc=net objectClass: organizationalUnit ou: Groups structuralObjectClass: organizationalUnit entryUUID: 5df6a57c-eafb-1039-885a-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.164355Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=user0,ou=People,dc=example,dc=net objectClass: account objectClass: posixAccount cn: user0 uid: user0 uidNumber: 80000 gidNumber: 80000 homeDirectory: /home/user0 structuralObjectClass: account entryUUID: 5df7521a-eafb-1039-885b-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.168774Z#000000#000#000000 modifyTimestamp: 20200224024448Z memberOf: cn=group0,ou=groups,dc=example,dc=net memberOf: cn=group1,ou=groups,dc=example,dc=net memberOf: cn=group2,ou=groups,dc=example,dc=net memberOf: cn=group3,ou=groups,dc=example,dc=net memberOf: cn=group4,ou=groups,dc=example,dc=net modifiersName: cn=Manager,dc=example,dc=net dn: cn=user1,ou=People,dc=example,dc=net objectClass: account objectClass: posixAccount cn: user1 uid: user1 uidNumber: 80001 gidNumber: 80001 homeDirectory: /home/user1 structuralObjectClass: account entryUUID: 5df80f66-eafb-1039-885c-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.173619Z#000000#000#000000 modifyTimestamp: 20200224024448Z memberOf: cn=group0,ou=groups,dc=example,dc=net memberOf: cn=group1,ou=groups,dc=example,dc=net memberOf: cn=group2,ou=groups,dc=example,dc=net memberOf: cn=group3,ou=groups,dc=example,dc=net memberOf: cn=group4,ou=groups,dc=example,dc=net modifiersName: cn=Manager,dc=example,dc=net dn: cn=user2,ou=People,dc=example,dc=net objectClass: account objectClass: posixAccount cn: user2 
uid: user2 uidNumber: 80002 gidNumber: 80002 homeDirectory: /home/user2 structuralObjectClass: account entryUUID: 5df8e710-eafb-1039-885d-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.179140Z#000000#000#000000 modifyTimestamp: 20200224024448Z memberOf: cn=group0,ou=groups,dc=example,dc=net memberOf: cn=group1,ou=groups,dc=example,dc=net memberOf: cn=group2,ou=groups,dc=example,dc=net memberOf: cn=group3,ou=groups,dc=example,dc=net memberOf: cn=group4,ou=groups,dc=example,dc=net modifiersName: cn=Manager,dc=example,dc=net dn: cn=user3,ou=People,dc=example,dc=net objectClass: account objectClass: posixAccount cn: user3 uid: user3 uidNumber: 80003 gidNumber: 80003 homeDirectory: /home/user3 structuralObjectClass: account entryUUID: 5df9c356-eafb-1039-885e-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.184778Z#000000#000#000000 modifyTimestamp: 20200224024448Z memberOf: cn=group0,ou=groups,dc=example,dc=net memberOf: cn=group1,ou=groups,dc=example,dc=net memberOf: cn=group2,ou=groups,dc=example,dc=net memberOf: cn=group3,ou=groups,dc=example,dc=net memberOf: cn=group4,ou=groups,dc=example,dc=net modifiersName: cn=Manager,dc=example,dc=net dn: cn=user4,ou=People,dc=example,dc=net objectClass: account objectClass: posixAccount cn: user4 uid: user4 uidNumber: 80004 gidNumber: 80004 homeDirectory: /home/user4 structuralObjectClass: account entryUUID: 5dfaecc2-eafb-1039-885f-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.192376Z#000000#000#000000 modifyTimestamp: 20200224024448Z memberOf: cn=group0,ou=groups,dc=example,dc=net memberOf: cn=group1,ou=groups,dc=example,dc=net memberOf: cn=group2,ou=groups,dc=example,dc=net memberOf: cn=group3,ou=groups,dc=example,dc=net memberOf: cn=group4,ou=groups,dc=example,dc=net modifiersName: cn=Manager,dc=example,dc=net dn: 
cn=group0,ou=Groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group0 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90000 structuralObjectClass: groupOfNames entryUUID: 5dfc02c4-eafb-1039-8860-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.199510Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=group1,ou=Groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group1 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90001 structuralObjectClass: groupOfNames entryUUID: 5e01038c-eafb-1039-8861-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.232297Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=group2,ou=Groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group2 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90002 structuralObjectClass: groupOfNames entryUUID: 5e06b610-eafb-1039-8862-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.269635Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=group3,ou=Groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group3 
member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90003 structuralObjectClass: groupOfNames entryUUID: 5e0aec76-eafb-1039-8863-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.297242Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z dn: cn=group4,ou=Groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group4 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90004 structuralObjectClass: groupOfNames entryUUID: 5e0f0900-eafb-1039-8864-152569770969 creatorsName: cn=Manager,dc=example,dc=net createTimestamp: 20200224024448Z entryCSN: 20200224024448.324187Z#000000#000#000000 modifiersName: cn=Manager,dc=example,dc=net modifyTimestamp: 20200224024448Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/setup/000077500000000000000000000000001421664411400256735ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/setup/example_com.ldif000066400000000000000000000055441421664411400310340ustar00rootroot00000000000000dn: dc=example,dc=com objectclass: dcObject objectclass: organization o: Example Company dc: example dn: cn=Manager,dc=example,dc=com objectclass: organizationalRole cn: Manager dn: ou=People,dc=example,dc=com objectClass: organizationalUnit ou: People dn: ou=Groups,dc=example,dc=com objectClass: organizationalUnit ou: Groups dn: cn=user0,ou=people,dc=example,dc=com objectClass: Account objectClass: posixAccount cn: user0 uid: user0 uidnumber: 80000 gidnumber: 80000 homeDirectory: /home/user0 dn: 
cn=user1,ou=people,dc=example,dc=com objectClass: Account objectClass: posixAccount cn: user1 uid: user1 uidnumber: 80001 gidnumber: 80001 homeDirectory: /home/user1 dn: cn=user2,ou=people,dc=example,dc=com objectClass: Account objectClass: posixAccount cn: user2 uid: user2 uidnumber: 80002 gidnumber: 80002 homeDirectory: /home/user2 dn: cn=user3,ou=people,dc=example,dc=com objectClass: Account objectClass: posixAccount cn: user3 uid: user3 uidnumber: 80003 gidnumber: 80003 homeDirectory: /home/user3 dn: cn=user4,ou=people,dc=example,dc=com objectClass: Account objectClass: posixAccount cn: user4 uid: user4 uidnumber: 80004 gidnumber: 80004 homeDirectory: /home/user4 dn: cn=group0,ou=groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group0 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90000 dn: cn=group1,ou=groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group1 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90001 dn: cn=group2,ou=groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group2 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90002 dn: cn=group3,ou=groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group3 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com 
member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90003 dn: cn=group4,ou=groups,dc=example,dc=com objectClass: groupOfNames objectClass: posixGroup cn: group4 member: cn=user0,ou=people,dc=example,dc=com member: cn=user1,ou=people,dc=example,dc=com member: cn=user2,ou=people,dc=example,dc=com member: cn=user3,ou=people,dc=example,dc=com member: cn=user4,ou=people,dc=example,dc=com gidNumber: 90004 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/setup/example_net.ldif000066400000000000000000000055441421664411400310440ustar00rootroot00000000000000dn: dc=example,dc=net objectclass: dcObject objectclass: organization o: Example Company dc: example dn: cn=Manager,dc=example,dc=net objectclass: organizationalRole cn: Manager dn: ou=People,dc=example,dc=net objectClass: organizationalUnit ou: People dn: ou=Groups,dc=example,dc=net objectClass: organizationalUnit ou: Groups dn: cn=user0,ou=people,dc=example,dc=net objectClass: Account objectClass: posixAccount cn: user0 uid: user0 uidnumber: 80000 gidnumber: 80000 homeDirectory: /home/user0 dn: cn=user1,ou=people,dc=example,dc=net objectClass: Account objectClass: posixAccount cn: user1 uid: user1 uidnumber: 80001 gidnumber: 80001 homeDirectory: /home/user1 dn: cn=user2,ou=people,dc=example,dc=net objectClass: Account objectClass: posixAccount cn: user2 uid: user2 uidnumber: 80002 gidnumber: 80002 homeDirectory: /home/user2 dn: cn=user3,ou=people,dc=example,dc=net objectClass: Account objectClass: posixAccount cn: user3 uid: user3 uidnumber: 80003 gidnumber: 80003 homeDirectory: /home/user3 dn: cn=user4,ou=people,dc=example,dc=net objectClass: Account objectClass: posixAccount cn: user4 uid: user4 uidnumber: 80004 gidnumber: 80004 homeDirectory: /home/user4 dn: cn=group0,ou=groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group0 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net 
member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90000 dn: cn=group1,ou=groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group1 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90001 dn: cn=group2,ou=groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group2 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90002 dn: cn=group3,ou=groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group3 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90003 dn: cn=group4,ou=groups,dc=example,dc=net objectClass: groupOfNames objectClass: posixGroup cn: group4 member: cn=user0,ou=people,dc=example,dc=net member: cn=user1,ou=people,dc=example,dc=net member: cn=user2,ou=people,dc=example,dc=net member: cn=user3,ou=people,dc=example,dc=net member: cn=user4,ou=people,dc=example,dc=net gidNumber: 90004 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/setup/slapd.ldif000066400000000000000000000076341421664411400276500ustar00rootroot00000000000000# # See slapd-config(5) for details on configuration options. # This file should NOT be world readable. # dn: cn=config objectClass: olcGlobal cn: config # # # Define global ACLs to disable default read access. 
# olcArgsFile: /var/run/slapd.args olcPidFile: /var/run/slapd.pid # # Do not enable referrals until AFTER you have a working directory # service AND an understanding of referrals. #olcReferral: ldap://root.openldap.org # # Sample security restrictions # Require integrity protection (prevent hijacking) # Require 112-bit (3DES or better) encryption for updates # Require 64-bit encryption for simple bind #olcSecurity: ssf=1 update_ssf=112 simple_bind=64 # # Load dynamic backend modules: # dn: cn=module,cn=config objectClass: olcModuleList cn: module #olcModulepath: %MODULEDIR% olcModuleload: back_mdb.la olcModuleload: memberof.la olcModuleload: refint.la olcModuleload: unique.la #olcModuleload: back_ldap.la #olcModuleload: back_passwd.la #olcModuleload: back_shell.la dn: cn=schema,cn=config objectClass: olcSchemaConfig cn: schema include: file:///etc/openldap/schema/core.ldif include: file:///etc/openldap/schema/cosine.ldif include: file:///etc/openldap/schema/inetorgperson.ldif include: file:///etc/openldap/schema/rfc2307bis.ldif include: file:///etc/openldap/schema/yast.ldif # Frontend settings # dn: olcDatabase=frontend,cn=config objectClass: olcDatabaseConfig objectClass: olcFrontendConfig olcDatabase: frontend # # Sample global access control policy: # Root DSE: allow anyone to read it # Subschema (sub)entry DSE: allow anyone to read it # Other DSEs: # Allow self write access # Allow authenticated users read access # Allow anonymous users to authenticate # olcAccess: to dn.base="" by * read olcAccess: to dn.base="cn=Subschema" by * read #olcAccess: to * # by self write # by users read # by anonymous auth # # if no access controls are present, the default policy # allows anyone and everyone to read anything but restricts # updates to rootdn. (e.g., "access to * by * read") # # rootdn can always read and write EVERYTHING! 
# ####################################################################### # LMDB database definitions ####################################################################### # dn: olcDatabase={1}mdb,cn=config objectClass: olcDatabaseConfig objectClass: olcMdbConfig olcDatabase: mdb olcSuffix: dc=example,dc=com olcRootDN: cn=Manager,dc=example,dc=com # Cleartext passwords, especially for the rootdn, should # be avoided. See slappasswd(8) and slapd-config(5) for details. # Use of strong authentication encouraged. olcRootPW: secret # The database directory MUST exist prior to running slapd AND # should only be accessible by the slapd and slap tools. # Mode 700 recommended. olcDbDirectory: /var/lib/ldap/example_com # Indices to maintain olcDbIndex: objectClass eq dn: olcOverlay=memberof,olcDatabase={1}mdb,cn=config objectClass: olcOverlayConfig objectClass: olcMemberOf olcOverlay: memberof olcMemberOfRefint: TRUE dn: olcOverlay=refint,olcDatabase={1}mdb,cn=config objectClass: olcOverlayConfig objectClass: olcRefintConfig olcOverlay: refint olcRefintAttribute: member olcRefintAttribute: memberOf dn: olcOverlay=unique,olcDatabase={1}mdb,cn=config objectClass: olcOverlayConfig objectClass: olcUniqueConfig olcOverlay: unique olcUniqueURI: ldap:///?mail?sub olcUniqueURI: ldap:///?uid?sub dn: olcDatabase={2}mdb,cn=config objectClass: olcDatabaseConfig objectClass: olcMdbConfig olcDatabase: mdb olcSuffix: dc=example,dc=net olcRootDN: cn=Manager,dc=example,dc=net olcRootPW: secret olcDbDirectory: /var/lib/ldap/example_net olcDbIndex: objectClass eq dn: olcOverlay=memberof,olcDatabase={2}mdb,cn=config objectClass: olcOverlayConfig objectClass: olcMemberOf olcOverlay: memberof olcMemberOfRefint: TRUE dn: olcOverlay=unique,olcDatabase={2}mdb,cn=config objectClass: olcOverlayConfig objectClass: olcUniqueConfig olcOverlay: unique olcUniqueURI: ldap:///?mail?sub olcUniqueURI: ldap:///?uid?sub 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/000077500000000000000000000000001421664411400260605ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config.ldif000066400000000000000000000006561421664411400306120ustar00rootroot00000000000000# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 6905879f dn: cn=config objectClass: olcGlobal cn: config olcArgsFile: /var/run/slapd.args olcPidFile: /var/run/slapd.pid structuralObjectClass: olcGlobal entryUUID: 4019c5a8-eaf5-1039-865e-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.082506Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/000077500000000000000000000000001421664411400277435ustar00rootroot00000000000000cn=module{0}.ldif000066400000000000000000000007601421664411400331020ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 9b38b059 dn: cn=module{0} objectClass: olcModuleList cn: module{0} olcModuleLoad: {0}back_mdb.la olcModuleLoad: {1}memberof.la olcModuleLoad: {2}refint.la olcModuleLoad: {3}unique.la structuralObjectClass: olcModuleList entryUUID: 4019cc88-eaf5-1039-865f-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.082706Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z cn=schema.ldif000066400000000000000000000005721421664411400324060ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 095fcaec dn: cn=schema objectClass: olcSchemaConfig cn: schema structuralObjectClass: olcSchemaConfig entryUUID: 4019e6aa-eaf5-1039-8660-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.083375Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/000077500000000000000000000000001421664411400316215ustar00rootroot00000000000000cn={0}core.ldif000066400000000000000000000363321421664411400344270ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 31e6d4be dn: cn={0}core objectClass: olcSchemaConfig cn: {0}core olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. 121.1.15{32768} ) olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last (family) name(s) for which the entity is known by' SUP name ) olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
11 SINGLE-VALUE ) olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l ocality which this object resides in' SUP name ) olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF C2256: state or province which this object resides in' SUP name ) olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 56: organization this object belongs to' SUP name ) olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC 'RFC2256: organizational unit this object belongs to' SUP name ) olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate d with the entity' SUP name ) olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 25 ) olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. 
1.4.1.1466.115.121.1.15{40} ) olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{40} ) olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 5.121.1.22 ) olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 56: international ISDN number' EQUALITY numericStringMatch SUBSTR numericSt ringSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. 
1.41 ) olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- VALUE ) olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. 1466.115.121.1.43 SINGLE-VALUE ) olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA X 1.3.6.1.4.1.1466.115.121.1.38 ) olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g roup' SUP distinguishedName ) olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the object)' SUP distinguishedName ) olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan t of role' SUP distinguishedName ) olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. 6.1.4.1.1466.115.121.1.8 ) olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
4.1.1466.115.121.1.8 ) olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.9 ) olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 66.115.121.1.9 ) olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 .1.10 ) olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f irst name(s) for which the entity is known by' SUP name ) olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of some or all of names, but not the surname(s).' SUP name ) olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: name qualifier indicating a generation' SUP name ) olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.6 ) olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
1466.115.121.1.42 ) olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.34 ) olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' SUP name ) olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon ym for the object' SUP name ) olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBST R caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN GLE-VALUE ) olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
6.1.4.1.1466.115.121.1.26{128} ) olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio n ) ) olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive ryOfficeName $ st $ l $ description ) ) olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls o $ description ) ) olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ posta lAddress $ physicalDeliveryOfficeName $ ou $ st $ l ) ) olcObjectClasses: {6}( 2.5.6.8 NAME 'organizationalRole' DESC 'RFC2256: an o 
rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic eName $ ou $ st $ l $ description ) ) olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor y $ seeAlso $ owner $ ou $ o $ description ) ) olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l ) ) olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de scription ) ) olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati on ) olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ description ) ) olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 56: a strong authentication user' SUP top AUXILIARY MUST userCertificate ) olcObjectClasses: {14}( 2.5.6.16 NAME 
'certificationAuthority' DESC 'RFC2256 : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de scription ) ) olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 6: a user security information' SUP top AUXILIARY MAY ( supportedAlgorithms ) ) olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert ificationAuthority AUXILIARY MAY ( deltaRevocationList ) ) olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU RAL MUST ( cn ) MAY ( certificateRevocationList $ authorityRevocationList $ deltaRevocationList ) ) olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST ( dmdNam e ) MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Add ress $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationali SDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S UP top AUXILIARY MAY userCertificate ) olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe vocationList $ cACertificate $ crossCertificatePair ) ) olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU P top AUXILIARY MAY deltaRevocationList ) olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 'R FC2079: object that contains the URI attribute type' MAY ( labeledURI ) SUP top AUXILIARY ) olcObjectClasses: {24}( 
0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObjec t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo rd ) olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: domain component object' SUP top AUXILIARY MUST dc ) olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob ject' SUP top AUXILIARY MUST uid ) structuralObjectClass: olcSchemaConfig entryUUID: 4019f348-eaf5-1039-8661-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.083690Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z cn={1}cosine.ldif000066400000000000000000000261431421664411400347570ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 4e3862ab dn: cn={1}cosine objectClass: olcSchemaConfig cn: {1}cosine olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 .115.121.1.12 ) olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno reSubstringsMatch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT AX 1.3.6.1.4.1.1466.115.121.1.12 ) olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.50 ) olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 .1466.115.121.1.12 ) olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT AX 1.3.6.1.4.1.1466.115.121.1.39 ) olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE SC 'RFC1274: DN of entry associated with domain' EQUALITY distinguishedName Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) 
olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.50 ) olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 .121.1.50 ) olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 1.1466.115.121.1.15{256} ) olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. 
1.27 ) olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 SINGLE-VALUE ) olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.13 SINGLE-VALUE ) olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.13 SINGLE-VALUE ) olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.23 ) olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
1466.115.121.1.12 ) olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person alSignature ) ) olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio nName $ organizationalUnitName $ host ) ) olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ localityName $ organizationName $ organizationalUnitName $ documentTitle $ documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum ber ) ) olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber $ localityName $ organizationName $ organizationalUnitName ) ) olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality Name $ stateOrProvinceName $ streetAddress $ 
physicalDeliveryOfficeName $ p ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg isteredAddress $ x121Address ) ) olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) ) olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C NAMERecord ) ) olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso ciatedDomain ) olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP country STRUCTURAL MUST friendlyCountryName ) olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S TRUCTURAL MAY dSAQuality ) olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa ximumQuality ) ) structuralObjectClass: olcSchemaConfig entryUUID: 401a0f9a-eaf5-1039-8662-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.084423Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z 
cn={2}inetorgperson.ldif000066400000000000000000000054511421664411400363750ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 2dfdddb6 dn: cn={2}inetorgperson objectClass: olcSchemaConfig cn: {2}inetorgperson olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC 'RFC2798: identifies a department within an organization' EQUALITY caseIgn oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 .15 ) olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 .15 SINGLE-VALUE ) olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' RFC2798: numerically identifies an employee within an organization' EQUALIT Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.15 SINGLE-VALUE ) olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. 
121.1.15 SINGLE-VALUE ) olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. 1.1466.115.121.1.5 ) olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 66.115.121.1.5 ) olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) structuralObjectClass: olcSchemaConfig entryUUID: 401a225a-eaf5-1039-8663-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.084903Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z cn={3}rfc2307bis.ldif000066400000000000000000000226561421664411400352720ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 dd0a742e dn: cn={3}rfc2307bis objectClass: olcSchemaConfig cn: {3}rfc2307bis olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 .1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 .1.26 SINGLE-VALUE ) olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac tIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 6.115.121.1.26 ) olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net group triple' EQUALITY 
caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 .26 ) olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service port number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SI NGLE-VALUE ) olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Ser vice protocol name' SUP name ) olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP p rotocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V ALUE ) olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 add resses as a dotted decimal omitting leading zeros or IPv6 addresses as defined in RFC2373' SUP name ) olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne twork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP name SINGLE-VALUE ) olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne tmask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros ' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-V ALUE ) olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres s in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' E QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo tparamd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.26 ) olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a A generic NIS map' SUP name ) olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic NIS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS publ ic key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING LE-VALUE ) olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secr et key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING LE-VALUE ) olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'auto mount Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automoun t Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC ' Automount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substr ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ uid $ u idNumber $ gidNumber $ homeDirectory ) 
MAY ( userPassword $ loginShell $ ge cos $ description ) ) olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass word $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarni ng $ shadowInactive $ shadowExpire $ shadowFlag ) ) olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o f a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword $ memberUid $ description ) ) olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an Internet Protocol service. Maps an IP port and protocol (such as tc p or udp) to one or more names; the distinguished value of th e cn attribute denotes the services canonical name' SUP top STRUCTUR AL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o f an IP protocol. Maps a protocol number to one or more names. The d istinguished value of the cn attribute denotes the protocols canonic al name' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description ) olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an Open Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) b inding. This class maps an ONC RPC number to a name. The distin guished value of the cn attribute denotes the RPC services canonical name' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a host, an IP device. The distinguished value of the cn attribute deno tes the hosts canonical name. Device SHOULD be used as a structural class' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l $ description $ manager ) ) olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of a network. 
The distinguished value of the cn attribute denotes the networks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ ipNetmaskNumber $ l $ description $ manager ) ) olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction of a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de scription ) olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device with a MAC address; device SHOULD be used as a structural class' SU P top AUXILIARY MAY macAddress ) olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic e with boot parameters; device SHOULD be used as a structural class' SUP top AUXILIARY MAY ( bootFile $ bootParameter ) ) olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object with a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ nisSecretKey ) MAY ( uidNumber $ description ) ) olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associ ates a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTU RAL MUST automountMapName MAY description ) olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount in formation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) MAY description ) olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top S TRUCTURAL MAY cn ) structuralObjectClass: olcSchemaConfig entryUUID: 401a2e6c-eaf5-1039-8664-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 
20200224020101.085186Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z cn={4}yast.ldif000066400000000000000000000150061421664411400344560ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 442e4b40 dn: cn={4}yast objectClass: olcSchemaConfig cn: {4}yast olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' DESC 'Base DN where new Objects should be created by default' EQUALITY dis tinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId ' DESC 'Next unused unique ID, can be used to generate directory wide uniqe IDs' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4. 
1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' DESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerO rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' DESC 'upper Border for Unique IDs' EQUALITY integerMatch ORDERING integerO rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTempl ate' DESC 'The DN of a template that should be used by default' EQUALITY di stinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter ' DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121. 1.15 SINGLE-VALUE ) olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValu e' DESC 'an Attribute-Value-Assertions to define defaults for specific Attr ibutes' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttri bute' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgno reIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGr oup' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6. 
1.4.1.1466.115.121.1.12 ) olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPassword Length' DESC 'minimum Password length for new users' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V ALUE ) olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswor dLength' DESC 'maximum Password length for new users' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE- VALUE ) olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHa sh' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYN TAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' D ESC '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DE SC 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTA X 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribu te' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SIN GLE-VALUE ) olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SING LE-VALUE ) olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaul tQuota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl ' DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- VALUE ) olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigura tion' DESC 'Contains configuration of Management Modules' SUP top STRUCTURA L 
MUST cn MAY suseDefaultBase ) olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfigurati on' DESC 'Configuration of user management tools' SUP suseModuleConfigurati on STRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePas swordHash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqu eId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' DESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( su sePlugin $ suseDefaultValue $ suseNamingAttribute ) ) olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' D ESC 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY su seSecondaryGroup ) olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' DESC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) olcObjectClasses: {5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfigurat ion' DESC 'Configuration of user management tools' SUP suseModuleConfigurat ion STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration ' DESC 'Configuration of CA management tools' SUP suseModuleConfiguration S TRUCTURAL ) olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguratio n' DESC 'Configuration of mail server management tools' SUP suseModuleConfi guration STRUCTURAL ) olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfigurat ion' DESC 'Configuration of DHCP server management tools' SUP suseModuleCon figuration STRUCTURAL ) olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfigurat ion' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfi guration STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefault Quota $ suseImapUseSsl ) ) 
structuralObjectClass: olcSchemaConfig entryUUID: 401a3f38-eaf5-1039-8665-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.085642Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z cn={5}test.ldif000066400000000000000000000010621421664411400344530ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schemadn: cn={5}test objectClass: olcSchemaConfig cn: {5}test olcAttributeTypes: {0}( x-attribute NAME 'x-attribute' DESC 'desc' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcObjectClasses: {0}( x-object-oid NAME 'x-object' DESC 'desc' SUP top STRUCTURAL MUST x-attribute ) structuralObjectClass: olcSchemaConfig entryUUID: 86660309-e157-4ebb-be06-a5d7e3c877bc creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.085642Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={-1}frontend.ldif000066400000000000000000000010231421664411400353050ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 ebfceba5 dn: olcDatabase={-1}frontend objectClass: olcDatabaseConfig objectClass: olcFrontendConfig olcDatabase: {-1}frontend olcAccess: {0}to dn.base="" by * read olcAccess: {1}to dn.base="cn=Subschema" by * read structuralObjectClass: olcDatabaseConfig entryUUID: 401a4c6c-eaf5-1039-8666-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.085980Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={0}config.ldif000066400000000000000000000011101421664411400346520ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 e112c647 dn: olcDatabase={0}config objectClass: olcDatabaseConfig olcDatabase: {0}config olcAccess: {0}to * by * none olcAddContentAcl: TRUE olcLastMod: TRUE olcMaxDerefDepth: 15 olcReadOnly: FALSE olcRootDN: cn=config olcSyncUseSubentry: FALSE olcMonitoring: FALSE structuralObjectClass: olcDatabaseConfig entryUUID: 401a534c-eaf5-1039-8668-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086158Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={1}mdb.ldif000066400000000000000000000011351421664411400341570ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 c1d2cbb7 dn: olcDatabase={1}mdb objectClass: olcDatabaseConfig objectClass: olcMdbConfig olcDatabase: mdb olcDbDirectory: /var/lib/ldap/example_com olcSuffix: dc=example,dc=com olcRootDN: cn=Manager,dc=example,dc=com olcRootPW:: c2VjcmV0 olcDbIndex: objectClass eq olcDbIndex: uid eq,pres,sub structuralObjectClass: olcMdbConfig entryUUID: 401a528e-eaf5-1039-8667-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086134Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={1}mdb/000077500000000000000000000000001421664411400333175ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=configolcOverlay={0}memberof.ldif000066400000000000000000000007021421664411400405570ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 82bb3fb3 dn: olcOverlay={0}memberof objectClass: olcOverlayConfig objectClass: olcMemberOf olcOverlay: {0}memberof olcMemberOfRefInt: TRUE structuralObjectClass: olcMemberOf entryUUID: 401a5f7c-eaf5-1039-8669-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086468Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcOverlay={1}refint.ldif000066400000000000000000000007461421664411400402630ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 28d25ae6 dn: olcOverlay={1}refint objectClass: olcOverlayConfig objectClass: olcRefintConfig olcOverlay: {1}refint olcRefintAttribute: member olcRefintAttribute: memberOf structuralObjectClass: olcRefintConfig entryUUID: 401a66fc-eaf5-1039-866a-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086660Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcOverlay={2}unique.ldif000066400000000000000000000007551421664411400403030ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 e3a2aeac dn: olcOverlay={2}unique objectClass: olcOverlayConfig objectClass: olcUniqueConfig olcOverlay: {2}unique olcUniqueURI: ldap:///?mail?sub olcUniqueURI: ldap:///?uid?sub structuralObjectClass: olcUniqueConfig entryUUID: 401a6b02-eaf5-1039-866b-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086763Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={2}mdb.ldif000066400000000000000000000011011421664411400341510ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 1bb1ab28 dn: olcDatabase={2}mdb objectClass: olcDatabaseConfig objectClass: olcMdbConfig olcDatabase: mdb olcDbDirectory: /var/lib/ldap/example_net olcSuffix: dc=example,dc=net olcRootDN: cn=Manager,dc=example,dc=net olcRootPW:: c2VjcmV0 olcDbIndex: objectClass eq structuralObjectClass: olcMdbConfig entryUUID: 401a7084-eaf5-1039-866c-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.086905Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcDatabase={2}mdb/000077500000000000000000000000001421664411400333205ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=configolcOverlay={0}memberof.ldif000066400000000000000000000007021421664411400405600ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 6b48531c dn: olcOverlay={0}memberof objectClass: olcOverlayConfig objectClass: olcMemberOf olcOverlay: {0}memberof olcMemberOfRefInt: TRUE structuralObjectClass: olcMemberOf entryUUID: 401a7890-eaf5-1039-866d-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.087110Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z olcOverlay={1}unique.ldif000066400000000000000000000007551421664411400403030ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 027478a0 dn: olcOverlay={1}unique objectClass: olcOverlayConfig objectClass: olcUniqueConfig olcOverlay: {1}unique olcUniqueURI: ldap:///?mail?sub olcUniqueURI: ldap:///?uid?sub structuralObjectClass: olcUniqueConfig entryUUID: 401a7f20-eaf5-1039-866e-dbfbf2f5e6dd creatorsName: cn=config createTimestamp: 20200224020101Z entryCSN: 20200224020101.087278Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20200224020101Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/000077500000000000000000000000001421664411400247775ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/000077500000000000000000000000001421664411400263245ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config.ldif000066400000000000000000000021541421664411400310510ustar00rootroot00000000000000# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 652b4ad6 dn: cn=config objectClass: olcGlobal cn: config olcConfigFile: slapd.conf olcConfigDir: ./slapd.d olcAttributeOptions: lang- olcAuthzPolicy: none olcConcurrency: 0 olcConnMaxPending: 100 olcConnMaxPendingAuth: 1000 olcGentleHUP: FALSE olcIdleTimeout: 0 olcIndexSubstrIfMaxLen: 4 olcIndexSubstrIfMinLen: 2 olcIndexSubstrAnyLen: 4 olcIndexSubstrAnyStep: 2 olcIndexIntLen: 4 olcListenerThreads: 1 olcLocalSSF: 71 olcLogLevel: 0 olcReadOnly: FALSE olcSaslSecProps: noplain,noanonymous olcSockbufMaxIncoming: 262143 olcSockbufMaxIncomingAuth: 16777215 olcThreads: 16 olcTLSCACertificateFile: /tmp/ldap-sssdtest.cacrt olcTLSCertificateFile: /tmp/ldap-sssdtest.crt olcTLSCertificateKeyFile: /tmp/ldap-sssdtest.key olcTLSCRLCheck: none olcTLSVerifyClient: never olcTLSProtocolMin: 0.0 olcToolThreads: 1 olcWriteTimeout: 0 structuralObjectClass: olcGlobal entryUUID: 12127e74-e5e6-103a-973c-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/000077500000000000000000000000001421664411400302075ustar00rootroot00000000000000cn=module{0}.ldif000066400000000000000000000006321421664411400333440ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 453c66fb dn: cn=module{0} objectClass: olcModuleList cn: module{0} olcModuleLoad: {0}back_hdb.la structuralObjectClass: olcModuleList entryUUID: 1212848c-e5e6-103a-973d-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z cn=schema.ldif000066400000000000000000001105711421664411400326530ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 8c9a2f9c dn: cn=schema objectClass: olcSchemaConfig cn: schema olcObjectIdentifier: OLcfg 1.3.6.1.4.1.4203.1.12.2 olcObjectIdentifier: OLcfgAt OLcfg:3 olcObjectIdentifier: OLcfgGlAt OLcfgAt:0 olcObjectIdentifier: OLcfgBkAt OLcfgAt:1 olcObjectIdentifier: OLcfgDbAt OLcfgAt:2 olcObjectIdentifier: OLcfgOvAt OLcfgAt:3 olcObjectIdentifier: OLcfgCtAt OLcfgAt:4 olcObjectIdentifier: OLcfgOc OLcfg:4 olcObjectIdentifier: OLcfgGlOc OLcfgOc:0 olcObjectIdentifier: OLcfgBkOc OLcfgOc:1 olcObjectIdentifier: OLcfgDbOc OLcfgOc:2 olcObjectIdentifier: OLcfgOvOc OLcfgOc:3 olcObjectIdentifier: OLcfgCtOc OLcfgOc:4 olcObjectIdentifier: OMsyn 1.3.6.1.4.1.1466.115.121.1 olcObjectIdentifier: OMsBoolean OMsyn:7 olcObjectIdentifier: OMsDN OMsyn:12 olcObjectIdentifier: OMsDirectoryString OMsyn:15 olcObjectIdentifier: OMsIA5String OMsyn:26 olcObjectIdentifier: OMsInteger OMsyn:27 olcObjectIdentifier: OMsOID OMsyn:38 olcObjectIdentifier: OMsOctetString OMsyn:40 olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.1 DESC 'ACI Item' X-BINARY-TRA NSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.2 DESC 'Access Point' X-NOT-HU MAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.3 DESC 'Attribute Type Descrip tion' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.4 DESC 'Audio' X-NOT-HUMAN-REA DABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.5 
DESC 'Binary' X-NOT-HUMAN-RE ADABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.6 DESC 'Bit String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.7 DESC 'Boolean' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.8 DESC 'Certificate' X-BINARY- TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.9 DESC 'Certificate List' X-BI NARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.10 DESC 'Certificate Pair' X-B INARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.1 DESC 'X.509 AttributeCerti ficate' X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.12 DESC 'Distinguished Name' ) olcLdapSyntaxes: ( 1.2.36.79672281.1.5.0 DESC 'RDN' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.13 DESC 'Data Quality' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.14 DESC 'Delivery Method' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.15 DESC 'Directory String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.16 DESC 'DIT Content Rule Desc ription' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.17 DESC 'DIT Structure Rule De scription' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.19 DESC 'DSA Quality' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.20 DESC 'DSE Type' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.21 DESC 'Enhanced Guide' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.22 DESC 'Facsimile Telephone N umber' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.23 DESC 'Fax' X-NOT-HUMAN-READ ABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.24 DESC 'Generalized Time' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.25 DESC 'Guide' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.26 DESC 'IA5 String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.27 DESC 'Integer' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.28 DESC 'JPEG' 
X-NOT-HUMAN-REA DABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.29 DESC 'Supplier And Shadow Acc ess Points' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.30 DESC 'Matching Rule Descrip tion' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.31 DESC 'Matching Rule Use Des cription' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.32 DESC 'Mail Preference' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.33 DESC 'MHS OR Address' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.34 DESC 'Name And Optional UID ' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.35 DESC 'Name Form Description ' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.36 DESC 'Numeric String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.37 DESC 'Object Class Descript ion' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.38 DESC 'OID' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.39 DESC 'Other Mailbox' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.40 DESC 'Octet String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.41 DESC 'Postal Address' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.42 DESC 'Protocol Information' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.43 DESC 'Presentation Address' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.44 DESC 'Printable String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.11 DESC 'Country String' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.45 DESC 'SubtreeSpecification' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.49 DESC 'Supported Algorithm' X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.50 DESC 'Telephone Number' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.51 DESC 'Teletex Terminal Iden tifier' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.52 DESC 'Telex Number' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.54 DESC 'LDAP Syntax Descripti on' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.55 DESC 'Modify Rights' ) olcLdapSyntaxes: ( 
1.3.6.1.4.1.1466.115.121.1.56 DESC 'LDAP Schema Definitio n' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.57 DESC 'LDAP Schema Descripti on' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.58 DESC 'Substring Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.1.0.0 DESC 'RFC2307 NIS Netgroup Triple' ) olcLdapSyntaxes: ( 1.3.6.1.1.1.0.1 DESC 'RFC2307 Boot Parameter' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.1 DESC 'Certificate Exact Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.2 DESC 'Certificate Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.3 DESC 'Certificate Pair Exact Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.4 DESC 'Certificate Pair Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.5 DESC 'Certificate List Exact Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.6 DESC 'Certificate List Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.15.7 DESC 'Algorithm Identifier' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.2 DESC 'AttributeCertificate Exact Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.3 DESC 'AttributeCertificate Assertion' ) olcLdapSyntaxes: ( 1.3.6.1.1.16.1 DESC 'UUID' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.1 DESC 'CSN' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.4 DESC 'CSN SID' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.1.1.1 DESC 'OpenLDAP void' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.7 DESC 'OpenLDAP authz' ) olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.1 DESC 'OpenLDAP Experimental ACI' ) olcAttributeTypes: ( 2.5.4.0 NAME 'objectClass' DESC 'RFC4512: object classe s of the entity' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115 .121.1.38 ) olcAttributeTypes: ( 2.5.21.9 NAME 'structuralObjectClass' DESC 'RFC4512: st ructural object class of entry' EQUALITY objectIdentifierMatch SYNTAX 1.3.6 .1.4.1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryO peration ) olcAttributeTypes: ( 2.5.18.1 NAME 'createTimestamp' DESC 'RFC4512: time whi ch object was created' EQUALITY generalizedTimeMatch ORDERING generalizedTi meOrderingMatch 
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-M ODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.2 NAME 'modifyTimestamp' DESC 'RFC4512: time whi ch object was last modified' EQUALITY generalizedTimeMatch ORDERING general izedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO- USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.3 NAME 'creatorsName' DESC 'RFC4512: name of cre ator' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.4 NAME 'modifiersName' DESC 'RFC4512: name of la st modifier' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.9 NAME 'hasSubordinates' DESC 'X.501: entry has children' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- VALUE NO-USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.10 NAME 'subschemaSubentry' DESC 'RFC4512: name of controlling subschema entry' EQUALITY distinguishedNameMatch SYNTAX 1.3. 
6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directory Operation ) olcAttributeTypes: ( 2.5.18.12 NAME 'collectiveAttributeSubentries' DESC 'RF C3671: collective attribute subentries' EQUALITY distinguishedNameMatch SYN TAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOpera tion ) olcAttributeTypes: ( 2.5.18.7 NAME 'collectiveExclusions' DESC 'RFC3671: col lective attribute exclusions' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1 .4.1.1466.115.121.1.38 USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.1.20 NAME 'entryDN' DESC 'DN of the entry' EQUA LITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VAL UE NO-USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry ' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGL E-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.7 NAME 'entryCSN' DESC 'change s equence number of the entry content' EQUALITY CSNMatch ORDERING CSNOrdering Match SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MODIFICAT ION USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.13 NAME 'namingCSN' DESC 'change sequence number of the entry naming (RDN)' EQUALITY CSNMatch ORDERING CSNO rderingMatch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MO DIFICATION USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.23 NAME 'syncreplCookie' DESC 's yncrepl Cookie for shadow copy' EQUALITY octetStringMatch ORDERING octetStr ingOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE NO-USER- MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.25 NAME 'contextCSN' DESC 'the l argest committed CSN of a context' EQUALITY CSNMatch ORDERING CSNOrderingMa tch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} NO-USER-MODIFICATION USAGE dSAOp eration ) olcAttributeTypes: ( 
1.3.6.1.4.1.1466.101.120.6 NAME 'altServer' DESC 'RFC45 12: alternative servers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 USAGE dSAOper ation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.5 NAME 'namingContexts' DESC ' RFC4512: naming contexts' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE dSAOpe ration ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.13 NAME 'supportedControl' DES C 'RFC4512: supported controls' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.7 NAME 'supportedExtension' DE SC 'RFC4512: supported extended operations' SYNTAX 1.3.6.1.4.1.1466.115.121 .1.38 USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.15 NAME 'supportedLDAPVersion' DESC 'RFC4512: supported LDAP versions' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 27 USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.14 NAME 'supportedSASLMechanis ms' DESC 'RFC4512: supported SASL mechanisms' SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.15 USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.5 NAME 'supportedFeatures' DESC 'R FC4512: features supported by the server' EQUALITY objectIdentifierMatch SY NTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.10 NAME 'monitorContext' DESC 'm onitor context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115 .121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.12.2.1 NAME 'configContext' DESC 'co nfig context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' DESC 'RFC3045: name of im plementation vendor' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.1.5 NAME 'vendorVersion' DESC 'RFC3045: version of implementation' EQUALITY caseExactMatch SYNTAX 
1.3.6.1.4.1.1466.115.121 .1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 2.5.18.5 NAME 'administrativeRole' DESC 'RFC3672: admin istrative role' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115. 121.1.38 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.18.6 NAME 'subtreeSpecification' DESC 'RFC3672: sub tree specification' SYNTAX 1.3.6.1.4.1.1466.115.121.1.45 SINGLE-VALUE USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.1 NAME 'dITStructureRules' DESC 'RFC4512: DIT st ructure rules' EQUALITY integerFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.17 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.2 NAME 'dITContentRules' DESC 'RFC4512: DIT cont ent rules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1. 1466.115.121.1.16 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.4 NAME 'matchingRules' DESC 'RFC4512: matching r ules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.30 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.5 NAME 'attributeTypes' DESC 'RFC4512: attribute types' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.146 6.115.121.1.3 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.6 NAME 'objectClasses' DESC 'RFC4512: object cla sses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. 
115.121.1.37 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.7 NAME 'nameForms' DESC 'RFC4512: name forms ' E QUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121 .1.35 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.21.8 NAME 'matchingRuleUse' DESC 'RFC4512: matching rule uses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1 .1466.115.121.1.31 USAGE directoryOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.16 NAME 'ldapSyntaxes' DESC 'R FC4512: LDAP syntaxes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.54 USAGE directoryOperation ) olcAttributeTypes: ( 2.5.4.1 NAME ( 'aliasedObjectName' 'aliasedEntryName' ) DESC 'RFC4512: name of aliased object' EQUALITY distinguishedNameMatch SYN TAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcAttributeTypes: ( 2.16.840.1.113730.3.1.34 NAME 'ref' DESC 'RFC3296: subo rdinate referral URL' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.15 USAGE distributedOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.1 NAME 'entry' DESC 'OpenLDAP ACL entry pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO-USER- MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.2 NAME 'children' DESC 'OpenLDAP A CL children pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO -USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.8 NAME ( 'authzTo' 'saslAuthzTo' ) DESC 'proxy authorization targets' EQUALITY authzMatch SYNTAX 1.3.6.1.4. 1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.9 NAME ( 'authzFrom' 'saslAuthzF rom' ) DESC 'proxy authorization sources' EQUALITY authzMatch SYNTAX 1.3.6. 
1.4.1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.3 NAME 'entryTtl' DESC 'RFC258 9: entry time-to-live' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO -USER-MODIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.4 NAME 'dynamicSubtrees' DESC 'RFC2589: dynamic subtrees' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MO DIFICATION USAGE dSAOperation ) olcAttributeTypes: ( 2.5.4.49 NAME 'distinguishedName' DESC 'RFC4519: common supertype of DN attributes' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1 .4.1.1466.115.121.1.12 ) olcAttributeTypes: ( 2.5.4.41 NAME 'name' DESC 'RFC4519: common supertype of name attributes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) olcAttributeTypes: ( 2.5.4.3 NAME ( 'cn' 'commonName' ) DESC 'RFC4519: commo n name(s) for which the entity is known by' SUP name ) olcAttributeTypes: ( 0.9.2342.19200300.100.1.1 NAME ( 'uid' 'userid' ) DESC 'RFC4519: user identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstr ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: ( 1.3.6.1.1.1.1.0 NAME 'uidNumber' DESC 'RFC2307: An inte ger uniquely identifying a user in an administrative domain' EQUALITY integ erMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: ( 1.3.6.1.1.1.1.1 NAME 'gidNumber' DESC 'RFC2307: An inte ger uniquely identifying a group in an administrative domain' EQUALITY inte gerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: ( 2.5.4.35 NAME 'userPassword' DESC 'RFC4519/2307: passwo rd of user' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{ 128} ) olcAttributeTypes: ( 1.3.6.1.4.1.250.1.57 NAME 'labeledURI' DESC 'RFC2079: U niform Resource Identifier with optional label' EQUALITY caseExactMatch SYN TAX 1.3.6.1.4.1.1466.115.121.1.15 ) 
olcAttributeTypes: ( 2.5.4.13 NAME 'description' DESC 'RFC4519: descriptive information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT AX 1.3.6.1.4.1.1466.115.121.1.15{1024} ) olcAttributeTypes: ( 2.5.4.34 NAME 'seeAlso' DESC 'RFC4519: DN of related ob ject' SUP distinguishedName ) olcAttributeTypes: ( OLcfgGlAt:78 NAME 'olcConfigFile' DESC 'File for slapd configuration directives' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStrin g SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:79 NAME 'olcConfigDir' DESC 'Directory for sl apd configuration backend' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStri ng SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:1 NAME 'olcAccess' DESC 'Access Control List' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:86 NAME 'olcAddContentAcl' DESC 'Check ACLs a gainst content of Add ops' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:2 NAME 'olcAllows' DESC 'Allowed set of depre cated features' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:3 NAME 'olcArgsFile' DESC 'File for slapd com mand line options' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGL E-VALUE ) olcAttributeTypes: ( OLcfgGlAt:5 NAME 'olcAttributeOptions' EQUALITY caseIgn oreMatch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:4 NAME 'olcAttributeTypes' DESC 'OpenLDAP att ributeTypes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT AX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:6 NAME 'olcAuthIDRewrite' EQUALITY caseIgnore Match SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:7 NAME 'olcAuthzPolicy' EQUALITY caseIgnoreMa tch SYNTAX OMsDirectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:8 NAME 'olcAuthzRegexp' EQUALITY caseIgnoreMa tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:9 NAME 'olcBackend' DESC 'A type of 
backend' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE X-ORDERED ' SIBLINGS' ) olcAttributeTypes: ( OLcfgGlAt:10 NAME 'olcConcurrency' SYNTAX OMsInteger SI NGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:11 NAME 'olcConnMaxPending' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:12 NAME 'olcConnMaxPendingAuth' SYNTAX OMsInt eger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:13 NAME 'olcDatabase' DESC 'The backend type for a database instance' SUP olcBackend SINGLE-VALUE X-ORDERED 'SIBLINGS' ) olcAttributeTypes: ( OLcfgGlAt:14 NAME 'olcDefaultSearchBase' SYNTAX OMsDN S INGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:15 NAME 'olcDisallows' EQUALITY caseIgnoreMat ch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:16 NAME 'olcDitContentRules' DESC 'OpenLDAP D IT content rules' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgDbAt:0.20 NAME 'olcExtraAttrs' EQUALITY caseIgnore Match SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:17 NAME 'olcGentleHUP' SYNTAX OMsBoolean SING LE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.17 NAME 'olcHidden' SYNTAX OMsBoolean SINGL E-VALUE ) olcAttributeTypes: ( OLcfgGlAt:18 NAME 'olcIdleTimeout' SYNTAX OMsInteger SI NGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:19 NAME 'olcInclude' SUP labeledURI ) olcAttributeTypes: ( OLcfgGlAt:20 NAME 'olcIndexSubstrIfMinLen' SYNTAX OMsIn teger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:21 NAME 'olcIndexSubstrIfMaxLen' SYNTAX OMsIn teger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:22 NAME 'olcIndexSubstrAnyLen' SYNTAX OMsInte ger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:23 NAME 'olcIndexSubstrAnyStep' SYNTAX OMsInt eger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:84 NAME 'olcIndexIntLen' SYNTAX OMsInteger SI NGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.4 NAME 'olcLastMod' SYNTAX OMsBoolean SINGL E-VALUE ) olcAttributeTypes: ( OLcfgGlAt:85 NAME 'olcLdapSyntaxes' DESC 
'OpenLDAP ldap Syntax' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OM sDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgDbAt:0.5 NAME 'olcLimits' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:93 NAME 'olcListenerThreads' SYNTAX OMsIntege r SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:26 NAME 'olcLocalSSF' SYNTAX OMsInteger SINGL E-VALUE ) olcAttributeTypes: ( OLcfgGlAt:27 NAME 'olcLogFile' SYNTAX OMsDirectoryStrin g SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:28 NAME 'olcLogLevel' EQUALITY caseIgnoreMatc h SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgDbAt:0.6 NAME 'olcMaxDerefDepth' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.16 NAME 'olcMirrorMode' SYNTAX OMsBoolean S INGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:30 NAME 'olcModuleLoad' EQUALITY caseIgnoreMa tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:31 NAME 'olcModulePath' SYNTAX OMsDirectorySt ring SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.18 NAME 'olcMonitoring' SYNTAX OMsBoolean S INGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:32 NAME 'olcObjectClasses' DESC 'OpenLDAP obj ect classes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT AX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:33 NAME 'olcObjectIdentifier' EQUALITY caseIg noreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryString X-ORDE RED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:34 NAME 'olcOverlay' SUP olcDatabase SINGLE-V ALUE X-ORDERED 'SIBLINGS' ) olcAttributeTypes: ( OLcfgGlAt:35 NAME 'olcPasswordCryptSaltFormat' SYNTAX O MsDirectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:36 NAME 'olcPasswordHash' EQUALITY caseIgnore Match SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:37 NAME 'olcPidFile' SYNTAX OMsDirectoryStrin g SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:38 NAME 'olcPlugin' EQUALITY caseIgnoreMatch 
SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:39 NAME 'olcPluginLogFile' SYNTAX OMsDirector yString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:40 NAME 'olcReadOnly' SYNTAX OMsBoolean SINGL E-VALUE ) olcAttributeTypes: ( OLcfgGlAt:41 NAME 'olcReferral' SUP labeledURI SINGLE-V ALUE ) olcAttributeTypes: ( OLcfgDbAt:0.7 NAME 'olcReplica' SUP labeledURI EQUALITY caseIgnoreMatch X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:43 NAME 'olcReplicaArgsFile' SYNTAX OMsDirect oryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:44 NAME 'olcReplicaPidFile' SYNTAX OMsDirecto ryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:45 NAME 'olcReplicationInterval' SYNTAX OMsIn teger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:46 NAME 'olcReplogFile' SYNTAX OMsDirectorySt ring SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:47 NAME 'olcRequires' EQUALITY caseIgnoreMatc h SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:48 NAME 'olcRestrict' EQUALITY caseIgnoreMatc h SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:49 NAME 'olcReverseLookup' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.8 NAME 'olcRootDN' EQUALITY distinguishedNa meMatch SYNTAX OMsDN SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:51 NAME 'olcRootDSE' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgDbAt:0.9 NAME 'olcRootPW' SYNTAX OMsDirectoryStrin g SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:89 NAME 'olcSaslAuxprops' SYNTAX OMsDirectory String SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:53 NAME 'olcSaslHost' SYNTAX OMsDirectoryStri ng SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:54 NAME 'olcSaslRealm' SYNTAX OMsDirectoryStr ing SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:56 NAME 'olcSaslSecProps' SYNTAX OMsDirectory String SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:58 NAME 'olcSchemaDN' EQUALITY distinguishedN ameMatch SYNTAX OMsDN SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:59 NAME 'olcSecurity' EQUALITY 
caseIgnoreMatc h SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:81 NAME 'olcServerID' EQUALITY caseIgnoreMatc h SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:60 NAME 'olcSizeLimit' SYNTAX OMsDirectoryStr ing SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:61 NAME 'olcSockbufMaxIncoming' SYNTAX OMsInt eger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:62 NAME 'olcSockbufMaxIncomingAuth' SYNTAX OM sInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:83 NAME 'olcSortVals' DESC 'Attributes whose values will always be sorted' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryS tring ) olcAttributeTypes: ( OLcfgDbAt:0.15 NAME 'olcSubordinate' SYNTAX OMsDirector yString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.10 NAME 'olcSuffix' EQUALITY distinguishedN ameMatch SYNTAX OMsDN ) olcAttributeTypes: ( OLcfgDbAt:0.19 NAME 'olcSyncUseSubentry' DESC 'Store sy nc context in a subentry' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.11 NAME 'olcSyncrepl' EQUALITY caseIgnoreMa tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgGlAt:90 NAME 'olcTCPBuffer' DESC 'Custom TCP buffe r size' SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgGlAt:66 NAME 'olcThreads' SYNTAX OMsInteger SINGLE -VALUE ) olcAttributeTypes: ( OLcfgGlAt:67 NAME 'olcTimeLimit' SYNTAX OMsDirectoryStr ing ) olcAttributeTypes: ( OLcfgGlAt:68 NAME 'olcTLSCACertificateFile' SYNTAX OMsD irectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:69 NAME 'olcTLSCACertificatePath' SYNTAX OMsD irectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:70 NAME 'olcTLSCertificateFile' SYNTAX OMsDir ectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:71 NAME 'olcTLSCertificateKeyFile' SYNTAX OMs DirectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:72 NAME 'olcTLSCipherSuite' SYNTAX OMsDirecto ryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:73 NAME 'olcTLSCRLCheck' SYNTAX OMsDirectoryS tring SINGLE-VALUE ) olcAttributeTypes: ( 
OLcfgGlAt:82 NAME 'olcTLSCRLFile' SYNTAX OMsDirectorySt ring SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:74 NAME 'olcTLSRandFile' SYNTAX OMsDirectoryS tring SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:75 NAME 'olcTLSVerifyClient' SYNTAX OMsDirect oryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:77 NAME 'olcTLSDHParamFile' SYNTAX OMsDirecto ryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:87 NAME 'olcTLSProtocolMin' SYNTAX OMsDirecto ryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgGlAt:80 NAME 'olcToolThreads' SYNTAX OMsInteger SI NGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.12 NAME 'olcUpdateDN' SYNTAX OMsDN SINGLE-V ALUE ) olcAttributeTypes: ( OLcfgDbAt:0.13 NAME 'olcUpdateRef' SUP labeledURI EQUAL ITY caseIgnoreMatch ) olcAttributeTypes: ( OLcfgGlAt:88 NAME 'olcWriteTimeout' SYNTAX OMsInteger S INGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.1 NAME 'olcDbDirectory' DESC 'Directory for database content' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGL E-VALUE ) olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.5 NAME 'OpenLDAPaci' DESC 'OpenL DAP access control information (experimental)' EQUALITY OpenLDAPaciMatch SY NTAX 1.3.6.1.4.1.4203.666.2.1 USAGE directoryOperation ) olcAttributeTypes: ( OLcfgDbAt:1.11 NAME 'olcDbCacheFree' DESC 'Number of ex tra entries to free when max is reached' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.1 NAME 'olcDbCacheSize' DESC 'Entry cache s ize in entries' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.2 NAME 'olcDbCheckpoint' DESC 'Database che ckpoint interval in kbytes and minutes' SYNTAX OMsDirectoryString SINGLE-VA LUE ) olcAttributeTypes: ( OLcfgDbAt:1.16 NAME 'olcDbChecksum' DESC 'Enable databa se checksum validation' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.13 NAME 'olcDbCryptFile' DESC 'Pathname of file containing the DB encryption key' SYNTAX OMsDirectoryString SINGLE-VAL UE ) olcAttributeTypes: ( OLcfgDbAt:1.14 NAME 'olcDbCryptKey' 
DESC 'DB encryption key' SYNTAX OMsOctetString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.3 NAME 'olcDbConfig' DESC 'BerkeleyDB DB_CO NFIG configuration directives' SYNTAX OMsIA5String X-ORDERED 'VALUES' ) olcAttributeTypes: ( OLcfgDbAt:1.4 NAME 'olcDbNoSync' DESC 'Disable synchron ous database writes' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.15 NAME 'olcDbPageSize' DESC 'Page size of specified DB, in Kbytes' EQUALITY caseExactMatch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgDbAt:1.5 NAME 'olcDbDirtyRead' DESC 'Allow reads o f uncommitted data' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.12 NAME 'olcDbDNcacheSize' DESC 'DN cache s ize' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.6 NAME 'olcDbIDLcacheSize' DESC 'IDL cache size in IDLs' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.2 NAME 'olcDbIndex' DESC 'Attribute index p arameters' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) olcAttributeTypes: ( OLcfgDbAt:1.7 NAME 'olcDbLinearIndex' DESC 'Index attri butes one at a time' SYNTAX OMsBoolean SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.8 NAME 'olcDbLockDetect' DESC 'Deadlock det ection algorithm' SYNTAX OMsDirectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:0.3 NAME 'olcDbMode' DESC 'Unix permissions o f database files' SYNTAX OMsDirectoryString SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.9 NAME 'olcDbSearchStack' DESC 'Depth of se arch stack in IDLs' SYNTAX OMsInteger SINGLE-VALUE ) olcAttributeTypes: ( OLcfgDbAt:1.10 NAME 'olcDbShmKey' DESC 'Key for shared memory region' SYNTAX OMsInteger SINGLE-VALUE ) olcObjectClasses: ( 2.5.6.0 NAME 'top' DESC 'top of the superclass chain' AB STRACT MUST objectClass ) olcObjectClasses: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' DES C 'RFC4512: extensible object' SUP top AUXILIARY ) olcObjectClasses: ( 2.5.6.1 NAME 'alias' DESC 'RFC4512: an alias' SUP top ST RUCTURAL MUST 
aliasedObjectName ) olcObjectClasses: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'namedref: named subordinate referral' SUP top STRUCTURAL MUST ref ) olcObjectClasses: ( 1.3.6.1.4.1.4203.1.4.1 NAME ( 'OpenLDAProotDSE' 'LDAProo tDSE' ) DESC 'OpenLDAP Root DSE object' SUP top STRUCTURAL MAY cn ) olcObjectClasses: ( 2.5.17.0 NAME 'subentry' DESC 'RFC3672: subentry' SUP to p STRUCTURAL MUST ( cn $ subtreeSpecification ) ) olcObjectClasses: ( 2.5.20.1 NAME 'subschema' DESC 'RFC4512: controlling sub schema (sub)entry' AUXILIARY MAY ( dITStructureRules $ nameForms $ dITConte ntRules $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse ) ) olcObjectClasses: ( 2.5.17.2 NAME 'collectiveAttributeSubentry' DESC 'RFC367 1: collective attribute subentry' AUXILIARY ) olcObjectClasses: ( 1.3.6.1.4.1.1466.101.119.2 NAME 'dynamicObject' DESC 'RF C2589: Dynamic Object' SUP top AUXILIARY ) olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.4 NAME 'glue' DESC 'Glue Entry' S UP top STRUCTURAL ) olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.5 NAME 'syncConsumerSubentry' DES C 'Persistent Info for SyncRepl Consumer' AUXILIARY MAY syncreplCookie ) olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.6 NAME 'syncProviderSubentry' DES C 'Persistent Info for SyncRepl Producer' AUXILIARY MAY contextCSN ) olcObjectClasses: ( OLcfgGlOc:0 NAME 'olcConfig' DESC 'OpenLDAP configuratio n object' SUP top ABSTRACT ) olcObjectClasses: ( OLcfgGlOc:1 NAME 'olcGlobal' DESC 'OpenLDAP Global confi guration options' SUP olcConfig STRUCTURAL MAY ( cn $ olcConfigFile $ olcCo nfigDir $ olcAllows $ olcArgsFile $ olcAttributeOptions $ olcAuthIDRewrite $ olcAuthzPolicy $ olcAuthzRegexp $ olcConcurrency $ olcConnMaxPending $ ol cConnMaxPendingAuth $ olcDisallows $ olcGentleHUP $ olcIdleTimeout $ olcInd exSubstrIfMaxLen $ olcIndexSubstrIfMinLen $ olcIndexSubstrAnyLen $ olcIndex SubstrAnyStep $ olcIndexIntLen $ olcListenerThreads $ olcLocalSSF $ olcLogF ile $ olcLogLevel $ olcPasswordCryptSaltFormat $ olcPasswordHash $ 
olcPidFi le $ olcPluginLogFile $ olcReadOnly $ olcReferral $ olcReplogFile $ olcRequ ires $ olcRestrict $ olcReverseLookup $ olcRootDSE $ olcSaslAuxprops $ olcS aslHost $ olcSaslRealm $ olcSaslSecProps $ olcSecurity $ olcServerID $ olcS izeLimit $ olcSockbufMaxIncoming $ olcSockbufMaxIncomingAuth $ olcTCPBuffer $ olcThreads $ olcTimeLimit $ olcTLSCACertificateFile $ olcTLSCACertificat ePath $ olcTLSCertificateFile $ olcTLSCertificateKeyFile $ olcTLSCipherSuit e $ olcTLSCRLCheck $ olcTLSRandFile $ olcTLSVerifyClient $ olcTLSDHParamFil e $ olcTLSCRLFile $ olcTLSProtocolMin $ olcToolThreads $ olcWriteTimeout $ olcObjectIdentifier $ olcAttributeTypes $ olcObjectClasses $ olcDitContentR ules $ olcLdapSyntaxes ) ) olcObjectClasses: ( OLcfgGlOc:2 NAME 'olcSchemaConfig' DESC 'OpenLDAP schema object' SUP olcConfig STRUCTURAL MAY ( cn $ olcObjectIdentifier $ olcLdapS yntaxes $ olcAttributeTypes $ olcObjectClasses $ olcDitContentRules ) ) olcObjectClasses: ( OLcfgGlOc:3 NAME 'olcBackendConfig' DESC 'OpenLDAP Backe nd-specific options' SUP olcConfig STRUCTURAL MUST olcBackend ) olcObjectClasses: ( OLcfgGlOc:4 NAME 'olcDatabaseConfig' DESC 'OpenLDAP Data base-specific options' SUP olcConfig STRUCTURAL MUST olcDatabase MAY ( olcH idden $ olcSuffix $ olcSubordinate $ olcAccess $ olcAddContentAcl $ olcLast Mod $ olcLimits $ olcMaxDerefDepth $ olcPlugin $ olcReadOnly $ olcReplica $ olcReplicaArgsFile $ olcReplicaPidFile $ olcReplicationInterval $ olcReplo gFile $ olcRequires $ olcRestrict $ olcRootDN $ olcRootPW $ olcSchemaDN $ o lcSecurity $ olcSizeLimit $ olcSyncUseSubentry $ olcSyncrepl $ olcTimeLimit $ olcUpdateDN $ olcUpdateRef $ olcMirrorMode $ olcMonitoring $ olcExtraAtt rs ) ) olcObjectClasses: ( OLcfgGlOc:5 NAME 'olcOverlayConfig' DESC 'OpenLDAP Overl ay-specific options' SUP olcConfig STRUCTURAL MUST olcOverlay ) olcObjectClasses: ( OLcfgGlOc:6 NAME 'olcIncludeFile' DESC 'OpenLDAP configu ration include file' SUP olcConfig STRUCTURAL MUST olcInclude MAY ( cn $ ol cRootDSE 
) ) olcObjectClasses: ( OLcfgGlOc:7 NAME 'olcFrontendConfig' DESC 'OpenLDAP fron tend configuration' AUXILIARY MAY ( olcDefaultSearchBase $ olcPasswordHash $ olcSortVals ) ) olcObjectClasses: ( OLcfgGlOc:8 NAME 'olcModuleList' DESC 'OpenLDAP dynamic module info' SUP olcConfig STRUCTURAL MAY ( cn $ olcModulePath $ olcModuleL oad ) ) olcObjectClasses: ( OLcfgDbOc:2.1 NAME 'olcLdifConfig' DESC 'LDIF backend co nfiguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory ) olcObjectClasses: ( OLcfgDbOc:1.2 NAME 'olcHdbConfig' DESC 'HDB backend conf iguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory MAY ( olcDb CacheSize $ olcDbCheckpoint $ olcDbChecksum $ olcDbConfig $ olcDbCryptFile $ olcDbCryptKey $ olcDbNoSync $ olcDbDirtyRead $ olcDbIDLcacheSize $ olcDbI ndex $ olcDbLinearIndex $ olcDbLockDetect $ olcDbMode $ olcDbSearchStack $ olcDbShmKey $ olcDbCacheFree $ olcDbDNcacheSize $ olcDbPageSize ) ) structuralObjectClass: olcSchemaConfig entryUUID: 12129616-e5e6-103a-973e-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z cn=schema/000077500000000000000000000000001421664411400320065ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=configcn={0}core.ldif000066400000000000000000000362721421664411400346760ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 db8103c5 dn: cn={0}core objectClass: olcSchemaConfig cn: {0}core olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. 
121.1.15{32768} ) olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last (family) name(s) for which the entity is known by' SUP name ) olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. 11 SINGLE-VALUE ) olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l ocality which this object resides in' SUP name ) olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF C2256: state or province which this object resides in' SUP name ) olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 56: organization this object belongs to' SUP name ) olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC 'RFC2256: organizational unit this object belongs to' SUP name ) olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate d with the entity' SUP name ) olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
25 ) olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. 1.4.1.1466.115.121.1.15{40} ) olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{40} ) olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 5.121.1.22 ) olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 56: international ISDN number' EQUALITY numericStringMatch SUBSTR numericSt ringSubstringsMatch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.36{16} ) olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. 1.41 ) olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- VALUE ) olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. 1466.115.121.1.43 SINGLE-VALUE ) olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA X 1.3.6.1.4.1.1466.115.121.1.38 ) olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g roup' SUP distinguishedName ) olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the object)' SUP distinguishedName ) olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan t of role' SUP distinguishedName ) olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. 6.1.4.1.1466.115.121.1.8 ) olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
4.1.1466.115.121.1.8 ) olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.9 ) olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 66.115.121.1.9 ) olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 .1.10 ) olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f irst name(s) for which the entity is known by' SUP name ) olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of some or all of names, but not the surname(s).' SUP name ) olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: name qualifier indicating a generation' SUP name ) olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.6 ) olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
1466.115.121.1.42 ) olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.34 ) olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' SUP name ) olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon ym for the object' SUP name ) olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBSTR ca seIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN GLE-VALUE ) olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
6.1.4.1.1466.115.121.1.26{128} ) olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio n ) ) olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive ryOfficeName $ st $ l $ description ) ) olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls o $ description ) ) olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postal Address $ physicalDeliveryOfficeName $ ou $ st $ l ) ) olcObjectClasses: {6}( 2.5.6.8 NAME 'organizationalRole' DESC 'RFC2256: an o 
rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic eName $ ou $ st $ l $ description ) ) olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor y $ seeAlso $ owner $ ou $ o $ description ) ) olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l ) ) olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de scription ) ) olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati on ) olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ description ) ) olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 56: a strong authentication user' SUP top AUXILIARY MUST userCertificate ) olcObjectClasses: {14}( 2.5.6.16 NAME 
'certificationAuthority' DESC 'RFC2256 : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de scription ) ) olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 6: a user security information' SUP top AUXILIARY MAY supportedAlgorithms ) olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert ificationAuthority AUXILIARY MAY deltaRevocationList ) olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU RAL MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ del taRevocationList ) ) olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST dmdName MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ tel exNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNN umber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ po stalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S UP top AUXILIARY MAY userCertificate ) olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe vocationList $ cACertificate $ crossCertificatePair ) ) olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU P top AUXILIARY MAY deltaRevocationList ) olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 'R FC2079: object that contains the URI attribute type' SUP top AUXILIARY MAY labeledURI ) olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 
'simpleSecurityObjec t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo rd ) olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: domain component object' SUP top AUXILIARY MUST dc ) olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob ject' SUP top AUXILIARY MUST uid ) structuralObjectClass: olcSchemaConfig entryUUID: 1212a836-e5e6-103a-973f-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z cn={1}cosine.ldif000066400000000000000000000261431421664411400352230ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 aac8272d dn: cn={1}cosine objectClass: olcSchemaConfig cn: {1}cosine olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 .115.121.1.12 ) olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno reSubstringsMatch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT AX 1.3.6.1.4.1.1466.115.121.1.12 ) olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 15.121.1.50 ) olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 .1466.115.121.1.12 ) olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT AX 1.3.6.1.4.1.1466.115.121.1.39 ) olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE SC 'RFC1274: DN of entry associated with domain' EQUALITY distinguishedName Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) 
olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.50 ) olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 .121.1.50 ) olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 1.1466.115.121.1.15{256} ) olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. 
1.27 ) olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 SINGLE-VALUE ) olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.13 SINGLE-VALUE ) olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.13 SINGLE-VALUE ) olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 1.1.23 ) olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
1466.115.121.1.12 ) olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person alSignature ) ) olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio nName $ organizationalUnitName $ host ) ) olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ localityName $ organizationName $ organizationalUnitName $ documentTitle $ documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum ber ) ) olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber $ localityName $ organizationName $ organizationalUnitName ) ) olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality Name $ stateOrProvinceName $ streetAddress $ 
physicalDeliveryOfficeName $ p ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg isteredAddress $ x121Address ) ) olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) ) olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C NAMERecord ) ) olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso ciatedDomain ) olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP country STRUCTURAL MUST friendlyCountryName ) olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S TRUCTURAL MAY dSAQuality ) olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa ximumQuality ) ) structuralObjectClass: olcSchemaConfig entryUUID: 1212b38a-e5e6-103a-9740-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z 
cn={2}inetorgperson.ldif000066400000000000000000000054511421664411400366410ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 7fd3c455 dn: cn={2}inetorgperson objectClass: olcSchemaConfig cn: {2}inetorgperson olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC 'RFC2798: identifies a department within an organization' EQUALITY caseIgn oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 .15 ) olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 .15 SINGLE-VALUE ) olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' RFC2798: numerically identifies an employee within an organization' EQUALIT Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 115.121.1.15 SINGLE-VALUE ) olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. 
121.1.15 SINGLE-VALUE ) olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. 1.1466.115.121.1.5 ) olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 66.115.121.1.5 ) olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) structuralObjectClass: olcSchemaConfig entryUUID: 1212b95c-e5e6-103a-9741-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z cn={3}rfc2307bis.ldif000066400000000000000000000226561421664411400355360ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 e0f5e515 dn: cn={3}rfc2307bis objectClass: olcSchemaConfig cn: {3}rfc2307bis olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 .1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 .1.26 SINGLE-VALUE ) olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac tIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 6.115.121.1.26 ) olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net group triple' EQUALITY 
caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 .26 ) olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service port number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SI NGLE-VALUE ) olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Ser vice protocol name' SUP name ) olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP p rotocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V ALUE ) olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 add resses as a dotted decimal omitting leading zeros or IPv6 addresses as defined in RFC2373' SUP name ) olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne twork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP name SINGLE-VALUE ) olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne tmask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros ' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-V ALUE ) olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres s in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' E QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo tparamd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.1 21.1.26 ) olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a A generic NIS map' SUP name ) olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic NIS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS publ ic key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING LE-VALUE ) olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secr et key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING LE-VALUE ) olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'auto mount Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automoun t Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC ' Automount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substr ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ uid $ u idNumber $ gidNumber $ homeDirectory ) 
MAY ( userPassword $ loginShell $ ge cos $ description ) ) olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass word $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarni ng $ shadowInactive $ shadowExpire $ shadowFlag ) ) olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o f a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword $ memberUid $ description ) ) olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an Internet Protocol service. Maps an IP port and protocol (such as tc p or udp) to one or more names; the distinguished value of th e cn attribute denotes the services canonical name' SUP top STRUCTUR AL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o f an IP protocol. Maps a protocol number to one or more names. The d istinguished value of the cn attribute denotes the protocols canonic al name' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description ) olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an Open Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) b inding. This class maps an ONC RPC number to a name. The distin guished value of the cn attribute denotes the RPC services canonical name' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a host, an IP device. The distinguished value of the cn attribute deno tes the hosts canonical name. Device SHOULD be used as a structural class' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l $ description $ manager ) ) olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of a network. 
The distinguished value of the cn attribute denotes the networks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ ipNetmaskNumber $ l $ description $ manager ) ) olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction of a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de scription ) olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device with a MAC address; device SHOULD be used as a structural class' SU P top AUXILIARY MAY macAddress ) olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic e with boot parameters; device SHOULD be used as a structural class' SUP top AUXILIARY MAY ( bootFile $ bootParameter ) ) olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object with a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ nisSecretKey ) MAY ( uidNumber $ description ) ) olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associ ates a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTU RAL MUST automountMapName MAY description ) olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount in formation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) MAY description ) olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top S TRUCTURAL MAY cn ) structuralObjectClass: olcSchemaConfig entryUUID: 1212c37a-e5e6-103a-9742-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 
20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z cn={4}yast.ldif000066400000000000000000000150061421664411400347220ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 79fdfade dn: cn={4}yast objectClass: olcSchemaConfig cn: {4}yast olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' DESC 'Base DN where new Objects should be created by default' EQUALITY dis tinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId ' DESC 'Next unused unique ID, can be used to generate directory wide uniqe IDs' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4. 
1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' DESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerO rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' DESC 'upper Border for Unique IDs' EQUALITY integerMatch ORDERING integerO rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTempl ate' DESC 'The DN of a template that should be used by default' EQUALITY di stinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter ' DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121. 1.15 SINGLE-VALUE ) olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValu e' DESC 'an Attribute-Value-Assertions to define defaults for specific Attr ibutes' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttri bute' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgno reIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGr oup' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6. 
1.4.1.1466.115.121.1.12 ) olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPassword Length' DESC 'minimum Password length for new users' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V ALUE ) olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswor dLength' DESC 'maximum Password length for new users' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE- VALUE ) olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHa sh' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYN TAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' D ESC '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DE SC 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTA X 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribu te' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SIN GLE-VALUE ) olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SING LE-VALUE ) olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaul tQuota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl ' DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- VALUE ) olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigura tion' DESC 'Contains configuration of Management Modules' SUP top STRUCTURA L 
MUST cn MAY suseDefaultBase ) olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfigurati on' DESC 'Configuration of user management tools' SUP suseModuleConfigurati on STRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePas swordHash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqu eId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' DESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( su sePlugin $ suseDefaultValue $ suseNamingAttribute ) ) olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' D ESC 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY su seSecondaryGroup ) olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' DESC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) olcObjectClasses: {5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfigurat ion' DESC 'Configuration of user management tools' SUP suseModuleConfigurat ion STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration ' DESC 'Configuration of CA management tools' SUP suseModuleConfiguration S TRUCTURAL ) olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguratio n' DESC 'Configuration of mail server management tools' SUP suseModuleConfi guration STRUCTURAL ) olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfigurat ion' DESC 'Configuration of DHCP server management tools' SUP suseModuleCon figuration STRUCTURAL ) olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfigurat ion' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfi guration STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefault Quota $ suseImapUseSsl ) ) 
structuralObjectClass: olcSchemaConfig entryUUID: 1212cabe-e5e6-103a-9743-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z olcDatabase={-1}frontend.ldif000066400000000000000000000015271421664411400355620ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 adeada7d dn: olcDatabase={-1}frontend objectClass: olcDatabaseConfig objectClass: olcFrontendConfig olcDatabase: {-1}frontend olcAccess: {0}to dn.base="" by * read olcAccess: {1}to dn.base="cn=subschema" by * read olcAccess: {2}to attrs=userPassword,userPKCS12 by self write by * auth olcAccess: {3}to attrs=shadowLastChange by self write by * read olcAccess: {4}to * by * read olcAddContentAcl: FALSE olcLastMod: TRUE olcMaxDerefDepth: 0 olcReadOnly: FALSE olcSchemaDN: cn=Subschema olcSyncUseSubentry: FALSE olcMonitoring: FALSE structuralObjectClass: olcDatabaseConfig entryUUID: 1212d054-e5e6-103a-9744-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z olcDatabase={0}config.ldif000066400000000000000000000011101421664411400351160ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
# CRC32 70bea9f6 dn: olcDatabase={0}config objectClass: olcDatabaseConfig olcDatabase: {0}config olcAccess: {0}to * by * none olcAddContentAcl: TRUE olcLastMod: TRUE olcMaxDerefDepth: 15 olcReadOnly: FALSE olcRootDN: cn=config olcSyncUseSubentry: FALSE olcMonitoring: FALSE structuralObjectClass: olcDatabaseConfig entryUUID: 1212d3e2-e5e6-103a-9745-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z olcDatabase={1}hdb.ldif000066400000000000000000000016551421664411400344250ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. # CRC32 4e8f1f23 dn: olcDatabase={1}hdb objectClass: olcDatabaseConfig objectClass: olcHdbConfig olcDatabase: {1}hdb olcSuffix: dc=ldapdom,dc=net olcAddContentAcl: FALSE olcLastMod: TRUE olcMaxDerefDepth: 15 olcReadOnly: FALSE olcRootDN: cn=root,dc=ldapdom,dc=net olcRootPW:: cGFzcw== olcSyncUseSubentry: FALSE olcMonitoring: FALSE olcDbDirectory: /tmp/ldap-sssdtest olcDbCacheSize: 10000 olcDbCheckpoint: 1024 5 olcDbChecksum: FALSE olcDbNoSync: FALSE olcDbDirtyRead: FALSE olcDbIDLcacheSize: 0 olcDbIndex: objectClass eq olcDbLinearIndex: FALSE olcDbMode: 0600 olcDbSearchStack: 16 olcDbShmKey: 0 olcDbCacheFree: 1 olcDbDNcacheSize: 0 structuralObjectClass: olcHdbConfig entryUUID: 1212d82e-e5e6-103a-9746-d731be523aab creatorsName: cn=config createTimestamp: 20210108101443Z entryCSN: 20210108101443.265809Z#000000#000#000000 modifiersName: cn=config modifyTimestamp: 20210108101443Z 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47953/000077500000000000000000000000001421664411400236445ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47953/__init__.py000066400000000000000000000000001421664411400257430ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47953/ticket47953.ldif000066400000000000000000000030111421664411400263760ustar00rootroot00000000000000dn: dc=example,dc=com objectClass: top objectClass: domain dc: example aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access"; allow (read, search, compare) userdn="ldap:///anyone";) aci: (targetattr="carLicense || description || displayName || facsimileTelepho neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress || postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber || telexNumber || title || userCertificate || userPassword || userSMIMECertif icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo n attributes"; allow (write) userdn="ldap:///self";) aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com" );) aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo logyManagement,o=NetscapeRoot";) aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc apeRoot";) aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47988/000077500000000000000000000000001421664411400236545ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47988/__init__.py000066400000000000000000000000001421664411400257530ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz000066400000000000000000002774011421664411400273330ustar00rootroot00000000000000‹øÂTschema_ipa3.3.tarì]isÛFÒö×õ¯˜’\e»J¢yJvRû&)™JâË#±÷ ID ÀÅ!‰ÙÚÿþvÏÌ DŠ’ $D*eq<ÝÓÓ×(}zóìW®ÓÓ2üÛ(—OêÊ¿ñõ¦R­Ÿ”+õr¹VyS®Tàþ7¤ñüÐÞ¼‰‚Ðð yN<ß2þX{ßCõÒ«ô餼ôûîøÖvƒ•k/hf»%Dz§Oôìàz}mÿ7*UÖÿÓÓzå´Rƒþ?©6ªoHù‰Þïõ7ïÿ÷‡äkç¼{IZWýƒîù·ùÚ»jý壹¾ïÍ|cAàÏ©O) ¼ixkøôg²ò"b.ñ©e¡oO¢;$†k}ò|²ð@‚VX¹õ¡½pNIHýE@¼)ûq~9&çÔ¥¾á~4ql“ôl“º%F@–XÌ©E&+vû¾(ÞíyвÚžû3¹¡~ªl[4T‚ûrHI[ÄvÙso ðæFˆomÇ!J¢€N#çˆÀä·îèÛÕx­5/ßšƒAórôãg¸7œ{POo(oÉ^,˜¾á†+„tÑ´¾ÁýͯÝ^wôƒƒÎº£ËÎpˆ„\ H“ô›ƒQ·5î5¤?ô¯†Rú§¦Œ×>% Û Å? wÀåXdnÜPè%“Ú7€Ê ¦·\mÒŽçÎu¬ïþLì)q½ðˆÜú6ôyèe»Géœ#ÒuÍÒi|!# ¬Á®ë;†IÈ0Âçkµòùê!Þ{Ñ$åj¥R9®Ôʧdú¾!#™½c“O´æÛ³yˆÀÛ½fÕ¦gQÎERx^ä eð&Ï’{ÐØþ}8¸ôÜãó~´ …ƒ80ð•ÊhÁ·Cƒ¦·˜Ø.cVíÙ®éD– Ìg#æÖ;"A4ùš¡dµc/ìßÏÅ0/ ßÐËy‰¨ï…ö–0æì0TÃí1×ÈT ñ\g%˜á­å“ß‹fsø×o) ‹Nm—#ÜSèWÀdQ7´§v2º¦¶L4PÒùÞêôGÝ«KhnŠR"ï ¼È7Å›ñþ€|Àâƒæré{(ºÝø K0¦åmÞšR‰& 4 ª`(¢(xðG€5Ü`úûe»@6´¹&gèÔ÷B[å¼9|DNˆ¿n=Ÿ±pBcq™¬DkëÇZ‰\!ÿuñEÆ5447܃ I1YÀî›æa-1áçý;¡«4‡ºÈBGУ@Œ­0`º „²Ã‚õJ³¥>ÿ Lµ˜ìÂO÷wÁL>€ÔG¹Ø±"­Ãf1‰ƒ–b-‘î”aQ˜DbÇñR`½ ).]°Ap2MZAu:6€Ú ‡·¬˜8Lpz Žñ›áÞq æ*qH«AÈÓo”²ÁÚ¦P+Ù uh˜~Z9…!ÃdŽ!#›Ñ=1å€Í¡ 0Š&FŠ·ë¼ãÚÿoÚçzïCë#©‚㥠6ì|¼%@ >ˆw.ÛY÷ÿ#CsN“Ÿ.ñ— ¦ @Õkƒ8á8«ã;dð ”È’,èbTBC–û1ݬµ·F(ôãhµ¤ÁOä©–*'¥Ïõr©RªTj§µr©V+õ ¹l^tÈ{+ÀtÁÛÖ{Òî [ä}ÇalåcLcdƒ”ï ¦Oe§ûtê – u˜s{ùž € ÿNHœ„:"ªŸÀŸ•F©R­ ªòýø X>Õ{Á`Ò¶Áþ†ž¿‹\õß“o=¦Ò[Ž멬–jР ’Ã:GT’Bö‡Ø~wÎ b€’qŸ0ÇŸ·ÐǤ9øø`I1”|ܨ¢ýÙýµÝUúÔ(»Á±Iýk,ú”‘¿Šÿê'ÕTü×(×êûøï%®}ü·ÿöñß>þÛÇûøoÿýe㿼ð¯ê×%6„U!xã€wÓ¢(=øxIÃÀ4À·W”\®1ãœ2ˆÄŠn€-´Kô¢e¥as­!…¿ÁÄ7ð`/ô–äb<ÁË”:-°Û?g¯-Ãü§þÞÉ›ê¶ ¶2¡ó¿/q ¶F¹§ïù!û™HÁ#I/ÚcÚ«ô©Zç"0Aî©ÿô±^÷Ç•zí¤’ŠÿêÕÓýüß‹\ûøoÿíã¿}ü·ÿöñß>þÛÇ/ÿA‹‹PÈ#9Bã¹:%<¾¿:$qQ¹š[íÛ±C\=#<DC Í[!È2{ª 7àî¼Õ΢,pʃ à«ã™†âÄA€1±@­…í¢ú6B)€c‡0#µ&2 
edZˆŽ…x•Z£ñ$x媑øÄÜÕTŸØcþÂÎø+Ð*h“qèo›Vóœ˜ m²ó'^OÊrÞÕÀ& Œ¶Ò¦^«zl½ ÔQÂÝtëedA¹Oƒ…6^ù3õÿ`có=éü߸É2´Ê À#æqbðxa„æ|3.»—ç½Îñ¯ÍÞ¸“CǶx¥H,}:¥>hňfÂ⸘‰¬Ã=@Gk QdíÎ\p )ã¯ÃÑ@) T PÓÚxjZkiZGö‚þÛss¨ ¡†üṂ׌›/eA.xaÛ[¶ûе/%µø‹´/ÑPAõQï…èVsŠÛ—C0EøÔ†Ãÿì÷©Õíèªdè‚M‹I×a¢>88"àèÊ¿¨n­f#Ûù`=‘U…H°‘þN$b÷¾6k)¿…ár<èźÈot‚n(˜ø€›Ð -‰yzØuö0n´iš\„]KïÚÿ‰¨’åÅ€Gúð ðÿ H¬ Dæï±FìÉÈlä’ÙšƒƒN!>d2ï (01Éé/”÷wYT®J„çC>Ì=Ç xØ'›#˹ÊýO¸ÝûµÎ½ŽhÑÄ'Ù}Œ“î“\þ%hŸ‚}¾híñÜSúó%Ø·éDDõKFÃÇã*ºƒæ pO¹/¶êÆöá±;!x²¨’ÍO/6K[Còޤ I~²þ‰ˆ«•SŠoi·sÜËA€/Ö;²¨Ù:ú”¿Ïrl B ˆ »‡¸0‘“ž˜¥àÛhÔu %¨‡ûèÖóñ ;< “Ö:kÉrKÙvå¯tZ'*ës¯ñ¡¶QMÁè3‘ºŒ¹i¦&^´î=€À°”g­–Hÿ3——[ U¾Yºä˜/!9"S#uÙr`>HWEuƒX\&GË…á_CãÌWÅl­ òµ´<û–Ï„¼¹IÁôc1‡AõŽ7ŽÁe>©÷ŽEO¸¼²«ô©úyi;^øù†ÔJÛëÓ“çæ°Ù/°, º%˜VSh¾ „;¡(àF’ù±W\Q*aïiè¿9PêÐ÷Tr0VÜÅMª:Æ^f1'”º1!·8 ˜À”žÍYŒC/FM±¾^Ø3ÜqGŒ[OÖ­øB„*ߤ@,‰9J5ø1 ÷óÄ’¾2lHÌgûïŽDVZ9;,ÂÒbQ«»º…aJéñ›ØÈ[ Ø(H4¹¨Gj¾%§:ûš±š t ÕŒOˆú#BÃ/i‹{%í¬{Èà¬=B…ñF¥wÖ"•êiýH8_HA×Þ–š‚u„±aq™ŒC¼©Pä˜]¢·(CŽ·bÈÐ!}‡bömÂØÏ|EÖö4 # !ð‚ 2M K L™ÄâR"«@®Åƒt_LhEÜ\Ú`r¹ô¥T­Õ!”þ£¨V†ÿ/5dÂ@é\ZeGôÚÍþc–)«SÁ’Úüôé:xU ( /ÄdûוDè£ycø6X\”p1ï8•úRk91ŸsÆÞr¾XnâWQf²Bù ë¥xÂ3QVùœ¿š§CÀ;ôÈ;buGRPX.hŠ…¿/é¬?÷Bü[ïªTŽY¶*ÂǘxŠO“t¥ç뛢‹^÷×ÎWéSíÿeKqž)tþ§Ú€¡•^ÿ]®íó?/ríó?ûüÏ>ÿ³Ïÿìó?ûüÏ>ÿ³Ïÿ¼ìþ_±<™¢f1ó&At²÷SÛf* Ÿ{‡o~`ê|ýŠI_àù€ò–u¯d»V+*\º’%+Šê¾d¯pŽTaq±Àš|Ñe6QS¼ø$°®;õtª!LW%y1Žñ —PYëúàrW½>î |x´…=0ëù(Õ;Š‚Ë–Õðõy˜àÓ€¦êŠ’I<—w¡Ôð©…3| Ã$ˆƒBp²:ÀTeQ ¯‚_¹¢¡‹K‹‚Åä‹mʽ¸ª(€üx‰çé8).”sÃn'å$ÅE©æ_Á{÷ôCxQQœ‚pÅŠÌ0#`JyQÐ.mó:ƒK̯<}¡Wæ˜G¶c]F¸€T÷:“ò¢  è¬ÉA§Wè|Ú†“O­( \³<ŽÃ—rgl}¦¶(˜»¥í3mˆõ5ŒzU¡dHÍÈ·ÃUvˆÈšb  ¶¨8Å&ZÝ´\wS± [xÀô˜3÷¹gK¾p#O—gj ã,N —Û£óS­)l´û¾ç÷<ýü9YX˜¶­4$^T˜;jš4Ò˜âÒÂ’tjDNˆ8–¡Ü4¤‡âyw«dr5·^UÀ‘\÷Œ u4lqia!8u–éT•(+ÎI f’(+Ìx³a´\fÒŽJyQÐþeø8’±¨Jya€\Sÿ¢Æ=ç„N%1·.a·ù©œž²¥ÓpÆ®ŠÅi™àæ;)yn.“¬{ÔQ¡¦»þPP‹âFH>e÷Ž8ì\P5+È RY8V'¿¶!*ÉJ­IVíJ`:ñ•!QM>±ŸIÊg+B’uÎ:%Iùî¤ðÄO Jî…ý–9µRöÎ;=äg¿õ`Zž °¬ /²u•¬ŒýÄ5“C\,ÊîÖ#·ä¥Òâ²’ŒÏ¬œJ› U”ÊLL°MW (_M‘6'JùîÊ6qú¨Ðèg¼€ÝE&ö%øÀ#îÓ©è!jÊ¿“µ-æìàKŸý¶:…/Î*}Yñã‘%=–”+|d[•žw÷ý§poˆ/­R=k£¼D ¹xˆŒ« qõ(cö+×ÝWÞ÷-¸¢MŠ®Ÿ.ݽùl?×MÊ ;'L™øÔ Ô9F½FL⮥µè­ûk««ô©|¢måy†5àœÿ ŽìIæûOÕÆ~ý÷K\‡lïSõôËgrœ»§‹|¨–O¿ñíQ–†‚ÇôñãÛÃ'¾R+¾Ÿþ‡Ÿ%¹1 _¬Œ|ÿ–M|Cç¶éÐxMûòáLœs\cËLñöuÇBÕCâ-D~p›¾/»up¡Ì«,±¢¸àºF1E31^Œë¸“ÛØ 
F~š’^þ­¹Y¯$ü´®›çY™œÉÜ4v¶ À|2Ûz;§.ϳ]°.¼ž/ÆYõ”²×ÉèZÌf\*î­(M ­- \dÔ$ª»D>Q´ü¾~.×3\ƳÍÍ Ã¼–/‰V{ý[h‚uû[OÊ1÷âMž ë ò¯~çœØ œ‹ØˆÒêç¢)]3¿ÄtfoËS|¸ )¤8æH°ô®á/Gi…ÏŸ`&’„DZ /ºåÃG “{†Ñæ¢adg$Ç ¢åãìá'|r3i+|X­ûˆô‰Æ‹þ/­a¥šp“ÃJ•ôϾ“~{ÌDŠÞñ=9¨ª¸pް áŠmÄö±êu¬9LÎ;`ÇPàf’oùÀŠz£Zggœò£I¯ü™vb>?Bä!øŽŸ8„ÛèÝò+Ø&õ¶å€í,?P›`]bsï–ÞPÿšã‡YÈ=9ʉ ´ßd$} C@ð ‹RÑŠº!êe<ÐôŽg†42µ²¹V©—ÿ[m`¬ü¿Œ È­øS|*õµ´IXŸ'ªpšTŒ€ûOó’˜P‡ZãA÷}ü#ò„—²&;à sŒ +*GBñ÷¬´Ýé:­æ¨ÓNÃz®mxèiûišƒ DfÎ×aÕh0nƃfï-Ip1Á=–`|Z`•fø¥ùw$ ÊÙ‘b’wÐ^J¬·é.½R€B?q§0Ÿ«”ÏϽËÅZö·‡“@I6™þc8~à†x8v¾†íhÇl,¼‰ÍΜE—¼P<(Oàð=oc p #4ñü„`´Sêw åãw0ž2‡w¼Ë;W”' ¥njÏwìçÔN+õ$ÿ[-ãù¯µÊÉ>ÿû×á!éö›„}wäªÛþéyŠýó[‡¤ûY?ý#ÿFø““_kdb(~èL|üJ3"ë¨* hgR¿Í$‰3¶|¦8£k/Î]H}0?T]¶.K ÿî¯$‰½ç33Wƒvg!ŸRqå[4vñ¶]ŃýqSË]Z´–¾jBßåHÎGªèÅ4âˆÄ“•¸I:‡¦n³±æë9P³ýtÖ¡àQdÖT:G> ^j‰Ï(›¾GDT^÷ç&Z£ùÌ1´ÝGøûÓ% '¶^±ƒ >ç“Cz]#¢Ë‰a^÷}{Þ¶úBÖ‰á‹) Šƒ'ØÝâüùDIœ$èó£=ȸ.¦Ô3ŸÛ‘š†JÍ7#˜+‹?ã¯>-ãmðkãÆ¤3•šmÕN½ü”u¢’×ófž;dë3”/«ø„•^ñד×S•}ßÃu¨}#œk,å„Wü…¨ÿ¬É7Mñ¢~¬!qÕ_ _Ö²¡íCœ™Ã ,&=†;ú¯•²Ê n‹ÏÇš¡š—¾RËœ›ìàV2þ›ÏôÄlVˆ yxR„ýƒ>ÕAo±š˜¸ñ1P,ÖÒ³lÞ$^”ü_1d}>> ”ÕÆqŸÿ.”ßõ ܾá‡n℟E8ƒüŸÈpø irÞ½ñ½,戈L8§†Ð;ðF‚û¾v¹Áàþo¥úù[ÒÓÈÊOί¢pæÁÈK¾”ðÁ£«¦•)qf6(!8u´¶¯2ŽÎFnÌv$ä’ÔuMo±I¶x¤x’N3$d€:Â?Õ•¤¼XUš¦ôø Óò9;‚¼À¾»šNÁ©—tô¯†ÝïÀo,{£^3úC>=J­Žkú+¶B™5ô,ë og‰NIUÚlþµ-Ÿ\ˆŠ·éÅLoâ£\7‹±®¦ñ-/yöOôjVü¬øv+F¼‰˜#¯»³ºÉ~|…(vH,ž|;ag½Ê;­¢ÆíáUSð¨52üY2ø/²Lì.Œ‚±äÉ6¯)[I³ Äõ ÅÍvz¶ÝiQ„x—Šo¿½nw+ÊÑó~4ù…ÆÁÎpøŸal’k,ÝQ‡î˜DR¼(Lö&Nø™íƒ%¸1œH¬dÊ–tÛÄ7ØgHŸJ íH€âWuÛ„6´ÿˆ‡'þýªá7tþîé€ÁCàãq"J·Íû<1iŠ;5¤¦‡ß0X=Dc o|Ԯ׿µS„ýâOÐü£m„ÛÌ;Ô4_fØmu óìM{›»Ð7h›ð²Ãpñ(Y¶P&ö3/†Çýfë…"åÝTã—u”§C‡{)ƒ†?åuÅ¢D¹Q³ˆC/4œ{Ö®ÞŽÐ{ÆT¬Š›ˆéÅôBÎVºu“ˆy3~bÆ@î–K–älœËLªŸYNïMön!¼ÿ¸^-MÃÄz-ÿ[Ñìš©>•>Ë€«UR)yY¤¤¨e‘–°Ì-äéÊÉÕ’:¬cOïN@êÙåc‘fÉ):ù;É{ê}“7Ÿµ+5ÙdœÍ•{%û—L–ÊÝùƒBÉrJúR9E­8IÌ©¥"!¶Ža™$S¦BO­B™ ÖÊS‰I’Xåë‚÷¸>LjçÕIð·é?éJe"é{ÄQé T̾«4)I%ðâ«ÚÀf«Çe#~U«J¡ÉFÕœaZ8»+ØJ~za•ï Ï´¹¹Mký©ø»ã¸\|(+ï¼ƒØÆnSÏYäˆ_¯GÛÙl5ªÜ‘9Uí­ãøìy(Ûí¥5í¥ü(—g©'ÊüeÎàaÙÉ !3åïg26j2½ÛQOÒåñxfx˜Îÿþöþt¹q$I…ÏßæSÄÍJûR2K)¹ˆZªnŸkLIY©SZ8$UËkƒPB'°PKÏwÿÜ=v,7IT¦ÔS“D„{xx¸{øbh¼¼AIgKÉÐÞ8ÿßRS%Zº²…šÇÇ”Wþ–šÁ‡ïñ1>>BµU[C,Ù*¦ ÒaåÒþ¡šAßsé6·¹—ªFÑ’˜Ý—ñ 
EüÿÐÿó›ŸÕÇéSy€N÷ÿÜÙÝÛkçý?÷v[oþŸÏñ—‹ºgçñ-ÖtúM„,âz¤¨1Õ‰ª/ýzõAQ¿‘7¨ºVJâÛø#»°ýÝú.…ýî÷Ò“øüïí:€é>ô‚_oßp73M ¢;Q•¦¬º õxqr”þ ÿÂß?Aàʶ‚4ÞrÝ Ë6š›¼17žDYò°ÑØUMèþ¾KÚh`VˆÂFúÅÄx64r÷˜U›¸ð*þÉu¶Ñª—ÝÖHWÂ4Ã¥¾±³IGóªŒÑO%ïÂÕC®‘Eγ¦m†ïõ¢Ì¹ßh— ÏüDJÏÍÚ¿¨ÉLÖX*Î÷(ÈNh»½“ƒÏFÓ‡²™ =g¼‘ÿlCK) h¬lì®.‰Æ ;bû§M„&(›-ø[å(ðOÕ§âÑÖ±£¬GÑb»;’X`žù¶¶yelƒm<¹¶'±‡K®”*SâœH¡\ev®²¯™l\‚°ÎXF5º1ðq„µäÆ¡ÏòÃát!6D”Äáᣨ—FÝd¹NG˜©`f´‰÷9ò€×nœˆÊ/‰}ª0Î=Qð³>U‹¯VÊ0“ZÈ‘½ ž´Q¿¯‹äcl‹áÅ>]mÒLKá’úIEMö”ÈCtetÓàýP7û¢ÓMéB䈽Œ“†Ãƒ8©V%Ê=é¶12>rü†ä›)fP‹Ÿn ¨¸0ÕŠHHü¢›£“~çôôât/úƒ# þ›†3ÿÌ—‹ÞÞQçóé±~ª™jðëàŸ;ýã#¹Ç«GwòöŽÏÿÀî îÆ×»øó¤ìÑF=ÿèÑe÷ýߎÿbùG›…Gáÿÿcpò'+<º#íÝžôŽû0€ãt._ îýú‡ý¤|æ½ÐëXáù×ãü£Íza¬ýß{¶ìÄEbÊÎþÑ?îý~rxœ´‰Î³ª÷ÍUm,±'ذºî}0šŒ¤ãQ }âR¹•)6»8òæãP&,gÎ=ç¾²rhÎ$Àùý;äÝ•À|H%¸ BÕ¨çÀêÉovVèÇ"û'wÀ×ņû‘«Êx÷|' ÇOÉ¡6¾KùÆFÆÀƒ<ÆáÏnʇAÚBõ3Ъý½sxv¼}xqö/¤îCÒ¼:’vL$Ñ@z“– .n3¡7%†NEaåÓ£NWä¾j¿‘­Èï]ćF.ƒüîå9™#epT"À/dîyäáýôçOŸwäo»ñèçÝÖnÍÖŽÔ~„õÈçbnBc…ï”Î<-°ðÀ ÞP*HBIᥕ”J絋½ù¤Ÿ†%þP&lþÁ%½äu˜ðþæ©Y•ÇþíèáGW†§&U§Yÿæ¹¹'+hú›ç†ÏqÇY<ùûäöþýûýÿçCО‰ 1?+ äФ¤àqÕ½ófÄIÑž·*„Yû˜ÅÊ6°W_½9žaQØ"“c¬ÈŽIA>Jé0Kœ(¥ôX öe±‡$¼a‹Æ ÉA‰7б)S@Åoüƒ—’H¶‚ñ?Ä×~’}ÿ„ýJ¡A~OÈŸuecþ ¸T|yÔÝf y58ìηœ›óE\pìO×Ãç0ƒTâ=uã1ßqSßIÜZƒÝRB+~Ëþ)ïéhR°k¯L³¤1GËcJÝêÆaà>ˆís¾³›£-Þ•’ÍV³.rê`ì, @´,¥X¢êY *Ô´oþÛ0¥áßü‡MÎúˆ$Éi³Rƒa˜[FNŠ S¾}†om3SîTß…n:ýsÀt{¾<¸*ª‡bÿàš I·ÀñÜC‡Rè^ù°WJ3Óo :]öA,üù翳>àóøüð˜ý·<ƒ“]lœ‚ÞyÅÂúüŸõÿb—°Ö»‹Ï‘ýl#ÿì7}3÷÷ŸMþl«)ŸU>üŸ-ù,»èN.Î;§êèIZþÒÎi8/¾ È÷¶·¥àú?²ùSåHJ0ÓH'ûØP2"Ú§ÜøxÚü6Áf”.ÇteÄ·|oSÆI_/(N«ZH6`hT|ºô{å_ÀŇ _…[œf _U›‹_”j©¿-t;'çg6ŸOÜ©fž——žHçÎÖÐCìtrµ•%¾/M-wd-6ÃÎ#Åd:Qæ-©œ £wBg ¤RS 8¼4C4{“Š8þ×"⎥¤[Њõïj{ðx8#›Ã'ZXDÿn<º "îÈeh…0`ÙÆA–©mÖGÙYaŒ*¢åÊø™ú>,¬y~ºå^¹[nâþáö·0KRï`•#W¹+Vb±×H+K‚ñlhÃM4­ÎÂA^X¦K÷v¥¯«A§æqüùEï¬sª7 Ñüû޽/é§;§gùæ‹óÓ¿Ì¢¹ß=><1:—†ž/ýVIßÓhÄdê9ñGH¥¹ÇŒŒâ!Q¸âÍŠZò²A:'KlÙËWwÕô8“È\²Ø°TÓ¤>nøútÃ_ÀNÊÏßD¾ŸæùØÖsÜlBõ˜Šx~“`‰p§Â˜´*yp8½O_‰²17Îx šëœ§é;öi:t Þärg<“J¬D<­Øn›ï˜.p Q iÇàÂ4`û3Ê1Ms ߃•iž9æ`']=«&oÁ¼$¿´¨[­¨3BtRºŠÔÝÇVPÅêÕë¨| /¶š,KÓÙ õ*îJÍ%·ŒÎÞ(åB–?¶KUÆ=ËwÌðeo,°³WªBvÂÏVx¾ýß v‘N(îx8 óÑ 2Žý<ãè«/`@á“zôÎ/ÚØ‹ Q„õ ëêý‰á´®5Eëër á%4ΤÀ,a.¡÷4 å]ǦÒÈœþé–ñá4¾"Ž/2ê?M¬@ÙvbÏéÈ/ô—›Kï§ÌB˜±i¥üʬ6Fã/§‹óv÷;HÏó=ãX½ðö©×Žè@“ж)Ðoáá9féýÇÌ0ÃIBðЧú+¯Ãêô? 
~íÃSÿšø)vßQÛIøÀ5AqÏ»«¶k,Õ à±{ƒoèÄ“ÝpUE;û’yÆ'M+üe;A¥ƒ›U娂k0<Æ×´¼Ñ²£EO N(A1¥ø^=[…hÆ’îÔ‹¶Fm¹ÂÌ9ú “¯áÿÍI‡Sá(j®åm›{y¿Fb\à´Ä7˹vEæ b죂 …ÏÝJ·E…D xBóư‚Ü‹b EK(ÙLÚA‘”èÆA2‰DØ.>O²¹>$$2QGÚ Ð¨!»áOóÜsáÍ2sé9û 0f¬ã™1PðPy>|,ŽvÎê7:ø~¦¥D™3¹@󜓲8µ2m* Uvƒ°Œ‰„ûò7×¼øéÇ¢§íàýˆÉf1¹ÿPÈsUˆÎ§ebÓ—n’æaú£[ZI¬°¸QpáQ{ÞôŽDšAˆ+¶gÓú™{KXmôƒ¶µC·kË€hË©¿¼Ui‘úEC0¶IIr?“8G.U[¦è+ó‘B¹ć2ÎY ƒæõ¿êublÇ«TPYƒVvGÛ…#·å¹„ÖFX1‘}yš·òÐ…yb§T²Yœ-žxsì‹óƵ·ê|ê1PA^–_ WHÐ`’Vncþ‰;Rlâú4™uø20h#¾î "tnÃä=ѧal‘« ¼øÃ! ç£êHáOIFÒ%ÙJÝÀ‰Žjý}0žÙ¸ìn·ÓüñÇÒcšˆrõçŒD»Î+g:Ÿ›Ü :Ï s­Ú·X5•9誛9÷×âô›¬–—P婊hçÂDÙÇ@Ä %5d™ãÞprpŠ6¬8±Dÿù¦d·Ê‰ªj?4ÓL¼gù@rÕdaOG‘¹H$‰"C±ñ$]‘‹Tø >ìvîŠyP°W…‚*…þQþ¿†T¶à‹OQzzþ?ø¹»—Ëÿ·³·×|Ëÿ÷‹Tn eËË^2½ f¦Ô«Üá·Ö½úËÌéI÷Ë1À[N"/fm.‘ÈËPY‘žteóù?ǰE ³±÷2ù?ë»{pO­ÿF‹çÿ|«ÿþ,?±Üü³-Ö…ë­/ƒ.h&"-èŠÿ(cß÷Yï¸stv¼MR³8Ï×÷bw¢¡<>BÚФ·©£?’ Ë@lÖëÍ­zc«¹Ã®ØghùÕO²?ôØÿ °¿wgžÀþ¼ut1ØÂtŸÿ»–/+¿:[¾pµÖ˜Å¨vöÄX·p‡s@ÃLâÌùiúš9[¥6¶3N—ñ‘M™^äµ¶àÒ\hŸå 3Ø%\jf¤& ?²ùÒHkvöù!+ÇÚÈ¿v®ð振–‰¯Ëq;^×”Æ]² ˜w)¥ÂÃÔ~ßE{zO$‘þÜ1ˆè˜…—cŽ@O¼ûã¡°]¤ÁÏNäÝ^v£‘¨šhíþöù,[ÐÃS÷rêÃóIây»eô7/ú$íýˆÜ3ˆÙ¡'ÆfÑáŽhš…öŸ™/Œâ¸^ƒ”çð41uÅyÎŽ©W„¨}Q“ÀÓXê?¤™?´(l#¾õ“$ðüÔ¨©PÆ4­nþ8tu`¢ëúqt]èÊ+ÑØjr Gu)ä0QvW´ò\?´ÆcqŒò/”è0ËrgYˆ¡™hiOäÌ-}jqƒ‚mÉÓÆ5yƒ! ñko7ŸÑœÕz.׌³M?`žÛÏ=éÛŸöêÁ؉³ñSiÿéÿf}·ž¯ÿ±ÿ¼éÿÏð÷C£Òè?ª̹89ú¹ª$ê®TÞ15q»±GR,/µ8¬J ‹™¹cš6¯eñ7?ºŒ‚MŒºÅld¼•éM«L •3½–樦‰Ÿhæ8’ƒâb¬\FNò %0„ÊI™~²TȘ;cøÒ°´ò°œÇÙgʉcÏÈ­:3yT=}Ån£Kƒ±SFsæØPøÚUa=áhçáøÝ¼8ÉãÍÂÅì±òÔöùÁÜ̓y{~X€’Z_-{y û~8JàΜ*y»0û­§Ûσ]PÕû<+ˆ[íÈJFî„×q²æ¨8þ޾µFè7‹_J ¼à:ÈÒ":õ™|bEA>ËCÑÈA1€>\ô²â%Z‹F`¨ à7Ö„f ¯ÑÏü±5~lÜJ©umßÊSÑ]¤7Fª–b¤…}'å¿ô,?÷,D¸¬x„;¹ö/˜¤84^HTWŒP¤Çqä¡VÈŸ! 2*V zá…Û.èÒãœÑ·© Š,:\P éñ瘅ÝÒAËdÐÂÚÁä8s·GúHýøùkÃ/Â^n¢ùŒA4®Ó®ÕØ/?J¤ñ$€j]FsP:úž]øjËÒ•ÌóªX@šù}W”œVÅó$,ºš^ƒª7–á"‰5g«¨H÷:G'—}é^jØ<9çÊ™í¦€Þ´ôÞÈýVnØQ¾vÜL(·®*}Kש¬4ŸWô…ë¤ç§nŒuy½3×R]6š”~j·‘²g4 I7p]B_s¡;ï‡3? 
ÆT c!àë Æ†¨Àïñâm„[È6ÑbÈ®v3—õŒ¶œl•»#E–åÁ.Ø øZï&ñýƒ®C^V#Ylrô$ãÚ”’ß+7mìØ²ÁòLæ÷ÕÖ&¡)L%‡£lç–p¹&…›{jI3nISWˆµ!Ú'5Úóœ«_/m¯|û[íúÿ¥/~:ëÿ£þ{õV+ïÿר¿Õÿ~–?ôÿSó϶d½o2é_]<Å‘Ô8Û5ÚíMÎvˆûì¯6ÒMvwƒ"ÛcÉ$bø˜u:[È„g¯¹Ú³p+geœíw×:/Ä ùðRÜeM ez«Q†W&ÊÞ0FñÆãÑȉ ¿Ñ°Á=s®|æßcAL>ö0#ö^)bv,Äô&Q'-.¿`4¦`YçGÀHÛÂ?ßÐ(á×éy® ø;ÁH>v‹RΤ¿G*©ÀÉ^'¿&ñd¬‘B—?VÐMÍ@‹>ÆvaÃDæ”h (2 …a‹›b‚”dÓÛSÏ~ñº”{ÀŠ_Âg9'ÆçÖ ·µjù‰¹¢·cÌü>/ZÃê¬H§k†ÒFÝæô”F¨IË&Š1ÞäÉZpÁÇ!Gg0ÝY\áO´/€·½çÃÛ#ƒqÍœÄŽÈø@ò#Y]ð—²»hÇKxx¬qfÄÁ*OA©°÷L ¶â§ÐÄ1\ó7©&SÙLYrÑcÖíô'‡—§ë^öºýãmò³}S*0Öó3'À¼Añ_0;)Œ+¹gßõ)Ã&¡?Ì2˜QV´à~¿`¤Bcþ•$à'š…é1&ç#;‰Üí¬}À˜:§®:˜Z®?Á÷[­úGö¶[|ö¬ÃêÍF£±ÕhÕ÷Øe¿#à9‰T¥HOÂÃöï]ÌÛ{ßW'ã_¤J½)!ÂN ½Q"}£Û˜,ž#Å Š7 ;.ü*ƒ/ðÓ{ÐYþ6ÞÇÑÖ¯ÝSv=¼Ûä)ùbƒöèëСYëú "7œx²ÄuvÄÚK”[D š¢·Dm"ß+$ z|³ÍÌïB Ž0DZœ2Ü ¬±°@Gáƒ@F\‰¢j®Q¤Ä;– ù{Ó:‚Å=t(C£tÄT« C)•»ÇŽÿ<<¦JÐÝ©D>“Æ“Ä_¦PH¶Íï:ãq#鞨/¼ÛÜæýé±x]™@“í‘§¹Ì0ÇŠÊY”âƒE ¸TŸ sSØÐÓ$r9Bñd^p«’/«åî:“TÎQâ§“ŠƒÝÅÉ7aÅ“ärõ z«^kÛìño“/Â0r¾ù¢h:’dœjZHI§-ë6?Ÿß+ÿ!?\‡6ÉbžÎ“{áÚÊR•hV0 ¹,h†tµ/ºÄlgD»˜ãꟙ:“•|•“5Y¦‘E=)Ýf'CÚlÄŽB‰ÿ‘·šüû̧|—a‚àDšÜüeäéØr+\Þ1 âš %¸Hc Gõe¸Â'î`;‚›¸¤Õ@äù/JÚ ‘ § 1JœÏÇzJ¹— 7Œ2ôG*AZ ÑŠéQ3¬Ìcp"@¤øº;Î=áÿës¶·q¸‰ÉÀ~#vàB È´TÞeÊ'‚øFšòD Å÷Û6½â¾ˆ´C)ʨ¬lrKå7ØñùQQºà9 ÄiBá$Mÿj?ýíq ¥²³áêú? 
©nd£M‘9‰e)đ̺ö®pÆ4]'ßi¢sÆLª%ŠÄ¤‹š^…_&§ñ¯‰rÖFåÉÖð¶£1 â.b1–ÕXÞuG" •ÀÙ¡Ü3¡„ÕwR^ž½S4ò ·’Á…±Lñ;T ƒe”ÏÒÕ1–†g߀‡{ë ­ˆ'þWŽp¢U•†søLq½çÃR.|‹Zùlã°¿8./PTNjQIªƒñŸ¨t…òýõ¡2åÍ𩼋σõ¬|üÄ¥_–à+«¹hKCÓ4 9Š]åË «ƒf§˜®†Ä(œ³—_!ʳÛblé äfò0Vâ>½šy(¤¹+D§ç¬K¥€—©V†üÜ/úe™Ùð–׎1® ñE>(c°+êT£q™Ã¤‘5K*‡¤=wì=3ù·¸ÊÌ[ —­]Ê]K€L5“5‘®QRÌ‚VäØË×ÜÝxÂ|ƒ‡ˆá"gItkõ0âó Là[ðíåGmîagN’Þ€Ü]Šå‘ºùX~i{ÔÛßóþmª×Ý8ñ_Îÿow§Ñnçýÿê»oþÏò÷3æŸm¸%2N÷•{ž,É”òDåX,<5ô6^nQPYà6œôgzûãIšÖÕ¿"µ­?¹âGÃd¶ˆüŒ%Î0£÷Q¶:êëjï7xgjtÂ4V(ª×w>¦Q±†îUžÞf?±X›8ômžGðübpü3~3¡ž1;(áhδ$AG·ï$äƒÄËÏÜÐÃßwØÕäZ™¶$æ´Ú„2ü5ž^ ÏeÝx_4ñ ~c‹ºù óiB7ÎèÂF…øò|èÍBÉjA°ñ~"?ò˜OaÁLJx~òˆa1º>TÝ/Aï5 n<Áà ^¼°4wã;a2¡÷ ÆI<ö“ð¾‡Hê}<Éf$v-=dÌô…† =<™gzµ‡ž¶á€o°$ÿLÇËVCˆ%‚ùÚwÒÉ•èÁ…þ´§+‘ UCM1M·¬SXXFÖs´NÝipúÜô„ö8>aŸNÙVqxÅ“T¤‰ßDz†/¸¼íÚÏþþ‡úó§OŸhï6Å=üû¿þNKksCZZÛõ_7„'ûFZÂá§ï~á³Á6°èÎGXcX9é#Yî6) É‹Øß™úª=Àjz÷˳¹\”q4¶ÛR’0´«Å¹Ÿ¥®3ö¹ö±ðOÛ•;4™b!µ39Q4Ð¥ï²ßùõX¸€ÅÁ«‡˜§ùj$Ê”ÅdHå39]âÚ‡4LÅ#ŒR;‘~Æ[Öø%¯îáJ'˜ÖÎ ¶+ ‘b5r×ç ŒRÛFŸÂð,ÿžòê³asZF°ƒ[š1±ļ]'÷™àÖþ:@ØØß–†ªr'é“Î*È0[—ýãÞÖÙÅÑÉ—“óÍÈ~^G G>úøÁ†8¿€“àw€Li‰âgPþÅ•!©i"éáâGf3èÌ,™¸Ùprñ4üü;À•:ô¹Æïè¥/°æ÷&¡?ÍŸw¼Ÿ¯I¸Ô•‡¯Ð£¤šõƒò±RÁ·©K·+<•Ž”_³w2°h M×xó%FW¼÷\†À¯¸KNÈÖ'á){² a„ÈFãºñ’Ùv™ú9oå§F\]°-¬èš/߬CRJTYû©r…îeRôÿi û$)àp4-7â‡aøü \mŸ«¤±Ÿª°7ïàçûjÕÚçœvf…JGÎ5ñÊ:`=ðT3¿â‰¯ü3óg‚õ0ä=óM¼’˜MŽöTó^_ñÄW þwÜtæšûýôA'O5ûÏ~åð˜ùç‡H<ûôl3_±Ža]§µ#Ã>½ÿd| ½³*J˜œ†2ŽNL¶ l ÒBÒ³)?9PRõU2†ÊáÿPÓ¯ôž ¡1h«O6 %}ñ™_µ 0eܳ ûõN9yË¡Z;ßLK‡1¥?Õîß^ñT— ü»^ÞÓ½éìÍOySlŸ g:ã€Ð4–¼¨á«Ž]u2˜‰³àÙtˆ9ëwÖÆ¾W f[S2H±´cÝgéŒìÍ׫òÅN'cô,ò½Cîj0¸d Eàîå¡=æ ’ç5ú.àÝiÖÑ¿¤¥ÈXÁúÅwpÿ]é)ÈzãÂ&õBP]û;ÍÈÉö^È;yûþé™AœA:š•§µ×jîµ±Ó0äŽB6)#+u=Zmevl?x~7IÎWúë§é¡Rtœ;–úΑ $’ öÁýÿa™Ìä‡In Ñ*âŽU3ÀÔ˜Á-éàµw{LJÁñQá«Ï KÃF£8*…6HÌן :&Rjí4·Íz½U®…ÿm7Û 6'׋GN¹ReàN)ÝPFêË$ò\€2J†ü¼,B wFÖÄš{þLä´8‰<Œ…“ç@ÄÎÎz bç@¯¥¼Ÿ.­(·š–uf˜iqDÏ©ÀzшðÒÇÈ@9Xé[ߪïŠRä’ñ#]É£þ×IàÍèbÞl¬$M½77 FAèüÐß3>EëàÖ½Ø*lVÊ}>ï¼IIŸ;"§³H|º$ð죕'˜)’>¡ë5ƶT,nâ ¬l]ÍñÚ@wT 6 sÂtý¦©©j–QȞójôÎÅ7§+šŒ€u»Fµ,=cÖ½E&­µ»ÙS|/‡ÉR {0~F‹ê²–ïéyu¹0yìž$ɂ̲³!ÝGa:b˜ƒ8¹v¢àßD¦ ÏCY'Ï TC5ÉAå„—À,ÖA«»z^"“;TLU'לƤÓûøæ!í&<òÃÃ…/†ÃÀõ 1ßù®¥|MÆqš9¡LòSŽS˜ÒJ,àÍ…¤ù5‘UaIŽ LÏòC¾>_ Ÿãû M…J0“øžd g~v{3äcƒÖ8¹Îÿ*¦l4Ö:òk‹¼ª•Û’’w‡þ…ëNÆN”­û6¤N›æ¶¬ÉriiüN˜ÆkŽpu Ié”Jôïݬ©¤ÐO?ÒI²°Øi¼û¬ ìk2“¯^$]Ìè 
Ém!pÊûyVÐ ÐßçàáGf]¬8Áy–¸e¹±=ï^¦Ò\äL”&Šr÷rxÊÝ]Yíúz,ìfÓÀFæß0ù/šu,+Ü ð¬ÉnÞlðÜ뙂 £ó³ïíjF‚,\+kmÕù±6Q€¼3áµb:zPÏ+8Hncæ½²Xycèš-)!~»NšÞʼn]æ+v3?ËÙgHº¼à5¥˜zã*3oüvî9É÷íz§¨«8¥¹ æžêg†±*™”ÊÅ4‡"…?Èt®¡â–Ô£äR.úç‰Zb\™1 zÅã"4~WW÷z)È}GxY= ¶µRB×ÀùµÔ)®çâà—•N¡õܵ˜#¥øcNJwZ ¼"ijÍÕ,8W¤X4ÃWQpüj¨YêÍ\›¡+WŸzPæý?WƒÇõÃ2f³’ΉB+Ûç‡HiZTGÃS°ÇW)©ÆSÑQÎmÓìÕ¬LÉÂÌóÇÇÈ*ÖeÈ™XGÓÄF ¶Š”Æ Q® mwgFx¥ÂÂo¼h(Ÿð|¥j{Ã0)À[^Æ;UÎVˆAGÌ:1ÞÕÇTâ!e¼ÇëÊÃÍÅ!¥­!…©SI*‚ý8ËJ#.›YÔV=[Åõº†hÙ+YxNصÄQVÎœÈ ñ¶€Vµ€p“«h=d¿”@©µœ4¥Ž ªÐ˜­¢›*¬™¯ Ž) µ’:¦PEÉ*7 g¢ì*šJP7SË"¨uÚ¥d†HŠEf%» é£T6eíW“F8ëT‚1¹ ±(àQâ)[M'„‚×l¼t±£·¿Âßö§ÝúØmÃÉu=M°éõ¿õf¡þ×N³Þ|«ÿõ°,Ùçcd … Íl`”~ÇSCØìY³;'ñ‘Õ£@ø1*ÚÖ±ú"Ž¨Ö T¥ZÔ@>3Jeîê‚èÌIÙ[¨¤ò/]þ¿ß߇þ¾`ñxbn¿¨©)û–•Õyñë<(zĪúüM<™¬ô†XÃ$Üá$äµÂÿ8|½¸ÄÚZó¿Ø^¯s>øëUôÛ¿õyOÁhbUfÚàéì¸wøžï|>á‡á ûr28?î÷‹ë°n§789¼<íôX÷²×½èo³¾ï?†©!á:ÁúE™„©€+¿§0®ÐãÕ·`?ð,ï07?Ì2aŒí:š;À_X0ÄÊbÙ]`-¤¸8=ÆäðòàYû€ |@ N]7t\ÿ#ˆø~«UÿÈ>ƒˆÏžuX½Ùh4¶­úîfž“ˆ9žð,¯Feá`تØùG» 9ÃHÇ”ÓoSÝ{^ÈÑ7^¨eUBŠA)Tˆ˜î`4Y£Þ¨º~~ UàoãYáþÝ&•rÇ:izµàסC7] !…þ‚È 'ž,½ÝÅQÔ |æÕ!H켆¬(ûc;‰ƒßl3ó»X¬u†,3Fn­§Òôq>dÄ•ø :HâÉõ üˆw>,Y7K• †1IíD­.ªZ¾K;þóð¸KÒÊO@¼0$U$jò/ãó)ÛÀæwñ8‰‘tOÔÞmnSu@þX<Äò}Ðlä牰¬ZF¢Z†„G¢¬XÔð–½çÕlèi¹¡XÛOp«’/«åî:¨ò9ÅfR…zP… …W¾"—«Ñ[õZÛfˆ›|†‘ó :ºq¢k:–dœjZHI.-ë6?Ÿß+ÿ!?\‡6ÉÂD`y9ÖV–ò2ÊÐ ˜…\4CHÍ1V;ä—€Th §ýS “/ óUNvÔdM˜FQ–ú“ºÍN†´Ùˆ…('þ#o4QŽ?/OØ€ x‘&wy:v€Ü —w ¨¸¦ú㼘a«/Ã>qÛ/‡¨F;Kî‹’6xíW°¿9 éð¹—°L%ºcíp^W­†ŒhÅô(Èal¡cp"@¤øº;Î=©Šçø³½ÃMÖ!̦7Ü×pîñ‘IÓ zØ||~T”ð¢H(‘‰*m« Û9c\¢Dj Z‘"Çe®À⬥ûšõ]WåŒNˆAúýÉpÜËâÈüʧuÁ9(ßq,ÎpCseÝm®qMOªajîQ¥…ùæa_ƒp|ÿþ½)b`8Ð0œiJm& _y1P`YÐéˆ?¢&æC– `?ÌäÜikY5T{uƒ¸ŽÎœ±Õ²a9cöùÃÑ9^â¤ðC½×ˆ† VÂã'“ð>]¾ò&=¹POŽfýÊÜÔþÛÌ•ã~“C–׈èÄ¿ž„ ¤‘‰&äO¹$Ø…ÞY Ø{bô·44}ß$ªÚ¼¨©ÌRjeƒÓþ§~ÿtycäóÒäTëÌŽ LB.f’IñKæpvÆXa‡þýuél–á7wŸtü–±¾a櫵À¯ørÎÜ* ËEÍO(L( Na¹+_ã%§E°šÛ­†±Ò¾*¡ªÚp?Skûì{–gâ¼ÉÚ›x“µãŠ&b1ò·â™¼A-_ºâËAþæäÃ㓱9#ž^ÚXòþ¡ý/u®œ‡ø‰Œÿë1û_½µ·—·ÿµÚ{»oö¿çø[Ê oqÂf ÄM´$ДdBûü}ŒÙnåjÉU=·c× [æ@"§u =tE´GɪëhÌä6?+™:+ôðµY<-ƒ@&ô-}ä:8´1‘ðU`ù¨kx°É†¡è?‚â&í¦Ð‡{£…«›¤õÅwïL~engp,Ï!(É &pÀ'´XrôãºÀŽ (­¤<âÿ#“Ú:eÑx*08ðüFö0ÆsYP0o²l¼É&IXÜ{k¬bz·1PDIó/wù‡/{§ú< ­7Ð@bi~„òp1t®@cö.{'ÖéUŸ? 
OmNEAš¡Yç'’pþK¨ÞždÂŒ=ärUÃá)—Ø¿ƒ1amãî&yAZ†Ã8FÓÝdÌŸüÌýäùé7Øk·Dçé§÷øåÍmècó#›²•ÁÛ¨VàûUAöHò%aÖ™H› ÌL@~Á…FîNBöCßñ8ø*aœæ›V+ú¿È/b×~ÆMñ|½èé½]îª&h§rá|`jvòóÁf÷Ç+,Æ©¾©6–/ S>gVãIÆŠDHÌ –’@ÉT ÄøÈÜ9Ð2nµ#†<¦2f®ÀôxÒ®é%@U⤠³W¿ÅX5Gœ€G|MÔ<g¹†ÿ…ò_½áɶö^HþÛDí¢ü·[oì`41Ê­zãMþ{Ž¿·óß·óß·óß·óß·óß·óß·óßêü·ì7bg.„PuF© ¼Ë”OñG±ýDçÇ=ëŸÀÔL”%f’ê]?o†í9õÃá¶ÜÕƒ”3þ¦4j$üÌ…æò*Ž3`¹¨fãƒÔËG®":h¯AŒ!šýÈA#HñC'G1&IæhX§'ùǶóf%SÛaéãÉ4ÊO-Tñ¬4,ýó³îë› •ù˜paæãXk9¤µc ü4v_àÛÖ ±ø¯ãfk>æ]kÌOÆ=W>î=kÜgFé~Ó£ûiWç~ Ý8yE0´lÓ#¯3áì<”ãPRï×zÌ-C~¦Ñ®^ü_ùˆ5iÓñp]÷&ázºÙØmØx>9¢¢%®¢]VŽ‡Ì®‡G=´ÓÁ×#²Ý²-´®°_ÿ„°|ºuÂÉ AÔ6ˆ]&õ|BÞR¸\¶¼zX N„f·úÄz]G„íƒ2ùhõÖ¤§…¡L,êQª½uçše‚Qg’ÅO3ü§œ†Ýúž ˗й^¥\÷䣷ØÑÀI¿­5åìÖÌíê³Dšò?}E~#»†z°HEs’\ƒ–:3CRš/ðå•Ð]CÔÖð’â!fA|ý¶JÄ£+tAöi&É®r;C[[XÛ&¬‰»„°Ë#öçòš{ÿö½A0ñ_*Ïeïr”ìÌ`Å[9žv§âé8š]Ôûž±´W‚%ÎÒ¾e},}J45?ûÒy ì?¶ž²ÉS ÏäA À“ˆ\¹©`ëÊ€|‰Ü¯—íâ§þµã>ÆQŠ%û^LÒl´ÊÀyu®Hû†_O.vØ_©±¸m?Û¹ã~½lF¾O^½@µ_/Û)¢/õ›ˆ!±Tµu –Þ Ž£²Ç ¤×¿¹6êe«åó$}øÃ 2œ©×ÎÄ2RïûLÔu&©ÿ=€ ›n™¦ŒuoÇxòŠìIüÏ„Æó¯&×bæ²9÷©‡§Yß5 ' ÏœqHçž%~U€´ó€|vRÿè| BZ_,»yXx¸WKÃôå°t“ N(èúù›D顟d;ºž¹l|jãqÕ ~ó cm/5¤~ÿ´i^p0­ü`Z/5˜Ái¿a ^3‡aàGYg’ÝäQ¤ï¼àð„¸#t¼üí»/I_ÓÇÙZ›6ƒñŸ¤…Å)Ú_‡Ck½üÐú<+¡ïU 1ÿ‡:ˆ¿ùQ~|Ôø‚ƒâå©h7\åGgß}ÁavÜ,¸%ËU~ˆúÎË ¯J<ÈDz¦Vù§ò sÍÜ 1;càL²Ø‹Ör¼RYЩÅ×lÖ¥@e'é*‡7ÓyÐßùô†–Äqæ¸<¥#›ç8b€hÛ@¸aœú¯Š] ÏyH·€¤â»×ä³pìÙp[7+Ÿ\ýÀ÷sàG¯bÜefw9{™a·ë%è^ÿQ—'‚ÑçôÒ­ØçÊ£HVE©Óvg±‡«7n>ùÑs»ìèù3ìÅñpx<¥Z½zPÊ4 гJ ËÓƒ’OÄ(Ä¥û­« oÙJƒ¯r¥<9@êô™ç8åš1¯Yàb탎‹™f×|í«cç z>Õ`Xs öJ€èÈ×_Ç<ìOá5ÌB«žÏQ*d= lI ÑZzþÀ€ERØŒS«ýe¬ÅÆ€U–m¥û@åÐ0…s¬V›µ:ãŽî”Õ€OÆßÉŒb …ÑŸÓÕyÅDÄ{^©±,‘ŒU7q*1Ì BNÒë’ 7çȱ(/YÌ]hD·Î/“È-Ü@Ú(>íš}”´c¢ÚB³‰¾ü=á-håè¯JMZx1/ ¯~b´°7=Ý Sb¤³Sz9¬¨ôqte¦fÓ·sYÏVsÃpFa|BÜ9×’„ÿð|õclèxô3n$þϧ8ä…¥¦Dµ<ÿèÊr*,6¼|V SfgjÄ.–Añ\!×l™êéž5!×Äóñ†yår·©˜˜h¢Xsy¡Û¹ge0}®ÙŒ³ÏUçn”µæ C{swìp¨üGmå9wW«¢7œû'XŽÚZ­ _dãÍgØ#Tº'Xeišw®`eE9¾Þ˪Òrt†ißrM˜E-¿̤dÕk¡4VÉãªxtÉBž–jè‘Û”‰(÷ŒåGRgIJ‚\}Dñ ¦Ú¡ßåYlò Ð^Ó#fsOYÑÇ•÷èÝiw³Iš»ä,y[„$å ‘RùóU#Ñ¡;òfIlAî=3&7_‘{'_ò¿Zõ*nå*&ØÚça?îêEûVÃÈ„ÍÝÕÇÓ!f$ af³¤`Þ±]þ •s£_½m'¡^ vçß —<_=*Ó«‘ô]]çÕ™$[]èÌå¹&bÍ‹c*J#7y /s‚µüó7—߆dÄ!\ý9HýÓ¦üÑ¢è^?,û„} 2Äû2gkÕOy³vW_Þ¯êÅÜuÞK¶ áⶈI.Ö!syüJ—Y9rÃC'[µSh)íÌÃXšæáçÑy×P–—…&B£/œwë6óôX·ê³XãIyÌ™,炆¥ñc¤ï'³g7†Ý¨Ú 
¤BÍ‘Ã{6Õ_}Ÿ›Ws÷sö㪻üÝ™¹Lí¥«Ú¾ýÍú·ýi·î¸n6&¢|šÐÓë?×[»­¬ÿÜÞÛÛiì5Zÿ«ÞØiîî¾Õ~Ž¿’ºŽì«úY¶ÕUÝItñ%B÷_Â8N Å¡ëXºñQT‡¦Ú¤Ÿ¸8Á’›s0ß–¹›*Šv¹C+g‰Œ›øáÁ\±Oxó'kvãqŠ"7qè¥,Äk^Ó©ˆ%‘N¯lãW;'K@¸Ÿá²q€?è0Kyv[_ÿPc¦³:E&n¬ÎI½fz©wª¸HÃäyѦÀn,üVrE¸Ê€pSPÛ8:Ÿ‚kô8f²©Z ůi”tJ¿7fòYÁW„¼}9(ˆƒlÎÏP¨¤ù %Pwx±o*ÍÌ_]r91ÅÞ6”&ôð¹qØÒ8,Wùá͈Iû({„‰»¢æ¯!*#ßÊUÊ©°Y”ÕKш°Ñ $õ**R4&¥ôÎåŸÀÅ;½¿„¸^1Ë›³þ,Rôö§v=J·¨úÐÓHÉF³]ÏÉ­Ýö›ü÷,¸ÿ}>FB,TèæâQwAÁ./ÖAsv s– ñ‹!оEGÛ¼øy”9%=SÎÃ:çsKzSä<¤RÒ‚Þœb[…œgHy4w³ÈyeRÞ#£/N]7t\ÿ#ëOðýV«^ñ(âí á9ýÏóÞ!P‡Ã7K¶*vÿÑ.HϮޔAbxÛ ¡7®ðÀÀAäüF·ÝØó9R J!—î¸ð¨ ¾ ÕÀÊàåøÛxwG[¿vOAäôüw›T9>i¬ü:tèÆ£+Œ•‚ÑÁ~é†÷qZ1wñGÜËÈz#PM{.ž“/ áNâÀ Ç7ÛÌü.ô <ž#‡ÇÖØX ø$Œ¸DI<¹F¹?ï|X&ÒpE"ÕæÆD'}t&V™y#g-Çw1 º"•ÈgÒx’¸âËø<ˆËØü®3I÷D}áÝæ6¬iùiR&Ðläç!o‘ŒâÑ2qÚâƒE ¸ILWAb¡>¬øÎ:Lâ‘àV%_VËÝu&©œ£ÄOÑ? —Kœ ¯|E.W¢·êµ¶Í.ÿ6ù" #çtÄO iË’ŒSM )ß•Œu›ˆŸÏï•ÿ®C›dQÈ„pÆ([oÁÁ,ä² BjŽ¡5á—€Th.£ dJQ¿ÊÉŽš¬ ÓÈ"Šƒžn³“!m6bG!ŠÄ‰ÿÈ[Mþ}æãâ² $†‘@ÀÁ_FžŽ ·Âå*®ñ™i 䨾 WøÄlGp— ˆ<ÿEI4Rq„/F‰“àù •æ_‚ÞHW$åi­†ŒhÅô(Èal¡cp"@¤øº;Î=k%vž°ßˆ¸È',„w™ò‰ ¾Aç'Ûeï·mzÅ}iI‘„ÐìaóñùQQº Í[Y‘y|eÊÞ‘ ;°?zǤ¦Ó¼gvÖ5Îû~ÇnOúOî],Ùb’ù Ïîä}¼ÊA7wŸfÐm{Е•@Úõ§ô®=è“9*,R¾ü ÷ìA^©Ëö‘ô¾=æg(ÿ·ü˜rýnKº]ÏaÚ: ˜¼‚7ì?A9ž'v3ïB•×5|n{ì“ÑðŒ;·C~õÃñeo•É·æÛ%gõ>Êí‘'ôúÆ÷xtˆvþb¹yHñA_À¶¬/N„·¯ÜFô…pÝT|O_q1žÌ¹-™ó¡´¿³É”Ê–ªüÒv²ïõÏÿ“¡ÛÜk<•ù÷±óÿÝÝv#oÿm¶Úoößçøû‰Ùó ¯÷åáõGvè„ Vz”#7 Üâ¡*j¦§Gní§ÿÏZŸâ…ý¦Æ`OhrÆÝhµÛ|sØÙÛ‡"©žJ.{'úH…¸ú™A+šÐîE¶XÎã\A|Aå;rÔ?¹ŽâÄ?é´)¸ŠÎ>÷=û^r•Ò^ªžšaïªÉ”ixòÜ«X‚Ô:v“ŽÛ|a˜þò÷òrü ‹•*ã©ÄÕ$}`ž“9ox®Æó~Ý"äN·šÑ˜‡ØvãÑhQ\…6åqô¿Qö oäXGÇKª‘ŽœÏ2æÿkâ§’xš‰s74OAsÓDóÚ¤9›NK0MÔü¼[a=œqzgt0)lÚ‚¼Ó7ÄOA|«€xbß³â£ø§7> ¾wŠ„Nl|„;#íÙQÝ.ã)œÏ…í„3s“—ÿˆÏ[IÊðÞÞÞß3Ñ~l;ñ)d+½ä8ç¿”VMMH©­%BÀä?Í­Âhá<Œ7ðõe>Î\Ήëéñ÷DúŸÐÿ[{æ éÿ­ö^³àÿÕlî¼éÿÏñ'ô5ÿ ÿ£NoúDwñ=‹)qVOÝ/¦ï·¶ûÛu4ÏÚÑÒ˜½5IÃÿ“X{*c¥³oWðõJ¦¾GoàÆTéE+¦òÙ7P õ½ï“`KáM#]x&€ |?¤'ËŸ€ä îÒ­‰/ü‚”S†_®Á$¬Ö[íÖÑßÊFvdþ–ð2w™ã‚½Ñ@"¸åîYÊåëyÑùßæÞÿ¬=a7JPœM'Ü èzâ\û[.E¾N’<™ëv&Þaòtšº» Üæ' z\E¹MR6òÓÔ¹&¯3îÊkº"Ýoá Øæ -‚×1k­Ý¬…"ßž¢ñÍC Ò1LŒ ×[ SÐZ\ A4Œ5JÕáà-R8H#ŽTç ]äÞðZ…×^ѽ;‡ÜŽ2¢ïw~§“Plïqì|ó·€ó²=?Ôˆ>C\äÈÔþƦaµUäãñ–ðdMËÀ“n}i)/tyÝñŽâ0LDK/}fŒ¯Š›EÂEËÓ8ô·¼Ø ñÖ?ã«rdGå€4qã“q Øú5—"š3˜Ø#ŸÉÞÑ+˜Áìy¸ŠcL*6;†÷ÖžšëÅýͽq’ÔÏf‘ñÄ£/ 
ÚÍBÜ»­WÀM•ø/¡í¾Oª£x"5ôCÖ­ÜÀÈJKg=àk}Oì¯"ÐÛå—,õC1a†°édôË`Ýf£h‡AlU’¼æ6Þ\é ¶ª? ïw±È_À3P‡žù|gK¯‘W,>–Øe°nÊ‘`=t¢Ç3·ô ˆé'¬ (§Àú1OU!ø?/5ïƒN%"Ø©ó7nTqLW”zødà¹NXj=°ŽEiNÓoXžFòEû Õ‡“¬Bt?Çûú©¼™FiFR¡MßPn¡¼h1à;æ¿&”àyÖéAÆÄsb>Ž~æÝ(¢õŸñ¢–òÐO7dÖ ï1ù 1ëäÌûá(¡7ÊôÿqP-ªŒœû`4Y >ÏE?e£ã#¡IsÏÅ…Gœ¦©ôýCNF‘¡ã2ø¶ƒ8=:%(½àqÇ· Œ¯pil4>Ö›;Œ¿Î7æ>l¢½ õÜkÁ“p2Šç}t*âPVò·©iÅKw’`…¢-®œæ ÜW.SiÞÄÓìf2Âó<ñ%z‚m˜ë»Á5]ÊjõvÈ]>%ÊwŒEÛNš÷)š>âMÆß,L¿ý6Ó¦£hyðü0¸õ“‡-Ø…ÕN•ƒVšaTÂѹpýå>i¸˜­ŠÏæ°ë$žŒñ)óVªUÆL¨ŒUã• iG>Š|üÎáÈ“ ;{þÐÉÇ¥19æ—Õwå'¹mMr°¢œI2 A$.ƒ%!€uO¨Ü ÕýU bá)ó&FæÇË)MA(ÆâÜWAö$(mZ(=ò£‡e1êùQ°ž•ãÇCÂë8y(Ú*ª°$³òº#ïP¼,qÔQ%L¥†U5ÈÈ Ñó¡Òº Kò73:Ì›í÷1ôPQ<4ÀˆA'SSèbX€ÇÓÀÎNZ2%0f H?—)V¢|(«« ï02ßp'ìø.¥‘á]>64÷l¿TâÖ-ü…æX‹21ìý}èàb¬’„_ð®0€ckŽ…ŸÉ¡ãøm™´hÞ´ÊÅ˪Hž ŽQxÅ îþÊËÔðu&J]!ˆf_l»6z“¨“Vp$" ÅŽÄ¥¯™–ÂâŸgN*ØçtHŒs@î‰ÊW ô°u, ò» ãñcþ½ïNt•4SJ´mÏŽ‘½Œßg+YPÖŒYøZï…æß“ÁI1sr•ä¨ ü´@ &h¼öÙÌØß/Á>¢~¾€2tr œ/ÏEƒ‰IýT†‰Ï§ÿw þlŽ­í º_±»Ò%÷Ì ­>¬vFF> øË‘G²…¹u9;2õò•f!dޥƇø½­5ø'YqFQηæhŒË-º›8E6Ÿ~›côR¦ÆwÏàU9Ø“.õDîóËi›n^ Ïé°¾›&®Ê€}i{ÜsÿaþÏQüï —²ÿ¶[BþÏöÞ[ýgùÃüŸæü×~bâ²sÕ¥£ÛN8¾¡”œì0ñ…#ÈAŽÊ×ö¶þó?‘=ÿÜQMûÿŸúõ_ÿÅÍŸ›¬ÍŽ|—×Üèö›Ø!6eCç—÷ÐqÓ`„X¦ÈßÄ‘ν¼áh#!ñàÛÒXÍ6þû¿¯&×ÿ_«¹·¿×üŸÿ™a ;í $>1’}9Y!šjkºq”9A”R"ÔÛ–´‚ ÅJ×A8ãxb ’Äú9Ž¿aV,h›’;2寄ÑoooÓ7Žü± F2Â%ƒ†!!“§šöÏmX†8Då—°G¸'˜ð¡d‡_ãУ^€»áÁëÉL™':S&ÛÀlÅ›ì?1yr}ïà¿>RwÖŸß$ñšú@€èã»›Œöß~ Ü%Ô[³½û_lØŽHF…y/ú'çÇô-uBÉÝbæÞu‘QY''ðÖErÍéÐ|±¹w°/Â[°%\së}KêjííÀ¯{¶¥çì3û@éœ?°Ì¹úÎ2Úæ×~ýbiÓ‹GgsDÛŸêG\¶Ž^Ài0µf9ÖšoX›†µV9ÖZoX›†µr¬í¼a­k­"_ûü~–ø~ö†¸iˆ+°6¸7ö6sö†˜;EïÍ {8é¶QEì+„¯Àˆ8e8Ùk¬]X7å/<Œ=ÿ™¨~§þ‰~· w‡ñ3ø}4¿WÞe®&(æU²¹ýÜ‹¦H5mÚìdíákáëûnyÇ#' Sjõyëf9A2xò› 'ÊÖPÂó­íê8ˆ‚Šª³eí4øÁŒ7wŠø]lÇ¡C²!\UÚõß³ëàÖÄc7(ã]øñ‹¸˜ñŸø òïå„ Ûe¾¡™ohåvtCN~µo˜Úœ}G« v;éGù&”•ÊÚšv#ˆºAn‹ºÅØHt£Áu£!¡Ùü ‚]À/ 'Ëã\Ó$ŒM°ðâb8 \ÿs|GÇ8ŸtðŸJ²Â4fAú¹P—gf¡/}ŽöZÿðü×÷&âåiN€§ŸÿÖw[í½ÜùïNcçíü÷YþðüמØ<û¾Ïn²lüó§O£ÀóBÿÎIüí@œã5·á…Oê¥OßééYû ÙÚnðÿ©Ý û$ðÈ0ÈË5Î]Gpvœ¿ 0jà¥YÄ naÒ&$*C-ß=FZEŒ\$×Gçp ºî¿¹ï¿1KèÑ4måµ!i§I—QU!Ê ÑÉ!›_¯Aí"‚ºI€î¥lGÜc‹±Ÿï†¬vK±¹ÁØ m–¤šÙœŒé»ÁÕ^Wljñ̯1•küîù÷~åÊ+áPrÝ•pª‚M‘Õwã±ï•r)~kA&µfHzÄÊ¢pT" }È[T¸Ï}‰\‰¾÷y±Êl#Á"×ÀIÔl,n¹»šAš7Œ…ÏÞW®óÂÔç k1q/­n­Ýßö§v=J·T]¶§0L×ÿwZ»ÍV^ÿo·÷ÞôÿçøCsøçc\ ‡Ý¿`©~°Ï§‡¿I—èn_'ÎMÔÃÄÇ’sà Wï/ì!žPÜ]âcJá° 
BºyŸ¨.±‡AgÐÀ#ò~âµCüd¤âF~=¿d¿Rê¸u'Waà²ÓÀõ£ÔÇ@Ì1¶`’™à ~¿/¾ý}‰¡gb5¿(_ô¦ì[t´ Ï•€¢GìI«ý °/XC–59'©?œ„ÜÞþÇÉàëÅåzëœÿÅþèôzóÁ_¿c4V\öo}ÞS0‡˜–†™8Ïý{vÜ;ü Ïw>ŸÐ¡ èËÉàü¸ßG@.z¬ÃºÞàäðò´ÓcÝË^÷¢¼M–˜G0¥j@{~æa* þ fGÅß8·KïúÁ-&TÅ”é³LAG×ÍFà/ÅÙGv—™ ¶¦Ç˜œì$r·?²öø€œºnè¸þGÖŸàû­Vý#û§>{Öaõf£ÑØj´ê{ì²ßðœèL›E.wL± Ãöï1I8µ÷¾¯NÆ¿H§ )!‚êªðM<¼¡"{0ð0ˆ¾‰HRÏW‡-’RÈçîPòʨLÅ× ìAgøÛxwG[¿vOÚÖ߉ú9±A{ôu,®‚ˆçÆÓ UPTÛùˆ›tü#P£@æ(d{'q`Ðã›mf~úƒmÏŒ‘Ãã kl4Áô§Qø Wâƒè ‰'×%™î|X&üdÌãu&‡0¯2ï¥?«‹"+ðlÃcÇw'çÐÝ©D>#¢èËø|Ê6(0»3'1’î‰ú»Ím:OãQ ƒ 4ñ²PY€‡3ÕkÑ #ÇMbº ¢ö†“ÈåÅ,Â[•|Y-w×™¤rŽ?Å@m\.qòM„˜Jr¹z½U¯µmvø·ÉaÀ¬žXÔ6º¦¡cx‰ ãTÓÏ^[2Öm"~>¿WþC~¸m’ (£å!jã¡~Ø ˜…\4C”‡ª_Ò%ž_íbüÌ?2u°Œ|•“5Y¦‘E=)Ýf'CÚlÄŽ’ÉH¼Є1ªTœÊ"l@<€H“»ƒ€ƒ¿Œ<;@n…Ë;T\S€.ÒÈQ}®ð‰;ØŽà&.)@5 Ë}QÒTD‡‹Qâ$x~ègù— 7¬ÍEmÊpƒVCF4‡bzä0¶ÐLNˆ_·qǹ'[×çloãp£ŸÀ~#vàBxHa© ¼Ë”Oñ ʳº]ö~Û¦WÜ‘vð‘ISøxØ||~T”.è Þ©QVñís?K]vdU›™õ)'ÛÁÄ`¹‚‚ÒÊ¢´ï»“Äç/uã$ÛŠO(cÅ›2ìT}V25#ÝtýaÞ¨îJ0ÐG£8ê²PYÐ"ë2M Ì=挆ñZxG¦1I|„\gí1¼s`ør ¢‹ÿÄ)Ðh×{èíº=ôÏþu]$îzºQ†ïž^#¾·ÞCo– c=ʰ°ÎCoåH%ˆ¼3öŒõÆøž\œnôøÈ{q¼ÒUÙ\õ˜÷ëz̘8c­¼ßЃ]{V½ßÔƒE>ŠÖ›Zz¸ W¡©äuõú¾“¸7ëêkìý4\ïá¶mÊ8}ÕP'LW8n+‰ájƽ«Ç}ê¤ßQ@C#Òzc|Ï ×³îïç‡Í%=gÍ9Ÿ±ö"—ôü[g½å±rÑ´w¸ÖnìÙRÒÚoßÍzN$}Æ÷ãÎÉ£ä€ÿ…TܵfØÍúŽ=påâ¶Îd²£¬QŠì㓼œ}¡’k=òf#·$¿p¶½Þ»c³‘3ƒt®œÈ‹ax<ïÑ*çSË^Ž\(ÆD¥&ZëÁ7wrV‘ wwñ.Ì×zô;õá TØóCÿÖ‰2N;«ä{+¿Ô'#Jäxœ :Yñb5]ñÊ80"Û$–®YsI ž<ðU§iMÏÜvîßu\ãž^(-›šè å5±£à ,¤¯qYì´m]YI«_$»¹Ù ²zu`4¦ È€†öz hXË»›·+UZžvô»ukôIvó%t®W«w=1ön}™&˜,Û]Ìu×ÞïºN2Z±xýÄã·79ô®M»~ò‡ï{M¤´cAq>Æ×qôªVCÛÆ/%àWDN»yrZµûäìå¶…ý^^ÕìçXRšÞʼnw|?Õê5ME£Q·EŒWEHí¦½¿aÒPn_%µ›9mîõíÏí¦½Á½6!©Ý´·¶>e;é:ÙŠOpŸ {wÃô ©ð#~]pØûÚÇh§~]@ì•1¾.(ìm®ãºßâ^EÙjè™sßÏâÄY©¥õÉhÕ‹Âë×x’¼*NÛ²÷ëώ׽;\±±ïéÈïØ”lŠ2;¦UÑjåÀðüîk[;yûF°¾B9¶Õ.“c’àö•ÑTÎòªN_—Êy {\ÿt­OÛ¹Côµ÷¡mçNA_·ç®”"ÒɦËëž<»' ôPŸÏ›EùyâI?¨–ðžû›¯ë^²T¶²oþÛbî$I@Å$®£ñ#7yóõô8H;õª…ZÞãf!õB)TÍm­o <)4=bÍ3²…~I0eÚKãžU+o*í¥¹±X‘I«ç{ …®¨üO7ÁË«£g›Èû ŸA÷oqiHQ#®@iÇR ”~:©?GKù<ÚÚÅBذ½씥¡ÌŠ™Âª SL’©¤Ô…ØtÕµ°Ä¨k2k¨+m$PMê€HµèóýI<½PW¦’®•Æ[hõÇ´.¦Ú´b£G`6èî”î š´$®š”…ß~O0"%%ým\”4GA6jñ†¤f4“%8?%$´Œ­ú«9?uÃò*à­b<¥].<ÌHÒ¶üóÌ4mâ3'é6û\_·§äº÷ e2yïêQ§‚?¢”ûA…ñõÎa ƒë9ÑÈ3G+çW†ÞLa`édä'ë5B%c-?Oµ ÒØf¤;CæÔW¢¢°$ª¯¸°§¯ Ÿfû!À¦Û¸$n\[q 
ùvr˜o—¡ŸÆ·„óºn1²5˜uc†ýˆ%v˜+7VÝ”wÕwŠ>€O@»…@»>¥dX€<€8[A{XšGÄñ_bÒe,×9ÿ](¥nðÉ-†øðÆBüŒx/O¹ ó)cꘅå§}æÙzÖü?˜ÿ9;.q¨ÉÿÜhÖ[ ÿ©YÇüOõÝú[þ§çøûé'Uߥ*LøäÀ8=L‡g¨À\l ‹d£”Ï.NŽ~þ[iUoñ¤)~þ[ùƒÛ P#oèÚâ’9U<ÞTÏÐw nÎÜ÷<—rzÞÔr9ÀEÖGM€xØ—À½¾¦uÛˆ •cmsÎåœk¬<œ±»?baÜ2¢o’j>›¦Óïã….®žÀ»îÌñ½ãf²ìÌLVˆMIËƒÆ ºÏ[SÃVÂÊ€ã‚pïQhŽ šx&LvÃkúÌ3z³ì½ý cì˜KéZÔj™Í‡ß4£Ø0ô`ˆ‰—J0FÎ}0šŒXÄ«7ÄCL †ð:ÛÙ$‰æ€f'ùµ•Ä+y(bsé8]#©ÝÄw2›Ôå ¥3fðî8ñ·”äùóLZ[ƒiŽáCeM¯™L¼,ÔaSж°…Ò•3 3¬ÅÜS]¦ò3²¥(¸D€¨,7.'õ5\O‘Ç®ÅÎÞ®ÏËÒ‹©t8x§1HVýÌ –9Ä[0™xo•`îi0 ƒxRXM[FÚ±H#L|á…Ý*¸ŽVò~ds,e@ÏN¾¥Ÿ9÷¸á ÷?õ£ëìÆ`9qƒ…ü†撲 Yá›+g<Ý;´%u¼Ûó8Ãd[|€q{΃Øå0Ú&ÕIïÈ@ê£ñ‰gÂð-‘Œ]ùCL×8FÕ¤Oü¹ø’rÍ+Žq•8rŒ%Oÿ_2°Y5¶Xr‹õ©ÙNÌÄ@‰²çéÜòG#/€,4Ö9¾×*,bªÅÍËïA´lƒkN«Ç‘sú/ ,²AïòX,QÜH á´ªÚd”ÝwÕ‡ò´Ž…Ù5ñðiÍ2óƒv~9ß¶g™›ÐÙì“ÒÒL(ø‰XßXíiõ¤™‰–‹6¤qõ3¬½‡¢ÁÎsˆ¯È'ÒJÒ‘›“Ú*r [wvƒOn pš²&¥¼u‰VAÖ0¤sTr6({dàæfA‚ÏTãEïè¸Ø.Ré#cç=-G=õ¶9û(ͦsÿø4ˆ&÷ü´d,œ^\2ñãË]qvJh}˜ {U¸H<}ðÖ¹¾C‹ÑD’Bæßg,Ægƒ—$ƒÇ0²˜^o1x­Úÿ: øû+è|EP&^X‰°ÿ™»ÍlVØ}´ÃÛ‹üÂ]4@VËL ïËÕò÷e íû ñ}AÑz_ÐQÞ—Ëñï«DÞ÷å’áû2Ñê}¹°ñ¾bk7¿iìŠâ“ù]WÊ®BÊÙ-Z›Ìfß|hH  \ñDÆî'‚¨0OYû¿ù¹(¯í‰àŽd_J8Öf³YÊ;_Ú0ùö÷,hÿÏXíOdûÇ¿Gê?6wê\ý‡V«¾ófÿŽ?¬ÿ¨ç8×!U(–®]ÈDè6(P!÷A¹ Æ)_HX”¦ÿ=g,K>Œ]ÃmÏý»þŸtrõÿ{àßÉâ!Ü,67þ¿ ê{{~ø÷á$ ×õÓtsƒÿ›ÅçŽJsoÛ3ìë/Ub²-Äþ¿º*D˜8CLõ$ ü€†Ê”=WÁøi¥Î䤒³ýÌŨlêÒœœfq} 6ÝÝ @rbJ¹È ê)¶ e>/b_“%«LD6ó”(¬èò]~– V Í,ˆ†”=LÖòšV2\Ó·Q:\ÎY®TÔs"æ¥ð ÿmª·“¡»Ónî<™0}ÿoî´v û³ù¶ÿ?ËßO°á§Xÿ½Á>ý¹ 3Ãú ϲØ^bÏeõíƒífk´«ƒf½Þªƒ~…ÿm·¤Éxbìb™OXT_jCè}9d¸?<Û~Zœýrd­T…,dðN¨HÞð\åË5\Õ®¸š!Ú› `ç Weä°-4ÚÿÝlïþÏÚâE!6Ø7þÀ> “À¼ðÁåÎÝ¥?Ý|<•£ˆZŽ»½ãÃÎàø¨r„/ŒGåìÅîÝþ1$ N¾ÿ…¥ø$à'²HXò¶¼Ðtmcç4v•`ýƒã¦½›ÃMWT†|&ÂYW¼¨sh‰—–ì}#m0—ˆùS{Ctdƒ]ÜÀ¾áFîÜÆ¬ãyÄžgŸ‚¤âF^á #±YWXDï¼îMhA¿~èñúœÜ+sèÌì»9œæî.‚Øv}FœVŒõ¥µéÔEhA5;ž—øiZ±t1²¢’2ñæ"HÜi¬ëêÕèI³7f¦± â3¡£¾³¿¶øÐ*„GJ†î~³‰Wñ}%kÃÆ< ¾0Ì—–¤WåȉœkÿPÎdÊ~$øŠ2mНeC«íKcV*vqríÈg'ìgN6©ÚÕ~(ÞT”7¦µ&þ}-tW5Ø—F«dacP}àÞtCŽiKHâx$fë )*Gn껉Ÿ9ÉðåIéq"ÒV¼Y#näzJÏÀw‹“‚oïs8_Tà}G…:ÚûBfUü^.{SéS1 <åHa'àÐy7(‡LÇZcÎÓâÉñ;3èyïtØ‹g .b`|ÀŒY¦S|Ѷ-üœ&÷š´ÒÏ)ãôÚ¢ú ‡ê¾¡4ÓîF´fQ˜B4!I‰a"Ôx]ña˜‹åñÿÊs"&F¦)}é…eé[4w5A‹4=t2ÿCx9vîë ãe 
¹Æt'‰ïé6@4l»D_'‘‡þêŠþƉ?ôxúȃ[?yPipé÷\°1¦!óï~2‚ÞB½dÄÓ†à*ÞÀxÁ$šJÐ?:—wÙÐqÓ`TT´`Lx˜Éñ«¹×ÿßÃÍ1ÙòD°±iØ“/Ü<¤a(áá/‹ìt”-&,,kIg¶‹Âú’ÙžEf=t¨õ=áÊoÐ\Áy.®ðI©A¼‹¼o6q_Æ\âuI¥yýr¸ßl†Àíéâ3™ƒ¨jš²œ§,«Êµøø²É­3k–¯ÒJ®Rž¨ÍåÒÈà|å1N¤ø–Å×u=ïšäÌ›§V;§H)yì­+*Ým0‡>å ²‡Xš¹u>/p/íp¹fÛŸÚõ(ÝBÛûKùÿ¶õvÞÿwgï-ÿ׳ü¡üù—ÞáE÷/X„_ìóéÅáoÐ>¸ RÖMâëÄa¼Ä˜;Kãavç$þ/ìD~ S‡M!HeÖ§ “Á@£ØÃpNh˜Díø¥ø€=d”bÒ!¼øõü’GJ:!#µÉe§°E©îc¡HQ‚i|ü ~¿/¾O!HÐ3팿°[¡¯5eߢ£mx®=bO&¹‰Ç0¼'Ã!ßa(‚퇓ð#Èðûãdðõâr½uÎÿbtz½Îùà¯_(&þðo}Þ2CL=ÃLœ({À!÷¿ÂóÏ'dC}9œ÷ûÈEuX·Óœ^žvz¬{Ùë^ô·Yß÷ÃÔp`RÆ Vq* þ f'…q…»qn}LòäÞî1Lî6~˜e Â8º&èhî4ÁÔQœ}dwè•6…é1&ç#;‰Üí¬}À>îÐ]7t\ÿ#ëOðýV«þ‘}YŸ=ë°z³Ñhl5Zõ=vÙïxNt.§HKÇ>HÅ!óï]ÌÛ{ßW'ã_d×oJˆÀáÉ·zK‚ë› Ñ7ºí¢XDH1(…²KÐ~Ü¢ÄéYb:«À߯»ó8Úúµ{ÊPäz·‰ ?i¬ü:tèÆ£+!bh]¹á„R]Њ¹‹?²”Ç)KTSú1‘šÈÆŒQç0èñÍ63¿ ýQÞÊ2cäð¸ÂaL…üqdÄ•ø :HâÉõ ücV&Œú–)µHRIpHÉN­.Ê.ˆ.¸;þóð¸;8¹8‡î†H%ò™”’ò/ãó)ÛÀæwñ8‰‘tOÔÞmnÚ–ÅCèÊšà<°vLŠ$Š„ç`Žcžšp“˜®‚(Äø‡á$r9B‡I<ܪäËj¹»êõ|Ž@NÅT¸\âä›HÔ!ÉåêAôV½Ö¶Ùâß&_„aä|ƒŽD.Ø™¨$ãTÓNß°l¬ÛDü|~¯ü‡ü pÚ$‹ Ó`FX[Oƒ ‚YÈeA3„Ôg7”ýÁãiIˆv1 ýŸ™|™¯r²£&kÂ4²ˆâ 'E ÛìdH›ØQˆ"qâ?òV'e˜~)GØ€ x‘&wy:v€Ü —w ¨¸¦t¼¸Hc Gõe¸Â'î`;‚›¸¤ÕhçÉ}QÒTä{£ÄIðHÿɽ½azwŸRÀ#íÑ ÕÍ¡˜9Œ-ô1FWs"@¤øº;Î=áÿës¶·q¸É@ŽoûØY€ á”»Q*ïRu ¾‘â(¶ËÞoÛôŠû"Ò>’" a´¥‡ÍÇçGEé‚lï}‘zÈL¥Ä=ϹƵ$ÒȳíûmÐ%0ž:ÂHiÚЗ Y…âü¬ÏÚÛ÷…@£™+]îZ4J©–Å'òi C…‘ë{ÌXî¥,=pÙæ©í±WßSÝ:œQÞÍ0ÑñkHÚ˜Ä a"é×FËðÔºNÆ› 9èIaiî> ,Ͷ‹X4ƒx½aÁ¡ŠSÚ!tJîþSôXŪ¥‰®a­ãÄ ×÷xÕ†3_8®<áZj> Ûoسæy_}Ç{b`žj15mXx†‚ÏIìx®ƒØ×T«nÕ#[úàsݲ ¼F šöâéÅ4¾âUžô €xw>V|ZfðD{OÛØR£ôntLöCÐ…¨À ?|•sÔܵÒq’ÄIúôzøS«­ÍV™Jôz7Ö¦Íï@uèùx¶Óq×ßÊHúÎQRæ<.áõ!ûþkd=‹Û}‰“;'ñ^7o5ód‡¥`ƒ?¹0÷äüá |==¹`÷DÄ··otMDEÁÃßýöäÔ·è„Y•5¦ÔU±µ ùn0ÐÛ.ˆJËNi®Í*؇¢Z·á–IÅiù?yk‡h*‹²›|ÍPîd“© åÛE›}¨$ F´›r–h*ÑþÅRð7EŸ¢Ž,÷ÂI€Žœ–ÏziIʦט\饪ÞMiù嵞§’ÍR¶&Ej*‘P¡½p¢g´ s×ʦ؜äc——°^ãÕkžÈ낕ݾ¡é§x($Z Z«hW§aâZ åâRüÉkµ+ëS:Ì·Jz-Û&D»iV“M9<†RÜEègüÄ"CÚ¨ÊÒÅð¬pò„ÎêéSSÐ{³Zu1 'ç‰]uç¢ÔðC£ý:OšoÒÇ‹ïN4;¤Ýh½hp×å ¯Óðöìá9÷ë5¼}kx8IdT{^!XC<‰/n׌UÖX>Èãûq¬ÛÖ¿„Κͳ*i0"ƒëeà­.8¤–5¤ó áãÚ<öx¹¡)û°Ô ÆáêÈná¡É# ¹¬'ÙšÛnaŒIœÅn¼²=تK?×Ðt9v9¦u”ýTʧ8r{cw-‡x 0‰ç‚«âÂó+ýaTB•YùÀV!Ú7ÌQŽœt=G©ö ÇUZ/=¿rÓ¸Šã¬ë$ÎÈÏV¸2åÊê@‡õ%Xƒ}B:­âv挩ØÞ‹OÞž5¦cJb÷¬ZY!—9À¦2X˜Õ´Ë8ÛÁ¯køI#ü¾6~[*´:IÏeÀÔJ,¾ö€ÜÑúæbðÚRëÒ#¤åŸî‚ÐìUÐ%à±ó‘4ZÅ~ |E»K]ƒ RW.½4 
ÉA]ñäÄ«[&ʘø¾¿_oùsòÐÜrQ¸×rôK,eBÕ¹ ý•Á%…^ÌÉmŠå/eË’1—¢¸É‹ü¶?íÖƒ±ãE铹=æÿÕÚ«ïjÿ¯fò6šoþ_Ïñ÷ÓOì¤ÛaŸÔg'G?ÿ­Ìu¿†©sb•}ëç¿•?2Æû½ÉŽÎû,á Ε’”`y\½ŠïPèÆò™Å=rÈk*aÍæþv³¾]Wª“wÞ Nå²ëD佂£çG1yrdÁÈçBxÐ ŠŸˆç¨¬ÉlF¬…³|l =6^tEŒ]nô_pðh;Ê€6Fi‰¦”ô›M#q„†y~<èõÄ×ùE&tôðaã˜\>Ò‘~£Þj?2ÄÇk® u0Œþ6 T$%Ðnk …ŠÃ(B!j)¢ÅK“Ðë€JJÿÙŸ¦ ƒø—×xøRv¾ô>Û #KçKŸ9™Ã9n»æt4ö[ë²öƒkhˆœl’ø|ðÍöúM†LM÷á7ÿÁ<4¬ñ° u:{Ü'ÝÛ]tDá\ÍþþîÚ Ï\Nc×ÿ©Mðû{k7ö–Òóü'Š£-ÿ> §Òõ%™ZòCÚûÝÊ-a9¦ÓÜÛo® rÝFn^Ð5¥3^+ÈX×”6šõ›¹£}û³À„Øñ=÷îeG~è_›ójÚÚA"MW®Ÿd6,ØÂ]^õ~°¿vãWŒu×}g—ñ†ö÷vÖUº µýÜÂö$+ëâDžïÑ©¦«Ý½çZïÖA×Ì0î¨jl}@½BÊrÉ·vÛkGhª¶hÚÿú¥›“›ú_®ü/ðq?Ã`7ñg˜m~6ÜJ}7½Ùò¢t«ÞÞÎ0‡ÅšÁ¦*èöú'¿Ú°õ°ILË^{í8ñŽ:ØíæV´¬ÃÀ++ôJ¬;½'Ôú+¿~ ÷Ÿ—™‹?¸øÏÊFðªÄ†ÜFF¾ÈdH*=åç+ì{Ê‚‹2Ý5pËùòGç/±¹h73o›¥^U5 €(ãÃÑCt9æÉb9h<€ŽyŸ.›ÐÍ”Å"òîßqdZ¯â8ôhVX*½Ü¥i€òÿ¨:–›4ÔÃàMŒÂQc(¬GiàQðUá‰ÿz€Ò2@é_tF–gÐE‡ñëWFj;6PI¨$Ž³× YÛ†Œ‘™ ñ ÂWÞnnâüaâ§7ÖÔñ&vë„“×7}{yøL—7‚O²_'lû6l¾—Àñ–W ÝAŽOQ0šŒLðDÓ+…¯aÊ|[–yµÜ‘ß™Çâ‰5…µ4Ý[Eþcâë•øùäüè€Q¾°­á Ö9—Eb•·‹û‰:`—)†ÆgqI¨vsâ~ÏKH—r ÉoQ&[•a•SbŠ]Lvf°êû¾“¸ ☇@Ü)oûO» ®[>*–*œs‡ˆRaQþ*Õàx`»!wÖ*G.©PKßÍ‚:ÊÞ£]w08å?df;qDˆ¿Ôy!^쪟ç}õóP[PàJ®Àフ#Yít9s©˜ú¾4™€Ì{&Eê5&ôŠ<WŽûMD3àC³Ô8®™EaZûLGÍËÌí›/Ž…½x’ƒJD|HØ×HèãfêbxS5 Rzƨ¦3.vê¯&AŒÇ¾7#zæûÄÅA]ã¢wt>ðÀ¼Xh´_ 3*šÛ­ÆÁ¼ ¼¦Ç„Q~)³üÆóP×Mƒ$.`ƒ ¢G°Áš<\ü5࣡ñ1’õ#‘͆^èÇäËüZó&~-Öçl({iSËZþmª·±þÇAó¥êÀÅÞN¡þGë­þdzü½ÙÿÞìoö¿7ûß›ýïÍþ÷fÿû‘ìkYÿÄÐÚO{wvÚ…–áxj$GJŒÄß{G}õzâx1¬œ¤?v9c@ô ìY-tÔËQ!1ó@Ó”÷ö® d/íK3ã|Æ’¥O F|æDËw>B¯Ó;ÁÄÅ$ žÈl±=Gõ=¾:ÝêÚ§*ôœ1\:«Q·Ú_™'‘~1ÆÊ¤ˆ)K¹’³ô¦P}èÿáL²˜çÖ{7Gü?š ûåô¿6ü½éÏñ÷¦ÿ½éoúß›þ÷¦ÿ½éoúߤÿ5+õÿè€ÉÎHŠLo‚ñšúH e^>Ú>l‡¾>¸ÊÑâmæâF—Îf˜É«8÷Þ/ æK §rÜ éþ¼Èi¿Fäs|»t "FÏ¿öï«‘äËç`MÁƒ,™„¯ÀS`fÔ4êõjN¢ÙPDß9jÔùCD^±£1Š]ËǾ§«F½YÀ¡D'ÛÑ&‘kñÔ¼Þ6ÍÝ×ÇmõV‘n‚sá{ShF<±?Ò«ÃP›P4p’kÿ±••ÑCs/¬õÀÏÌþ9ÅUfب+qÃ#íL#y‰³ŽŽ¯Co£äEN¢²Z¸ a5™Ëž|ž¸çO9§´Þ”KáUxJW2í‰=±Ó•O=Âð™gÀX ¥¸µeëU{ïÎWXys´úÎþ°þÇ•“ú·Í«ÿQßiïäë´­7ûÿsü­ºþG‹î@’Z¨ðÇŽñþ¼?*F¤Ã÷ÇÎeükâŸÉ@r~­í•å™Éxn‰‹ÞÑqäãÆÆÃ&6kò «$åÜÉZ:.~소ùwn%R•Bt‰Á·¯ð§KÏæ­`ø}yšÃ¥! 
˜Ë Ÿeþ¤(ËbHî·~”ÄÎg#O•Oñ0  ÊM‹ÝÝÄhDz OŒœh‚ç ô*™ã¦tÞ´Ý8:—¢àü¥F+óð ÿå©9¬Ì¡ùz&Ae9PY ô$ÐáŸ/¼Â)¿ 7¸òGgÆò<‹F×it@œ 2ž_êD%k1Ùj“MÅO €ù-¹Â4øÿ¦z"Žç"oüÄßfÇü$’j‚ Ï;vÛ2ªÕòFÊ–ƒÁSè—B-º°D¾ï¥â,ƒúå #S|n˜ª– t@¹±:‡lœøC?I¤SL*kZx?¾wÜlq¤µÊòúTì@æ~Á PêJâz­*’šçÍФ™‘ªbx1Äb»ð&G÷æL‰ˆ*lì v¢ì˪"WR"SnŠÞ²£(_gæPÌ•‹–ÈZ¦6¢;ì¸wºèÀ²coë±w•Û›9tÝZ‰PQrÓè×É’#3¦6W%—öoQÓcê ŠÓidÞç9ÈrÃTå ý4ó=« 1WCɳBøå¥ ÎÃÙ=c.†ÈèÓ|þ5öegæe*L&‘–ø("ͤNDæà–OÛf0…ñP ²Œgæ-²­â¹¨Àsª³4ö7û‘K^Ãr. ;äó)d'Ø_"æðß ™î ‘º¹ý$b“ â&‹ÿ:Ö92;Ê̓#MÃÒ)%¾QžÜïÅ·ô™¡ß³&Ë,‘œ›, §‚ $ÇÆÊ§3s¾aT7záèC²{¢“!~b¾Ã7^ûŒ©pZÁ®çÀxéµ#Á¢%#±½™Y¤‹´:vFzwnHŠ~Ÿ†H›¥B¾e2gíφ¡sýÚ§åÀÚÈ®þ 2K"”8+éM|'‹‘ÆnÀiÕ žÈ‰•»‚Ê@Þ³fê¬ÚpuEy§£G’¢¿s8( ÃG¹rñjc”¢§XÂJ G> &ƒã2–Õ)õÊá*rAq¿9s#8Ž|<å(΋&†’Ÿâò1>Ë1Ø’£•Zúj%ÀêÌñž¡÷å'—Ö+­«’ A†ÌÁÓ]‚ ½ ÓíW¾ü”ê¦'ñÜÕ)™ û\Š_&×5™PâÑ5 ôGàD¶AI‚c*Ýü@>$¯ž(v,| ]%¤C-TœúELÕ³r\ÃþóõsçÎ;yŽ\‹òäÚ ¸¯^ÀA‹ mÁJqe!e  Æ`;xà©0»4VÅ*ƒôˆª`›U]ÎOų́½¦”9ë¬Ùk°s?»Î«®Æ‚©G™{šçÜYUSjšÐfœ:‰þ¿–é•5Š'QvæŒÍ9Sí n”•ã!cäJJP8]«¾w¤³Œ¼™ùo¾Þ&Ô¨±º`Y±v+?ì]‰¼8üÀ¼ù<äéßSçÙ7 ¦8n$kìU‰ÞótGÔo †‹Žs/?ÎG[5À f˜pÉOœg÷ìÌtÏ4tÆVQ ‘¾>„šꉉ²! ƒ‰R{*B°­V×AF\™&c¨k‡¾‰sž+FibqIyÝÞЕú^¤Cm2‰ãQ¶=óP¥C)XQñž2!TYS«lÏe|i_§øùŸvÚÍÖ åj´Z­ÝBþ§æ›ÿϳüýDypúÙV1Ÿg§n·ëÌ8lKŸÂ¥òéã„ØàâèÀ…Ú„F¤‚¨ýTæ~ÜÆ³-ë¼A¿øSFp~ÃQDçHnî½õèú4Ÿx|Ù¯(­3}t Î[ìf~ÖÏÔ¶·H>OIHOåt»ðô©ªõi“wØyþ©«Ùô}ÓÖu‚äѩۑR릹· 3H™¸bcùXÙLbóÎf£ŽÏ•#oA fƒáû˜ôSŒU~t½J×ÀBÏ¿¹Ñ»(μ¯D=J¡Ñ•ÓÍ;ýóÏþ,ÐÌÌDF@êMœÙÃ#T ž[;x’™ùæ¿-ÍFžfÎ#sOϬۼO‡`&^ñ|÷¹³—ïuÂk¤æ›ÑãS®Nà 礪)WÏ0Ç~HaLµŸ(wÞ9&|g‘Ÿ À,ãe^ ׯ ~ÒH3þ?Yøîo'L¶ê߉ˆµœB³p55\‡2¨LÕ¤#ù407˜%þ£ãA•¨ÀÞ×x’BV±“àË%Rf.p不ü ÜÞé‘Ì‚#ïbu‘‚ÚÓ;Õ©’!ŽåSE³µ „t#ÓepX¯×…MkK•ï#âFÉ•À·Щ³T˜ö8ºÆåƒŒ›²ÀF€Çðçh&õƒÄ8*ÁG:YG΢‚fpp}ß eše9ÐÉž“Šç¬s©¼¦dcZÒVÇlzé4 ­R“œGù¬:Ö*_ñ’%Lá9.-ÃªËØíš SÅ”#të÷æL8ý¨“+Jô–w¸~l業ëëÿ‡ù_½ÈyÁú¿ÍFk¯™¯ÿÛ€Ûoç?Ïð÷–ÿõ-ÿë[þ×·ü¯où_ßò¿¾åý¡ò¿6¼ú¿ e`™—B ”ttÞ1ƒNÐùœ²RWƒÿ¾§¬”JÏt`Þ÷6DΘnÑ܃¡øûÍ(¨:ÏíátEñap=LƧúÒüÑï•`v4rz>Jo°P~WÛFP"oŠMŠ…þ0û1èÇo@ο‰CÏFN&›‰Ý\û©¶$þ$¨ŠâvõÝïíMCLÆ€D4^Øhº­ˆ,ì Ræ—Žü»ÅPõ:÷.CvîÆIv>Ù˜¢bŒÑ„¢’Vˆ¬×IW†MǼ~)Êèd×܉¹[Eë!ä9¶n#Câ‰Ì*wßñ:l‚5!§Ç£"1Ì=žd9&ωÅÃSÉòxI_Øøƒñûöž%:Ä™ÿäìÃĬ¹ÀÛÙÜ`.ÜA+·~Ϭ½½_†SÜ4qò]Ë–íƒ<*ã(ê&q»±a 
ÁVŸ›¿ÇâæÏT…ø#œö?â™C¿úÓÌn½ŒfÎüì&®XK#º÷3@zÖ==þˆøÿ×éŸ~::ùõ¸?Ø:;jsÄaÛ¯ý~§{òê8sÍC‡áVt^˜#'RÑ[óëD}¿„Ñ[»;1e¶Û”Ôn–*¸Ý*ùg¥9Ìn—¥@Ì6Q0ÄjÒš­}C©,ÅÓ–g7—ì‚ì½¼o=hð¶\Ö^`ÜCgÓ×P‚¤]°”¿#JÀÌNNR1Ê‘hs3jʺU(æL¢ì®Ék ÷M=ÖIyY¿›uùÃú©3ºrž®üÇ#þ_; +þŸ×ÿ€MäÍÿë9þ~ú‰¥âÏ•Qîÿâ{qâ|êùÞW'+,Q äübpü³q Ž`äó0F§'e&YáÉ?‹N®ÏäyðOÌ“_ø¯Ü1£·ø[_Žú¼O~æ~‚gÓäöS:coëÿ–m! úߟøHÐôƒ^Ã5ªeÁ>~…{Q;®‹ÉmR:lç™BFΘ\kPn£â%'G)y Äw‘öHã} |t¡ïÀfŠ™h‚ýšø0R¶Å&'Ûpã4ˆüm~wß*ýÛBG¡qè©ñ ¾eŠL{Ý6¦Ú¾‡÷l1/­x¶IÏZ[/ÑB8 ƒ@éi^ÑCk®ïµ¦~‘¦a§ôÕ9>³Sù‘²g[ôìéÑçOH#¸sfIV=½c?}L+@H*KQÕÚmzrŒÏ#zü¨e?ÐÖnåkçˆHW…ç$^º•%Žû­X:ç§•I$%¨«‘S®¾NZÛuÉlr+Ñ ¬Çpã¤7~úS™úRº”ðMLèôLv¤2«bþZôtI˜¾U’]j®,eÿÝjþ"ª,°|¼ms¼çƒüxÏŽvÒõiä®5~ÒÁ‹rQœ­ò4ÉÈL…g TaÿyùÇÑŸ4ÿÿE¯Ì ò® 2|$ûÂó0« ™ôÕ/•ə篱[ž¢1´¤Á’ÁŽñÿc”l+à¬iV€öL€ºwÞ©“f}_eoÈþå$gÊÔ ò2.f®?d¿×"Ôu&kÝ¢´·Ÿ÷¡’¯béÈïn„4í¹Uý‡Ã (óÙ:ÈtÒá,©¹ ÿ~ "Í Œ_Ù9óНãÈÌkh(ÄG^b ü@‡ÃÇG:¾ÄP›æP ÜoÓÆjÓ¸Œ"€Á£k$¼È3):©†/ÎŽµf?;žäb‡f¾Hh×4 =û£1º¿ÓÏ?䃊!çgÁbW% ¼ÀàÛíÂ’ü zÚ­¨…‰¦¥÷ªª­Xì¼)?ÊŸgsRÉ[ €¯ñÈ?J‚[…wºHXóÊxB«t$¶R¡Ÿåa[TDjí¦ OY)í9à™*f²›)ùBg±8ÿ7Èì Ö¢—n£vÝ¥1 ³y쌸{.ÝzññZ†hÿ'ßRœ¢ëðΪäÚ˜àÇKB¶—'|s¾Úd~y~¸’™h4÷ÿgŽ!Z̼˜|ü ò°lÂù@æ ÔÞÝîÆû•a^Ó*<ÁÀw,ÜžM@ÀòŽ'ÔûOêïî0?B]ÅãÅø¦‘O†bfËüïF½]ŸcÄm‹‰Èèk€Ö$• ø0Ž0t:¢ ’³£¶Ð>å¾”:!Þ€Ù“ÐcÂ%ìP+ÒÉ$ïïŸ}$ãèâæÑL,²¯ëæÊü ›¦ŽV§O–Ù»çO  íÙ¿²6XÜlG°ºñÖb€>úeýÀ›ê–̓é”^ØÀ€ü®(Ä63>vV2mä&ŒRM'ñö•ŒT΄2œŽ­.ó KÔ#Ôš…@`åptóÆÕ sb¡d1UBÆ|Eú‚kÎC:þñP·Ä^ ì1Ø„ hŸ»¦²I“¤´8ݳ©½Íü  ËŽšûÑ´¿Ä°[ùaÏ8b`„Àn­äÙt‹¡ª, CÇMÏÜFŒ¨Íš^`=XºF؈£3«ñPožôþ%l;6çÁãdS¡†²ZóJ‹Ï¢ñ Ž4ÇÇ[à*Š€Pº;¬díFŸã8äcUöNYW &?W3c¹‚k³Ð’N8ÁåIB,ŽòÙ(ÔR­x^½<*…ž\ã*ªÌ>ÐÝâ@q¿®,/ñÜk wÂY%zNHºåkzø•©ÅSK±³ÎÚe3ß%(¸×¾È÷ùÓß”àÅÛS¦îL‘ch<³‹2³ÕZ”ƒd’Ú‡Ô¢OVtA…ßßú¯-[¡]g7ïf·¥1žž6PêÀ(ˆ@° ó_`ž?t&aö3ko®TôbïÔ§n¸:5;4y{½ÐÇlˆøU›—Ï©`u \ýý£¡vÕ0’ KI¤iC¤uPˆžêÝ+|xs ðK¿Ä˜HEÏT[SPôQ¨/þ‘5éW´j ÈH=m”½ög‡Ù’.Μ{¶£OeÎJúþH'޾Gž9Á[ /òÑjÉj´6¾rq-- r³¸TMKú®™¦–WI F#ß xZy‹V z»ßPÞö„/áì`·lê¦nޤG¢²ØÚ½#Ü€lò9ÐpcZ!>Ý@ú8á«“dˆsùùÙáÜ)óâ ³nHÜv§Ë#ã—ð ™9C´IüV¾jHñ4E~R¦Ìi»Ry˜›RR‚ü«È»Ä7ŸƒS{Aêr~Â÷ì`ZRqàSqh)ŽJtÇœ3¸)&‘á ÞàPQG®? 
GNü¡H…tƒravÀ-cn:=ã}¢O€u|Þ!.$îêýXf8zêÙ$KL:%*ø÷YÞ¹†n° M ydÑŽ6C~ Ó{é.“Oñ=S‚ï¹ ±E$`ƒAw66bÕ@ /‰ë¢BÖu’,B_!OÂðýkâ„<1ŒÜÖv^~ZÀMnƒéõµç9›K8í1³j ûÙç“‹¾‚À)RÉóŒ²]¤ïIvs1É®c2¯ªâ£frn#S5ç%7d™¥W(ܹ’š–9曨ÝR N"7ÍT ^Y ¬-Z%WU1”•öäÈeûyæ#¾™Ü/Úc`Œ°#àOÌ¢n¨ÆÐ,Vw~Úò€ WðgŸ®ƒ"3‹Óàþb8Lµ[h÷¢ò'ÊH¨¼0ûÝ·ŠeŽùãÈMÈ2I¯+2S•O|õM§OÇÓù»çÂðfôx_¹Ã;&CÅîuæ9™ƒ (ýmPÆÙ}<ÙÄȃÛÖ&K‡”'$óAi\Ï9yXJƒ€‹xâfLXjÁ¹Í0p!¢P†&ÀèÅ0u˜!ÓÇ墋÷hF“@ÿÐ'fÛ$HXU·@2bÜŽ‡˜Ç"ψc¿89Ú¤…‰ˆÌbvã‡c@1ó|íÖWŽûíC(vHü!žrÓýæv“Ç…¡ð8Úð·¯·Yè9c ÕðW6ÛÚú§Ÿ$8¬Bde É7·m#½3êHçBýIõŠ$:“û Dç~猩÷DŒzÏäù½(¥ÌË*‹f• [´ß¿lÑ^äê-él6pWVÙbx·ý(¯n£M;FËFå‡åSŒÀ%q[úO™×ä;h ûå©ïhï7ªÉbÖ’.u7Ó/Z.²Y;HÉíy$[ò±%ízlG rêܔοZÞ¸ñÌNj–4HI·ÊéÄÆ?ª¦Cà :?•Ï…&<{6K¦A¸™0+×i”Þ¨4F–øbÞã—øûDÞæì¨hì”ÈùR‡-©gobÃ>”Q訠âšÒÇ<ªÀy}ƲSØøã&ý­x¸%dŠùæÞ’±„8ö ü1fyXÍiÄ#ÂB!Ãp 1Û„w‹b‡µÒŒ£&YÖͩđ†ê@Úü톜­8×\0­æîyXÞ‚'›Êm[z¦•}$ð`¡ÿ÷܉'¡?û|[’úeÜŸxÝ8«W:Þ•‰º©Ð °—Ës a#I?ÁZOÍ­E-|Í6á3Î’²OH¯ð4Áöx²¬Ô_ƒ.; `±eŸ0~×®+¾´s“NJYí‡2ÛIyNO•§F‚½ÈN”‹ï_2gÞ³‹=k SëŒïY”ò ;œmê—`OxdU°4Tduõ\I—Ë­E5à5Xž 3÷±is­ÍmZõ“ƒõÒûŠÿ0ÿ'椦/¦ÿ5í‚þ·ó¦ÿ=ÏßOÌš¶baùâó»õrþßh4šM`öÀÿwïïíµwÿ7šõ7þÿ6‹6ÖŸiÅY (#ô'W$ÂÛwÒÉ•èÁ…þ$¿w`€Ýk?ûû;|6Oêcï6Å=\êìÿúû;xڤݳµ]ÿÅqCöΉâèa×@ÛúÈ¢˜Ácé»_D<ûFâ;ÞGàùNâÞˆÌo‰¿IQÄ^ÄþÎÔwè!Žüw¿lÖ¸q>ƒ›‡ž¬;ᯪ”âO^Y‰?ý ’üþÌšõF»»C£ÝÜoµÿß2;W¸¢ôþ,~`Œé¸PÂa½ûPîXÅ?$ý»àªi©÷}”nû”ט_øø|ŸnÖÕ·Õüç-m  ¤ß›ài ïÁÍùKŒGÁïñ ,žd%îÛ¡¨òYú;4Œp‡½‚^€îì–ËÔ7 ”sW”êðÁUÎttà@–Ci›”x¯X¿»ÚwÏSYFʺIìú锉ƲTÒÈ÷;aŠ4 ¾gaÁi­d€…Ö¦ QÚ)†¡ÂË’K —jý:Áó“…4Û¤µTZ*ÏÍO’A¤86¯Ã[üÓb²Ôç|T§LBE85jg|=_wÜãŠæ-žcîjsŒXž"…ábˆìp*]‰aרȾÂ#P ÆC'ó¯EüC5 å㯱…1¾gpÿšø‚¡–Ç„ž?[˜[’r90Òá}¸1|©N7Yâsÿ7äv1@¾®äªJ³ÅÖ;ì Ó‡)‰ô?' 
þíTzìòNbº†Änú_Ú£Uh.™ƒ{ØkA#M ®ÿ:@‘‹u+@™ Š“ÈCNãdè'ðä‘bšòY^Že~èß«uW£ëÌ¿à‰'è½–g"Þ߀ð ž'Oþ9x¶t®î 7Š¡?(¼¦æmŒzÑp\âs|/®ÎÁå…† Ôž›‡ H(ø«ÒsÞ™®±ÙRº¨òŠSÕs ôˆfYéxJËybÏvƒzÆv͹ŽöJÔ » Ü²¢agAú$7ÁÕØ#$7àŠä¯I`Ë’WY6¼ˆß [Åž³_:=½8œ¾aÖ#Y~fJä‹ÌL+¨±jfP1WŠþýÂu'c‡Â$ÂèæçÕ“XËMãêd.>¥íÒ)…Í8›²1LÔ”–°|Ûñ bFsØÅmx>Fñ³5fà6xkV*÷“:kLvVµó¥röiî‹„f‘%.àajα_sÑ”NwÍ'wIÝz D ôG,Î9HºÕS>ÿÂÌ·ËOg¹(LÙÓçe¹@Ž‹­Örn[[TÂ¥ 4÷ˆ"„±çðÕ[˜!X.>ŠØmò‘H—[‘DPžHþ†Û“Æ%nþ¨|à•Â0l &=S?¢—{à®ÌОw.ƒpSR‰»UÍîôÅwв!©±Ù<¯ 2}LA´tÀ=BWÒSx÷v‚*F­>S,m¼rŽ¢”*¬òê㟲Úì°Yf¼j+7±‰ ~l„…¬šO`×@OÜÂó"„Én<ñ M¿£|›J5ûNä¿°›rNnæ½ãȹ®å{Ê%ξÏ#;‰ï^Ýòü1|0ÝŠ£­Œ‡ÓÓôÕØ“N ò¥KGú÷î€0sN]ŸD©Ï¢{ iÀøÝW3uf˜äômž+ ï:Ñ):±—u­/ºò˜L?;î7˜™Q;ØD‘øò\Ôã@æ´‚·2Ôž‹x¥˜êl=ùù-fUƒÇ¡ÃRZ ö5Yr‡Í2OU@×ꆞ<à¹á †¾¯†~Ôo÷|²Ã.ÏÍÞzqœÑúPMÀO4ÏKÐKå —õ.Ï5w®â$³î‰k îªe©?ü9ˆ¼£s¾¾Ñ=w[ˆ#Ô$3»à¨ä'ý–{ºGB%R1›;“,¶néñv'É5íq¡ó»5ˆGWäÓ3´‹ßÞçÙd–y§†ÞòΩí¸@lédä'ù'q»qˆ‡:è­oßEþ‡g¡vàâ-ç~ÑÝ£çYõ-E} %Ë,±‡uLÊÄÍ¥buy& ËQì©6ªF½U²®pî\_CŸ„D™Ù+íÕzr‘¿Ër‹%›^¿Æin1:äðn7 'JQÆOdˆrź҇  pU)-yUè.?š"k@@½MAûU·Q¡ÁqVX•—Tn{<¹9Ž>ƒÖéåŒùP´2ÝI ­'“[ú ÿŠ70”þŒ­zò±D Ë^h‰äI|§}+tÁ^ \(É&ãÜÛØ1ðq”ïYÞC`éíÊw‘ÃMÒÜm~ë$¢è$®šÑmØøôû(“_à¢l •ÏÛcš¿kŒÅó¯&׉\L!…LÒ‡?œ ™§jÖ@0 eŒsž®ïSìN×U*Þ,å‚0ü£Ï a´l C¦EY|?-*O$ ´Ôql—Nª¬Zˉ`ZÄæ}öükÿžG‰ýGÜÀlJGçÈh¾ò%aƒTw¸Ü îv“ ¦ªoˆ£{ZRsõþùY·*©ÆæÅ t&œVŰõ"¹†Å/ÍŽêáÓØµHît3uWû°M<5qyæ  Jmº)5›³YÇ£T§¯à‹b M›ù›6fkl‰ÍñÐO2Š)D 󆪼äþiSþhÑÁi¿&ª4üÛf¸˜d\;÷ò Á4‘=ÉvóaÀ>€ùBÔ·ƒñ/ø¾Hר߫OÈÜòÉòY™¤M,ŸQÖ®Š)à7W8É ¥T‚Õ% Ñ ë®e{ÇÍ‚[gŠ5h)þÖ”ü-©ãè¼k˜8 XÑ$µ¸,†]{ÑV<ö£-ªf‡¢Ñ ãÔç­ªÍsÒ-YXµRÃÖ _™òI?zغ!³kþÁ`œ{ fÃß|âz«©•Eî8Ýa§}ȱAô¼šÏ8ÈSR\b¹Ð¯u@¨¥êÊ'z>ùÙÚ÷• Ùɽ „lÝo/*T:•o±¶C²&G­s/M‚ÜyÅèÏjÍÍàÑ)åP%Bä)ÎfÎxþ4"T+çõÒ;MK6Ú[9Ðe¨ëÌ‹‰'—ÈäÉ#šý«ãŸaÿ™6œ¡„Óô¸1êp¾9NãkQŠŠç¥Ì·öƒç]ƒÅ¨Ät|k¶)^²!×5=5xô& ŸÆÆÁà”mû¥¼(·þ¦v¼¤ìÜ•*Ëœu›b)Qæõ Oa´Tà¬|Ö0ƒ–Í:w îÅ@û™ºµÉ›¸*µ ÙÇ8}Ö §QÊé£ç0´;V«á±!ž] ™TXr‡¶ôìeª  *RZk ½8ýt¨{ꜧ¤ÈŽt„k•G¤Á„¯+°@‰Þ¼/âDF\“Ú<@*ßlí›Ø ®òÜrtó‘š\Cr ,ñV Ø@_ϪÏp÷#Í1„bÎ/Î'9'!qW'bÞPi`ÞcºôžÇ]#ðHû"ôø%´ƒDì£ÂWØ‹¸­‡ººå¸«ü¸×ÂŽlLA˜…,àùÂYîmËèFŽC‰–f$ÒoY¦ÿ°¹d #QyBÆ$ÓÍHéã;?Ÿâ¡4p[ÚDe_ÇXPq ûÆN‘Ì+Ûz>¼,­Ý ^Ô]Rm8ÑÓÉ_FX¬-שÎQ.[¨ס¤C”Kõ½_Ç¥Õ>måMÛ±@žwRZ¹I)ŸW0)â-Ø”a›¦ðñÆ(·*3µlûÍ÷ÇELžD²­f´ª ù²'•vd‹,{÷ÞÕ'' ½Xcjúa–ÜÖòq>ÕÅK=DåÙýÖ€mçÞü¢™Ì_6öçšlÄN:êÁSCc–j‘?éµú8 
®ƒ,Íõ[cáøÆÉµ^ŽÇÜ`Î èƒù¶>Og´Ö¶ï_9ôöü±ïä?.¤ýÀϵ“bNTPc& °o\—Ê­3ÚŽð eúšAÿ”ÈÀR"º'%GCr[®±GVÆ|Ë{¡µ•OÊ$©9™³„v÷ÈÈk«@²¨oÃÛ>ùï•bš8Þ\–MèO$ÉÇžÝx,~’EµùÞoxiùt^†ÌÕ ù̹M ÷ ˜ì>·¡r† ×ú8å¸Àí%~=ÃyľôLpãôßÌêv žG”èú¢4§2! j°å@ å¯þ‡p©,.´Lxƒ¡KÞ³p’m»Gwì*lL7tšÎ}E´äŠÁw€vˆîç—+Q;¢ô1K«_é,^|y,MaI’£­’¶áRƒÜ—‚R›åô|vP@€}Pv צdoPÊæµ vö4T¦µf~4WŒ`&ˆJç·ÀZñ¤á_|†¥¤mÝç>&¤_f)ÍbÚU2D^ñôàètêr~ø˜Ç`%?$³“Éô‘C¢\µø¿€5­®¤§·Lm`ª³ˆ¤ò=÷@ôÛ§]Ðޟ倡Qohˆ»1E‰<ÄzÏGùèY¦Sc£¯-u½ØŠœ :þ XY_Íâ“Ù¶Çx%§´è>¢xÁ˜³ðñ,¹<.àVéâäþa•ÞaÀùgö›Áó •;Û÷k)Ï/è®èÑ´¸çLE™o×¢ž_5–÷ýªöüšêãÅïÕØŠ|¼lDZ W®=›þa£úÝTËNÝ®1ësÿ:@_ÿ!rm‡6q“—P,Þ¥ר]*ªÐèO¦ðða ð3wA„/¼qäÿá<`£¾}ßúؤAf/;ñ¬JÈ9ê†@…nF}o¬t`ñMf.Ëò¾æÂRé[úÄÄû¥ræª<ZéQ³á¸ÿ{àßÍN’ÏÃ{QST†Z{º£ó†V5“Ô~=ËqYîjºK,ˆXcTqÕÄGÙW¤83þ˜'sX ø€u;a¼57!5&¦?dxB”Ć6[s§©ÇsØ)ŽÆüV‡§ ÃLµJŠq;æ+°ôUÏ¿N~b›sd‡^þæ¸O®J¸A%¼Sóß4”žÐ;=’…=U£pi#à°wªjoê1êQä1°Ñ;/Üóü0sr­3@ýh¦Âè˜ )µ2j¶&¶ÆÔÔ–lYêÓåæ²$Ž®;VòþÜò€•ßb°g[Ò¢¨Ž}tÅÔØœ£–û¾¡J£™¥[ÍAS·)ÃW]6aY_Ó›‚aD hIôäÐ’GPHšUc3:Æ1í'“O_0ÔØª¡Pþ^uåLA™žL?—ó²š;•Nä±pò3CEmùä3fÎ \W™(½çÌZQæ! èŽÝ€ÛÁJ<ØVD {1ôx²ü\¾ 3ƒ°H‹)Æ&˯>ÅÈä?„Ey!w⑹EâD+q¿4?Å`¤F—|9Üo6)ÛxåƒêkU.èåé‚–“w0ª†ñÌÔˆ")j®bÐe<Èt»]>U±q:w’ò›Mw5V*G‹ÍÒŠúPbLðbv‘\›¹yÌ$M˜E¨[– ä//¨`„®“Èzqˆý1Pî!F’2ÐrÆ¡ó 8Ú@â_óÙ BÒ±L£¬Ý|ü.N’ÓU.uÙ£¹‘ôŸcÿÎh;v®`¾½ËÞ Ãd°AHÿài"~q_ÁvJsLž:¼y,^¶è „ëÄÏuBYYò2?ì å×ë2€ÚàÑŠÓŸ:ÑõÄ!G)’ëÏNÎŽí>ˆ~;ì7š0µµ 4÷ö— ç‘T€õBx’Ëâa@•Ÿ˜ñÛXÝ[TWg†åÿxzbW£,Ë™ÏýW¥'‹þ²²ùè&2- ¦aúçMEvÆXPrÂŒž³Þ=[†šXkí ! 
¥Æ@FiÕ÷A„*pãxñÝ£˜@ƒé˜Ð†§¼~O_À3Yåà,…Eä:ÿMþðü·öÑç×'‘ƒa×¾j ¨ }Yc˜él-Ž«¦I4$L!™ÌÅlJ3œQi ©  |Ê{2ФpŒEÈÖTÈB …x2Õ™pËÐ×r›Ù|Ä×jvHòs«˜-Ý›5i^~¹ *oäöÆîô)¡G‡Ï{š>t9pXM‹}Wae”¹†^`­¼ÅFezs¸£ÕJÛôP³­¡ùè÷¼ÞCXjŽ(™°©þú6Ü{ îs?à ôK.$ÕOøtsä¤ê&È%§—^tr•¿i®ã„56³ƒ—îpÃ&+gçúE‰= `:¨9Hí0Äextvæð¤nêJ_­HÉ;WŠÜÙß÷÷ëÍ#*[§™Ówv™Àq-;{É!Dm5 OB[›^¹;_Åq†6‰éרt' `ìò ·YáÏ®“ÀìYŽ3*%ÛjøÕè˜oº[šr±lí# >PM¶…NÝ ô€kl '­ÃDz$Žò ÅR£Ú„·A’MœPZÈ£Øó§8:é^yÖ.²[bôo¾ATL+ p²è€'û‰sð¢0Ë*XÇg:D0‡ Ìw7{ÃF“4cW>š5S?ÊDÝfž£±â‰“ ,Ol ;Uø€ àœe(âZ‹ˆò–” _³lÌÓäSY}èb¸,þ{FiËŠœv0•"[~î4ˆ¾ùÞ…eVÏWAS¾Mó™%²7M[37¶.Y£ýˆ¦"ï³Qj¬Ömx2dµ ‘ëÿEÓ|¾vÓ*²O5Z;möÕV{ñU¦Ÿjä‰$+}T¿0ô¹ögßAO±tG…‚q«$¦ªÜ®æ) Å kKGò ú¥’¨5‰´!<¿è¡¢ xRí¨ÄÆC¨ Ï(ÝaM¥/¢ú×TdÜÈ©/ˆf§‰ªëüÖÚþ?[§þY6`°+Ìþ<&6™³?ó“†öty$@¹j¨;ÆPmQÌÌrâ Ž˜·–Zu `ü¨ÚÕ6ì6:ÄiñâùW<Ÿ8ŠÝÇÅMÞ¥–<¼mÞ>I X89 PTaÙœ©¼ c÷‰AÞeJj9s€°g€8Å|ó’é)€O°D·W\M’êEKØüÅ‘yÑgž‘9€Ù7€.ã*Ÿ‘ºý2óQu ®rxaœU©30¶*>Åõ]žD{AÖóyPñq ¬áGÀ˜ ‰ë@”Ϲ7¡UUyú;ÉãÖJæ¶ÑÜ›ñH>Jy’ô¬ ’Fû<™Q‹u•òH²º&f#†_c åÁDˇ5rÓœ2Ý7¾ö¸¾Úé¬Ê´9þEA0XPF…|Ò¹Iª::½RãE*=ìfÉèX},PHÖâ¤Yvײkä#Êt§e(Àú¸)?S ÀéþÉqeœüÌÄ©KÉÚpêöeÓö€ª¢+ùH²nX3©Eéyà~S"ËšöžŒÒÏ“ ÔÇQÚUÛ̺FX뛪.¯9B©Ø¸<+¾^•ž-JuB¦# SÕ–Ž=ÔrŠQ½”.Ó ?: ®y„õH$Ó&~îîrÓ òk"xhtHÙ{!„üÆWäYÎ-Ò™:÷Ä ëý¢B&Sòàì”W®³ÄÌù“Éζ¿N…¼ èàÞÈf1,d89úB1‚?6ôŽÎ1ãNù¢tI*…Ö?ü0ü-Šï"¾‰£ÃGá_½°7ÓîÄ·Á ó޽I•<0O6¥©–Ù9lÂÔý“€)yp‡”bµ”AË›+"Ô©¤I«*Ö~I°'i0Äß–ÊTó<t%—se„§ˆ°´$4ƒx‡ñõCTÅÛ:•&mZÊ+õ^8l5üq Gô«š0 wØ.¯¢ì”ü•7 ÌŒ¼º¬å*âJf5"/ ¹]çç¥`à@>ÁÜoƒb/œ:ó ÊÓ^H¥P,Q»ÔKó¦¹S¶˜œÚƒ£²`Å&`ƹTPlì\ȨˆljåfV)FfÊ8{Y^übÇ_C[)™ÈòA/\¾`Ê4â$’ëR‰ö ú¤Ÿk´4Ù •7îÉð,å!ÕúÍŠ; 1¥3$–'üP/Ü:~R¡N45fG1ó÷ºIp«.¾roju ÓHˆKt­ê¤ÂÐ|Ò§oÈb×ôÆ›ùÉeZè$Ä‘~à8ùFÁ¸8ZÝŒnͧñ5·NX-áþœëfÜcY¿xæÜ÷É\˜ø(¢ K»ð1ßÿ¦;Äî¿‚Æ¢_þìxÝ;Uu•šÎ1øMí79%Ø¢XW`,Û<¿›Ç¿0²‡&•S0r’’R­æX.3'ç(áÞÚ“@θ€y¶}_¿N¦XÿÜ—èêW”¬VY*u³ªŠÞù€a²©›$–G5KeŸTô?Õ†8«bÆPˇñ‹Ï ”Óo¥_¤ºM!R†êñvÂ8&íæ±¬Dl ¥ˆÍŠíD ¶ÆV„Üý¦ÞQeQŒ¢EdÞ õ‘B”ݪZàV9mªpüšÆß’nÚèXUQ{ÑV•u›€£ªN¬ì‰‰•×1±R:&:#•lItªFhDrR’§Q¦L}^¿/©H¿¨òò;ˆçÁ÷òo 1•Œ¯aÊÆ‹ÄÕ-Æšª_Kþ¿*âQ& ô=UØ) L¸¿ðÏßï\a‘/†©8ThŒLˆ=Xâ·N$ãkfIrVcK¯™w'HdrC²ª­Ìœ¢;d 5&ˆ~ _Š8à7Ì6ÆÓcZÌ©‚©EÔ~êòͧ*_%Äò Qx3O0ЖK©ÈŸ¢óJN&Ž‘Hï°”D!³Iä9jË@¢ç»Á8ðK’¸ž¡*u®1DKˆ;Û÷Ó§¹< ' ¨ÑNÈ3©Þ>Ø,&œFO2ŒV·îsÓµÊ 
ÿÔXÈKÞÇ3.çЃ²màßËUÉ­°ñKœÜ9‰'¿D¾¸üÎíc]Ù+¥.uF²#aéÅ;ÿ1‰3Û•#`EˆoéÌJÞ¯˜â³t‹iÖŠÊ£mL³ÎŽ®ºæ=çd}™6¿l Ìw­bÆœ/ÒªJflÁù݃ÏX”ŽÒkÌp€µ™øhe+ˆÙgéõ¨ïFtzàù—†ý!>zëPš5Ô0A­Çݨ5@Û®çñÌhóLö®ö;ƪ.fé›5¶²ÿ`‘ÆùWôu2†«¯¾ãñ¬!xMµgaüœÄŽçRýgë–§i ÛÆã$¾õÍ@Jh5Þ¥’VX°ƒß¤0ˆÅ5§ê)2I¤è:ÅJ’TER5` 8ÄŽ#³[%ýBÛy|4ìýðÆw¿I1ÿÖ×PÃT@#¥:#:Î4<°dn.ž½ eú¶ËœâPŽ#2\ wRȃSö•ÑÓ¢ÔÄÓ…é œÇjÁŽ ÃÒøb•!Ió³ÚC>TZ]s)+‡Ë`®¢¦é£ ÍËNtÝJqBÅ)¢ÏÓ>ñW~qܾet9žEüãâ Œ.>‹4Øt!lfü¡ïò+¬¾ÂÕ½ñlÎÍ„ÑsIºDPƒ2ɗ謊å3ëéÀBwʶ€õcf”`YËëAŽ’þð¯JÌ—ÚÂi“s>4•.8k†U-‹–:ÇWèíüÒéù”¦ó*TìýB¸pU×å¥uˆU÷ªMyvUðZDÈGÓ«Â4DžO»ö¢4†± VZô§¥Üp¤çeN²^пèÿ“M Js!ÌTÙ`¦É°®úøÍGf¯.„¼¶ Â,‹Q¼ f<ÅÊÁ³›‡Çˆ­Ô A#ûk>ÿo,³£Ç",k Œ¿lí#}©¸t‡{çB†ÿò"åµ’‹óƒwÃÀÊ êGIŒ®¼äÚo6Q~6:¬žPcÆLœt;ì¶¹$M …퉽炲sÂ+ÂÈydQÈJÆ¥sM ygG†ÂŒŸ8ÆŠ³ò† )ŽÑxýÄJ¤ÂÓÓé xúˆssõð´5<]ÌššnQø….jœ¢ÝÞZlöcu@»oÕX“Mq´Qôíä%ØÊ›òöù¼ÖbÐðHåŠ"Òÿ-¹Âs¢S˜ŽÄu­ÛúkwYŠ7×ã&ãÉÃSQjÛkÎd„LÎcØrFdTé5¶ä  ŠG×5~º®ÉI¾›ž Öªƒß"jç-c¼o¤Ì|¯0NJ|½ãÔùRœŽÈƒKKÎ c¤_“‘ÙÉPxž ”ÔbZSâ÷„Æ0ã=›ß%¾CyÍŸ´ ‚ß3æäsçgpDl›À“§Ma÷$# ¾@¼D±“½YÀ¦/ èÉ>†\ €Á0pG»'#…´Jð/•Üæ@ÉeìkœŸôóITè8fÎ^‡ žAiT<”GUYÒØ8QFUPš„ßܽW?ÑÊÃÇè‡ÂP1g¯2¤Á¼uDñ'Ø×ê6å©í¢˜¥¼âë ‘Õh”ÎæX5_»r_«ä\Uû‚5lên¹7š¦ly,ÈîWM©\ ®± Eßxé¬@’…æÒîU-g®˜Q„!EœÈ[#Ë’œy¥Ælaí| Š„ä Ðݯ•n£ßÜ ƒ`‘MTð Ý&„S G& ÌoÎ5r§Š"Ö–GBËD‚r2°±¢Ï X(¬Že‡¶cz c«±Bæ°òÑ ´Ân'ƒn½vûK<ç„á•ã~3_VaÛ„pLR• _.ÿA‚Ö<–n©=jNÓ} o¡š,Gý$'8§©Z1C}T“O¡Ä|1É®cž¾Q¿Ž7N"7åoHXû$ÖˆõAw¾Ä æõÅŸâ˜Æ ¦b¼S_­ª¾¬±£K!Ì<ø…?qrô9„éÄÓÔüXÌ{ c­¥¼9fx×¶£9E.|54‰¶Ÿ²$Tœ%ª7VO}†PO[¤Á¢7‡§rGAº5î¢Ür Æ â“јÚóC/Ùî{ªÆþ4LÜÜàûǧA4¹çNv†Ô„縥r“`gÆ{*}‹’‹ç–&¸ôø­´ŸÞü*èdrõ›ÿ e ÆHàÊ5<Äﮞ Lc |†Ÿ $ʱ՘]ÅùÃj†Ò²†Â X/4c:9Šsü—fÈÌ­Uš ¸jg*‘+kléA·ÍA'è”Ó,I  Nt/–\øäˆ’«‹sNh KÚDJ†:sÖŸê¡аð’ä#P~Š PÊ|pì=9xÀoŒiNdkég®4V5dƒuŠ­¬sÄÇ®F¢ÓÈ/Íói½dWÌ×å\ =؆ÜŸqv¹&!]ø¹M—²BÙúFäe—òÏÓÖ«ÔÿÄ‹Ó<5i4[ÜX/àð½‰,²’·ñÑ- ¸Ù1ËžnÔ;†S–j»Hx³´y#fÎàÍB8¬èYí™ùî ƒ/Rû¹þ¬¯É{T,Á3?eSöM–þôi„9£½ÐÇT-Û¼B•Ÿ5·¡›Oðßž?-C@–î÷ë$®¯9Ìà ³Ï=ñ¾~è a“—5.4óÜsù•шU±Ý8ñRãá2;@•)EA#€ì„ŸÓžšéÕ±YÞ°¤{ºuæ€!º§~t-•´î:°w¼Ûó¸¶PÒðA+\Ñ7´¢$­á“?“t\Áʬ%Ü$‡Õ8ˆFÈÑ„ºû̳”dy ]rëÈß“P©eÛ’qç“pw¬šÓ‚}LsíGhÖ \QwDÚ-±(9=à½ôAA9¼Ôä²°CÔ>0%/z˜§Å¥c`>Th`¼å#ï$€øÀ2QU+\Ø‘b/Iì=zˆ¸û(­qï¼? 
Cüàœõ=søÇðW§ÓÑ»êgÔW?]µ¸Bµf<è©{iïwõ;ûs ~þÔ?ÔÏžÁAÞÀ*ìÆµó¥ôY]õƒkõä,õû4võ /F.Ú}Ôõ7=,«©.¨¾Ò`¦ý¯_ºê*éõ1CšãCq±âÝÕ¬ & “kJJ D›Ò²êY—ij)ÛÉÅëYªú‘T“ùeB—5ÝR¾$ãˆ5oÌž@Z\ú²öìößMFê@opªûï5AþÇÄO¬†H&é³ÙV£hî '…¥Õ›hCÿ–•ÌBÓ˜…2rtÞg×5FÁûŠ‹–_b[žeÔÐ+R@lÃ‹ÓØ³¦¡àS^³J?fœˆœþJk…ØÌ¨´¶ÍÒaã=Qê î!Æõ-¹22†üVDU:й©ÀørÓürÏwÂQáóX˯ÒâoqÇøuÙç[üöÏAâÓFŒW4“}ظy*h¢¬bÂkK¼¡J}'̸E‰nÉ}<£Æ® NúÂ}G%±äoýæ¹Ö@¦°®;ÞȾE¦#—§ÕUrøk¥Ý“¸©–ËÐSsÆ*Œ¤¿âisHp¤+éɇò(ê\îxb¿:ÙcÒ¤mÌS-UžXyÀ·Ä×ÅP‡Œ×ÒåµK"¬_ÄŸG»âF)ÚŸô ]i²œâ¥QøÀ]¦.oHg ö݃}ŠLÑ®A]Àù—A×3#„e›P¦v€ÐÁ2ç¿0Jö#Ën’8ËBZ֑Lj–J,6Ъ@ô]¤(È«oñ~}öù!“ —ã0v¼öÇŽâ»(×ÄŸù\ÃOßÉDÆ“Ÿíf‘ ?®ƒ|,6‚º…°Î^¦«)îÛmDk{{_ÂÜ[uu{_›{­ƒŸÙ¡úxp)êO+¨ ;‚^öNШê„_>óH5øy‘Ý  0ÞKE ž¼ù =#š;]õ.ò‰¸“¡‹›u¥ 5ö·ëXr»ÙV<&!Úï\a–<Í,»¼™%~ÈÓ˜!BÛe]DÉ$zmKlÆú2>n…"osmÉç¤ÅÊçB„^6‰BòrÃTçÛ€‚¾ù[@.pÛ£Du ãûÇ¡¿åÅîó¶þ_¥[©´º(¿û $îâÐäí²W£(Š×Xá5ŽÒòñÄaœ”Üõî(5;¦Ð-{w ŸN·Æˆ€ âÉN«nÑT„A-ÁCãôxB9¢"’\¶&ã²»Ð]€p[úæÈ÷gÊœþк_SO¸q8É*!Æ·þ5!´ô ˜ø-ê.NªqãqP1t|ûÛVìf~¦àÇ€êíI‚e̶¨VfaTøoñÓ]“Dªƒ-”šžS !È™î7˜xÔóË#Ù¥p<¢ò‘ 16ÔxªÀ;@•šÑÇ8Ç9Ú6çÈ…÷HÆ!ÙeZ #ÇkŠûŒd’X÷I`¡|u ì•‚‚¢éi¡@ª•ðÀÐ[6è§Ýž~`C矒ò¢ÖFµ Eƒ#L•ã§8(ÊpͺOCƒ6O¥Î•ó€.&ä~.Ç.Z‰ŠW²zâÂG.zÈróìóçç—D (š¥P 5ÀÔÂÐâ£"¿%h0†8¯¬ûZì7Ù¿ Z¥@-M²V Ãcek‹3³" ö»m SßU Œ®œ¾3äªÆ6³Övf‚bÔ‡û Vªç*+~sœ÷k$! 
ý˜¶ŸžÊôäù ß‚L´ô“9„Z( CzòòŒ‚Ͷß÷[® ÏœH-è¶³IšÙs]e¥´mô™©x¼‡º–i2õU HM|Ë ±-zôªc”›Tg&/j§¦>ºNÊGf0µœMïˆ;!P æ(•v;zYhOës O]:—¹”êvLª#¸Îœñ˜âaMºûƆz³ºYIl¦[®"9ñSgɬéTÖ¤–̼šKÔ¶ ¬ X!ÓUдÒM¦ïVçU¨˜~cùÔd¿ bÞT ߯±Ææ<¿-…¸mlG¿ðy®r5f¤XZ“­@Cüû¬§^òb v®4£©^£”3 \h.ybÕŠy#ñ•?œ18S|xs O¨%IGBÀS4ƒá}v® ~aÑØÍW¨Ð?‚È‹ïôèăôo‰CõÑ/qâú*u15õü!Lè™ã‚ªæë3V+ï‚$¾gNøeÜŸxÝ8•@ ¿ÉjŠ6®³ƒ8yy~ò'né'X¼ÕÜ«Üï-¡dÁqï›ã>ñFÎØ2å¶Ã†È†FÌaHpYŒ)hNަH‚4+ƒȾ¶==L1½ã¬CmñÈ&Ú#®±CÓ¼Ïä3SÎ2\]¹`«µÆ.¬Ý•Ák?±¹CoYòTI&‚Eg¯]ØOe„…µ¯R9 ŽñÜwÚ`1yGnÛ< }'ÁŒ€SdÖM!Ç“çßñ$¥×jÌzqi¤ìV"¥TÈX üT÷,ÜwÄ–;(„Ÿê'ÍðSSü‘á§|¯Rá§Çeá§ö #ä“ÏJY|pÍ€ ¨Æx¬TÞ_Ij‘_àÂÜh´[XæY«ð/ÆúñjŠà:DFð\ŒÏ ¼"Ò…àO™A~b <¤áW½ITcT_uRã5º–AØ W5\œÇÙgÈó®ŠëÎ0£]L<-âªSŽô/.fÓRÛVò.¹d4ñ9ÝÔÏRF7d†´Ô>w¨Ü¢é¥3²ý‹üjj×bÒô5zÎøÓØý,¦exÿVðîTÀ)…Ë_Q*qøËuEþqOc¶o†õ‰^fJ÷be1ä–Kj‚gãgNŠÇÁüÔ”\¸GqÞtäGÔÂÜ‘g¤ƒ¹Ùú’’s—Ç$𣷬Ý~iç×1oDf7FsÙ›©j*×LnÅ”ÎÔR™<ö­LZÈFJËÔùÕàkÚ•—&s9³øPÕhä*JyŸvÂ"@m!ÑǪ“«ì›‰-d¿&ãú5öd)ç¦<†Æß|µY«,–íÆ|ex–©Šàwz@ :—‡Öè{õØÅÊAט)[¸ Ë&“’6NÍ5£Q'— gâtU^÷ytL‘2.ÝåÈ. Ê§Ï ¨[‘u~³À%Ê/Š{ÚKÃ9‚Ýoþƒ‰±A׿‚Ù7¼à:ÈR£ ?ë¢+±™ÞBÝɨXº?¶uðZhÎ3¤§ †SF<ñD¦Å3s„¦Jqì0ž\…܇Ä î95—š‘øÀìÓÌ>µ<ŽN4sT)¢*)…!ˆC^›J]‡çÁÓâI;:§ª†žŸò(fq-÷qÙ¹ö#%TÉ>hO<Ñ‚õI¤Æ)ž Ååøg<[¢(A±¸Ç¡®6kl|fHéæjàÔh†W /ª´QG«IP»íé7Ç'ùÑÑõÅxNÂp“ð$M'@X]УE¸®¹üËîkïÞidSY2ªŠPjŒH>© ¢ð[d%Én‚TEWãÒP CdŽzƒ®è=†0ŸK˜”[_F$‹ðúž+œÃDÁ{z±Ð  ²æ¤m$p÷›ï!+BŒO¢\ÖU—nǬÌ9glž5Ôü¤äïå©ÒVÍ7@$¾,°âgŽø ¿nUZiOÛta&µÚr¤” F'Ü>AØ‘G;Á5úت3‹dmßbL<ů˜(ó÷E$”ÍHŒ‹x¬ûð“ñE$ùêå/*|à„f5m£u!î@ïŸ?¶ˆhšÆÚÞW£i„àÍÈ7™ÀCµ=¶u`¨íD'aïQdj“à~ÿ¦¦ˆ’EÊPFxR”jÑÈ{ë$îMÀcžxÏ/rËããµi*ðT wÕ">éP_‚˜\Ê™~âÑè kGÚ>ؤÇßi¦ä¶Wë…{ -ù nÍMÑÆçi7.™å¯É}ìçEsŽfr•RuãÜHds5qN1š þ†[žxè¨<§:´vb˜Ô&f’%ræ“«/'êe_d¼¥«åø˜çÝG«u—\ÞàÖed|}Þ™"¡ÑÆ‹ÈQµ€è,pr…µí¤4 z¼üYÄ•e³z|¼©EÕ}î„L'•U¶‘¾g˜gî‡zÎ%9ÝIiÓâJ ÙôEy U×$H‰Ø1DvŽ·6µfKv‰q“\r­/7KΓí‚.sÑ0Áhk0ø4 ÑÍhd«´¨'='ÙÁ­&g)÷4èôÜ>l G\W«`º‘xXX å¥ª£+®É"½„[(þÒ0<`Ž.;ÜØóÒäæ=D£Hî°+ô:q­C\~`}ÌT¨uXc˜Ã‚›'„x;N02;ñ=«ÕãÙ.xÎ áíbµQj2îfVHáuDžy ôéñÊ›§þ-™ûììr2ö ºÆhÛ§`ñ„êôüD†ŽëØ~áý·"«9ʼn¾c–û”M¥/vٷͲXç彎Ùã—côðÀ ”N¹ˆ³/‡;û{»¥æ*Õ e ©²W5¶ÛFä£~»§ëÞŠš¥úÀºP”]}ƒeÜlù×ù ó§Mx;»ÂßÞn4‘Û¬rþëioýÞ9½<^¢ÔhÀ²͆u i•áït“ÝÝÄlä`>–L":DøÀŽÿã²sz2ø‹¹0ñÇ÷ XŸtÚgNæÞ¹£QÛº;49¤â‘¿@•ÍÝÙ!GE¾³Ý´èt(˜St”€ôè¼™£-©«˜ 
áÐŽ©ç[Ò!ÈŒ…R)—¤¡áÖ·Ë1ç*žd¤KçõíÜÜœ\G ¶f†7óyÁX=3AÚ$ZF ä$_cñ ³­8²7 byj¡Äy€¯Õ'” èy¤VˆqÈÈóF­ær§Âið™9JKqrf‘öX^Ròuq^bͨ®ÌDÔ”¬¢|º:úC´v0‡i³^oÕLüo[ä:R-’$Vå^~yU-®r¼·v”}u¨ÂE0Ž;Q»t…yžæBsŽQa·-Õ'L#Sá8G‰3ÌaÖ”B›‡%*ø€¿Ü±Œ€Cø~ÄÃ*6a¿Ö(Ï¥!°XAî^‘Oðµ©;V}Þd2luÖykÖõÄY¥»§T[\hµTÔí\ÁnÛÜiîøKŠ—¿åm5Ðð‘Q½¹v«ÆÚ¥|¼l“Uw*fÆQcO¸°ö)>ݬPtèBkŒæjÀämrÄ]HXXsp© lŠúð£“*øàÂr˜-:¤ëX"Ø dªFâ.÷Xc˜vZDÓ¶®Œ^éM< =ÆãLÍÝ e òëœ {H(& Ÿ}e6êûJ8îbÄSXîØJ&×7¤dè‰ÿ ºÈ5¨*y, FãÐg ì =EÀˆà‡Ð ¥+€^XÈ»™‰˜s ^ö;¿3O.ß õ{¡·öóõ³Ì¾ÌÌÐRô*sB¾R(^½‡{h€Fä o cN¯â8ôh¦,VÓµ•'aД…ß0í¨¶2;Ž‘9¥6'{]xoÖŠÞÒÐ{[)å.YíNÐ¬Ø –’j[ºu+3)GOV3•Œœ{±À`¢•’ýEï踤`Ü /5í§ ÿs‰þ°-ÍêH5#ëlX*ÚKÍ瀉âÂñÇàKp?P¯ç¢Æˆ¼azGç‡ñh ÒRdžÖo>“\BöêQ"ƒ¬(åujU@¹øøò4 z'뙿•Ž}ÏôèÔ‚œôè-¦ƒ¾¯fÐNklu$ÚÎÅ;O °/[ºm¬Y)™ l¾0cNÓ­kl%£·œJQÜi7fÝŸª4²–Ò0IÚÂÊ~³‰Wñ=öÆ`—^™¸¹Múô º¯¹SÞŸ[GÇÝÞñagp|㭱܈gÞ3vë{¦ýŽ‚)­JËSJ?˪ÙbÖî€ËjQ:pÒo§Î•Z«÷p¤5ödâž:j[éŠtEå=„-ìkñÆ©¦ËÙ½jla+Šò.Ex€“þѹ”ô £É¢ÛÏJN õ=*#1Ÿ&@îBe†þ%­*ûµý×DNä,¸½ö2N®q)Ý}2›Çç@Šò+^õ†«U°Ì*Ñ´f,w{O‰Fnå<áyTÎ…?®fðèAé\¯3Ìs*~Ý­ð¨Ñ2 ,|®OJTýf£,°DI¶”! d&Â?8iÔ[Ë› ©î)¼0mÃSÛ²;¡ETÚÖÊlO‹˜§Øž„awÛSÕQªRWdšó’ãá›ÝùzŠ3\“s’ú”óÓÙÙêÞ¾2¨]'cXv_}Ç+(x:RfðZ̨֬±Ýr!t £Z…$Ô¶Mñ”ÝY°ÚWd‹¯Zw¥Õð.ˆ°gµŸðL«¶e Œ B<_tŠò2òpi)ÍR >"u²v"îÚÁÒGßz¤ nÞcñ|Ë@µ/nýqǬ}8_K©0º×2…Q3£e¹ƒý«`¹CDx<°jCqô8"Ÿ¤YÅF:óæ°€¼m¸)£þ´0:·Ã\ú©Ôp:BËÊ’O§•¾HÏÆ®çõEª Ži”¥Ë’©´Ë-%ÎÂæ­”Úvž«±)Ç:«‰iê(ÁÇÎâ[¿c¥kyNSÈ uñ}¬I­ÒɤÙ%§–L+ë¢'…³FÁ—;mlÖ÷¥ˆ=ò©d“ïafR}QµVO¦°'<ÌkØÙ±£)¸n%“IÐ-e0¶ëÑ«jô‚«¡ÑØdeK+wm7‡«Ê#QÐxà2§ö¿k‰0J©$AJe®#ÌŽ–®”Å“ ´²Ž©n¨þ׆Hó3Ûj°¿ÿoP¢‡Y~E›”’UÅ—NO‡!œ€ëê€'tÅɵ#Íd"²]ί9»µ*ï毨´µ–aÙ±*¹QÌUAeÀÕ¡‚PüÔÚçÊx&ï¤å‚˪¬ç;Òk"É eØ,Í)¨å+Y³Z\º+²Ÿ‹ ¡ÅUœ'j®ÔVàmÎ r¥Bÿ `éyGnoìÚ®c¯Â#X‘¢G°HÜÏŒ¸e#seg@WÍç:Ãh6TN/rÐA.26Î;äöVSæ^áÁ‚ŽR×~Æ0 lBIái³Déà™²Å-±±W©mÀâ\ç§%}#‰Ââ>øEPÈ…}ð‹ðÖæ ÙS)òR,%dÙ,D—[äL´`Ò3´ŸÆ÷hO…P_½8ÎJØ¢ð)\[¬±¥7­¶Å­êET ÃHë€2]*Èòi!×z$ËÙs˜`¹£©ÅŽ€„æóÞ· ¥ÑsKL`YôMàFŽâ÷êø¥úöŽf+ hj†äa{Äl!s˜;¬¹©=jÜ[ÀOZN rú*_¬ÑYæ+ƒïÌé(ƒÇ¸×¯0}DAÑ_ØaF«ú¼²žS>{a0cŒÄL¡µ?žÀpþ­޳]!-S&˜£þ¯%I}ï™l‹+Í$¡ìŒ*“:ñCKÅný3¾Jù±.æTÖ»'‘Gñ)»»ñ)„ ˆ7Ší ‡Ò“²&¤ÏnœˆaÂgìÕþ(N’0øÆö‚¹u¶ef O‡ùÎq˹5tBn)À”f\¦ûp/š"hâ"]…Þ;¿†TcóÉTC†‘2ª*§ùkØ)…,ŽJßÇ„ÎmÓo#•ªVÊaŽiæŒÆ¦clÑ1„‚—“\Ú1Sy›ûÅùÅÖeÿ¸·uvqtòåä°38¹8Ÿ-y)°5Ö^Ê© ±}PŒ‡=¾ÿá$Ô†Äâ‘-¬U^ÇZ´/ 
XX_­t²Y¥R»Õl‚êeTzãFû3çþ (š“Ä7©f ²Q&>厙ø¾:é‘K /ATÑ'ä<l²pö ¨Þ¸“£ø¸ÊNýñ |3U§ l™[et©n¶2› òetMH(’˜®ÊÑBú†O©ù9{ìMèNˆ²Ð…™ý\$uXqEõ¹W«°„‚è(o(8p^c«È륫´RÔìÖå†-Â(Bªx€f¢eñ`pN®~¼Ù¼šÓ¸>æ|ÿQã‹°´—B­êû²j[A õŠ2ý©8k‘é«oVÂ?ópvÁXè£5¶:• D*/½È'ßc;;ä;¬Fv õî#{Dt…¿Aä}çQ, ÷NÖª¡*8”ÇÀÁØ›Y)°- ñEð6åß¼ ð(mf ¯ô¨ªiÕ½?÷EéÝ^`DÒÝc†àÄ P Z)ñØS™£¶ÊK¾Îë®Ïu5£cªÊ—!µÐ+¶à7æg}È»¦"=jn#^îæÞcò=båƒô±!_‹ÃÃ8)†!§Çs]Ã^eªà'Úòhø›zÔ1 <ÅGÒ!1G %²®oBø/ã/f˪1Úµ†ß&XFž c5Êz“ÐOMu˜{Zéà¸/h>H3•#wŽ(ËrnEÅÐK4f¥ÊE¡­NZ…µ²Ëëû–U‰Y‘ÏI=e|_»™/72哯À´lS˜•ƒwùè·(¥×Xß‚ðó7Ël^¸[⺢“r{¾“©¡kC$d¬®Êm±O b•óɪ³žI{•‘õ¬ã%…Äg5Ö:ø¹ø TžêLd΄ äD¤õ:²žUÆCéSË‘íÀ¯tЇAúÂa“ò!‰ŠðíysÌÎ$BÏ.Nà¡óì·´ºß??ëžÆ®c©3âÕ€a¾’'(ñÒRÇî —ËwìŒpét0\ä†ë‚T¼ÛODí—ƒÆbÁü5¶Ý.çc¯«â“J>BW1å—#©±Ç­xb“ïw¦tˆÍ}¦ÜoqIR´SqIT.ѳ£“„ƒ;¿Å:Ë)ÕšaJK)UŠäÝ‚k*žDnŒT§CíJ‹<©ÄH»  á"/1œSUè’œa>˜®·+ Và~*ÕÙ’1u{1Y2¶N1Zâ¡ÌDE)záI.egh3n”ª¸¼ôÊÜA1jñ𠮆®ë[æËk=W~„ÕzÖ÷õA»<ì:sîFßA‡<ÙÄwÝòŠóüV&X”;«C’1ωdy²¡25*×Ó§——©Zô5¶â\ˆ&·ý½CO+“öÁõ‹û™“dºúùrö¦éVˆ½¬o«—A ãE=_Çå> Ц‹pø `b_…"ŸÂŸ So`"ÎȃVÌ®0I\³Qº9Åhœhu17dÓ›NC«°Vï(\~LÁÓ0ÒÏWU³ê Ø(:Å«2s4¹8Í‹U¥+©-?³'Šœ ÿ®çUd-œ;½éô¬…8¹se-¬RÙT™“äÿßÞ›6·déÂó¿"£ïD´aÙ"%jé÷ %»JÝZØ¢ÔÕs¿Ü€HB8¨¥ý{ÎÉH,I•;¥ó4ú·&ƒÛ$;u’æÑ ;;åe™Oè¹ù¤Pƒ•Ì…Tw‡õ&‚wõÊõΫå6`%,=Ò5Å-¹k;¥â+uéWï­rŸDz…3צ® .0L§èCE†jŠšä ¯êW ô'ZkùÖ¼¨ÕNBObóM‚o.6m—¹Ëà2”CÝÿ>=ýj‡AÐËŸ²Sä&ñÉ¢ÆÂg¬†ƒ£½Ø:ðš/O³8"Ö?c±”Moj­Ä_ìÉ}ˆðX"=ñ ÄÏ”L—Ž^õíå x7xB½íÖ`€µÌÛ5ˆr¨prH˜ŠxÅ»¬AÄ…\möa«Ã, ¸qõË2 Öh&YŨÃ{Ž\%IÆs=æ ñ.'kжg§òFöìH©í"•Öôjœ……®ƒÏ <öÊ ˆÜäž7á$7êKÛ;]{lÉHeIÌMÁ èHˆ‡îWC·í„Þ^ˆf ø9©žÎç”?Wš§c[T•víÛJ!Òvú6ØU ŒO^†Ï¿ñ2qœ¼È‡Œ‡/­VˆmkìvDHx¨-sâ#„ëFš%íêf‚¾x€ñ#×yËÙ sìÏJ(¹K}»öƒh™_‡”sÓÙ‚K…ô#¿ÞŸ:¥•÷aoÎxëà 5s­0çÀŒ¦q/ïɇזPŒh¥r¦£ÊÞ`xØذgSû¢eCû4ê§×ÏÀîÚ‘)m`:©Â­œ<+üôŽ;µ§|²VUƒ{¸Ê­Î!×ôAÝNæ­¡¢¯Oñ:5nó?Õ¢£¸t•’â§_DÜK§òÄ–`«9 Üêyu÷:uŽ“Mô(›†Ûœžó”.±AÁ'»å£ïŠ2®ÒÛ·ÔGDé•%µ9Ƥ?©w°Tè«8W‡$m,A¢å¥áXÈÀªŒñ&»Ä+ÖÒWšeñ7Lj $ÁŽYí5Þ»t6·GªcôOùDfêUDèŽÉJ@6Áöؼªœ Ù1»½ÝýQ‡}p|4 ø­êç÷›ÉiúœÄ©\ãOiØ0|À 6Ê{ù˜˜"¶r8ŽÜ.>>!ßEªï(Þda­¯3]gaM–Y¸?4æe­ãT˜Ò¾a2 ÇqäK:/t]9Cë+g†°s¡†ÚgÄ0F éË}þ‰ÑD)±3GÓé¯ìoá+Ãz’¦M×O,ÀlÜN÷;y8Ëw0ôvGŸ‹—¦Ú ˆ*í 7l?07!Á ÙUqMI—EÙc)VSYƒ†œ¸,6uFÏxîÈŒyÊPõ$K‹t–Æ¢#N#ÕX°½ gà>~°¥ |³Øí-9lêFß_KS“ ³<Öººt]¢Ƨҭ=°tŒÎÓÙ§ËÌÌž§°;áˆ#°=±9XCXm§IŒ÷v? 
"cØ»Bx­«Æ|ßöjsrMvæŰ£ªè2‡Ÿâ‡+Ä3¹Àžvd«Þ0G쥬 £¬æ ™r/fÖ[]:×cígüšt®ÇÚ ýÕ2®Ê¸‡Áµ)Vˆ”«™â§ª¤[½ÏÒ¹¤`m^^~ídzÊ÷•M¥ÑÚ‹¥6Ž”LÖB{ª GæÁYzLDúoQ¤ÏàbxZèšÁÃ×¢oôÛwaœÂÞ8ÉÓf;è5dzèWýÛBÿ ½_çŽL@Šœ§ÊvÎá ,Æo°¢Ôëc³4gy¢ô2Ï›#ÑÍ{#þòuAé§D9üÊY`¡ly£ÇTsßP¦ÍlŽU„°*ÜEMi kÈ-”ÆÂÌLÙ–ÉmÝæÝ÷KZ6'gÓÕ~HúGýà Üy!›êЊìK˜€‡Á×W½PS0 ˜£»À.I2EŽHtw¡ss?Yú1ã_¦ˆPLäÐÝj™¯[Ç{Š›à­j"X—>_è;yíÆ2_:Ra§Âϵʡõ€¥aP…(µÊÏ$ðU* ú/6à :8àxDÇ.Z›Ó×re¬J®e‘y,}Š^¬`UázüAµMÉ–6Ñë`º­T]lKåQ¨=O—Iq¦£7*=$ÁëŽÌÃ=WÖuþ–”Yî¼ÎNÍÐÒ2s¾ðå6ƒw†ß©·~X/8ügò*O9ãÐÊœÃpÖVç\ÿ¡ªë³sø‰NI?…`-ÅhX ¾ à¤ÈÂM¼ˆI|­}E˜Å-UÔíásx|Ô8ã’ä¨{x’&÷уeéêæ7Ñä8A–é‚Ón‡Œ‹)Ñ^2}ÍûmpƒÏÇÇ«UŒQr&ˆÚ¥"Fø#Œ7¸ñ#k•ºú‘Ý•¨öãHçN.oЇÿ„"üæeáoÖ¿?Yl^SØè1g1b|fõù³.U$Jœ ŒÃÅcš„«ÑîÒ!Éb,ºS:Ú¾O£ÝªÆ|ùèóág£„}Zæt—-RÚL#"“Œ<σ&Ã,ƒM ûR‚IŸl™0óŸhÊÍI±3oÎ7þ2-ׯ$q8r/“,T„—<òÀ_ü΂ñœxâKÈs7»D{J$Ûø_9J—Q°AUHVê;Bô£ â±Æ! ºt’’Ù–©Ä›¨ˆC;¥¤ÓN– luNu Ñ„8ø‘©JŠU=V¶LÅ#A|s5Yå­öšê U1ü; õw¥œ½OãÅ£/ÉИ‘ .öiÊÙ¹Û§ºÖÏfÇ6m³ßD³aÁ¥ÎÕÛœB}UWª4:¿Ãý]­ÈDC§ÊÙ©íGo7ýæ¢OR÷Í@*Ì÷¢DìñõìjÊ8˜S¢&p*õ«C$@ÊÝኀHD~5$ÇúA¾—`–9ÿBá‚/2ÍÙ8"€‘¨‹Cœ‹œ>:Ì?Ê ]\‘ þi%o·L{£¬SORbdT€ë„õ„Œ(‹ã1…¥­!㺠ƒk”j0n®n&³Sg÷÷yXè"<Àè*›â¡.µß‚6Ó]õ…%ìó- ð;uº?âð­6ý¥´½§D á‚þÂ\j*,>õq¼¯ËY¾MTåuõ„³ !âð0:†²0š«šŠz0h,Ž‹4>mŠªT8ºMÜ}݃TOc1z½ÀÂAçR²k$“¶Sa7Úל<Àmý÷2´+eÞ9wU Sì™bª»Ê w…ÖâƒظµŠ.Ãà ·v[‹9ê<¶]–ºÁç#¹·Ï¢ óž§•i\ÛY‰ Æ \Ó_VÕføOèø»‡¢Ü¢©š>¯ º†}*µ Iéþk>LÓBÁñ}ö×É·_X4÷èÐÈ-?ÚÍmŒ™cúù´ÿå?ùT€„›‹ò]¿/ãø.úßK?¦FöW8+!b|ˆ†ò½? …œÚjþcõˆ¦iÀzÐG®Wê]3ŸsA!*$4¦ a™ÍA”PÊøÁü1Z ëDg´Î+Ђuì-d}vÍMHàbÎý¼8ƒåJfK<§EÐ¥hëf¶ K¸–àmM@hÐÚ¾ÎçXŒ‰Y£€ëuŽä{ØÎ~„¯lup'‹_)¡IyKõ À}Î^ë©HM!”uº[ÕW]yÓ^+ö‰Jðç2{OÃØm¡ÚÓ|,vºj>ˆŽo«[E÷ä(D¾•4/ÆZðtWÓÆ#eC‰‡ºˆïW1Š4ŽZOõóÀ €‚A*bžžMU¨ !EFéÎÚ%‰ÙkIµÊhý=(`oΧˋÆw…®ƒáfÆMŠ–Øåm«Ã©ò@‘¾©uh? 
9öãÜ­Î×ÚÏ¥Ïu^uÁ£tÅä$îxÄdê¹ FWÅZ‡ÄºHðZyWý6›3¹s¢>#\3Í'Ë;cËÃŽÌI„(áø\'>Ù˜KZ'¯:dS•q„(ˆ¶0•ÁëߊU[×)'¼ [n‘?ÅyŒ#©4µùàúë$öÄÔ®¡ä"~EÒ‡ÿE„—C=H|â‹7ó´wHkwŠ7ÌÐÂõIØG@x»HFR¯”J¡sS}8\Ñ\$BB“>î- U\,Äøs{\–PÓ`‹ŒÌZÒj3è°=†x9™x[pi†.¯ŠÇȽM`køIúA~ClBÏ»OÆgo×Ìøœ§¼zLåLm ’@ÒãI9RÉÞîCQ—×N£&<3«HœölWá-Hé1â<4Îÿ7MÂ1)ÊŽñ‹"b“ç§þÚöUéægëŽt Ûž›&¿ñ²kæ¢\ p,sRqK-àåÇÆ[#RP1θj Õ Maá¢údŒöÅý˜iï?I°q™Ê!ð+ˆ[mäÑ6¶„’ô¬c l0YšÞ_ݳÙ#<ðصÚGàÈ]˜‡šdÉçXðùýÃòùy$±êø«49n2?É1K-ü]I<çÈ1«ðíx‰ùQUyÌcùõëóÕï›Ór~)$!›~_.àÁèx´÷™P9J)HO8.RÝÿø!Ç _‘"R2| gK!í€ßèX*  KqŸéíéU/<˱iÝPÖ…¨Þ @tlⱫéÙ? ê…KþõH½¥éýuø´ªT>ôö…%ôÍ^Y@ä!Lp ƒÄ/–챟<,ý‡p,°2(ç’ŸÃä9,Å®.֌ڤÉz›-‡A\K‡ vwGF¼ûV‘ü5™¤éHïE.ð[õ—êØ5mÀ(¹¿¦Ë,wj‚ÛÁ©ºõ¹3b´–öTzº&í©.Lã€=­f“ž¢þ ·èZg¢¯¿c³7å鞆'Šržà{%}¦¨xÒ”rÒáÑ¡ô„>¨é47'ø†#¸ì‹hÍ‘ ÚCÞþ±26ׂ՛Ä?‘!6Š›Âó©‹œ¶Š„UVƒëX«‡`=?Î_ê¨*Ý.{»C£Â8 ŸmSWªÃöPUO§aÂð$Z<ÂL/ó&Èöƒ‘£ÎÊÉþ\Á¯²–ÂUð+x[WÕ‚·X¤Þ¯ÌMKÞª´Ø!,ÏAŠÑ#…Åî»àJ@åî诽áäÕUˆèGv7y¾Ã²·ÌÂ^æ ³ˆœq“Òljrq?7J|N²4XÎKn8£ý]ÍÁL«i–×ÞÕf7fÜå-ÿóóh÷˜É“ðfê1E&c0œg?`Ü!òZ»”‡KRc1Ï"Íóyoåéœë'ÁÂsdm…w û{f0¹Â˧P!†²t¾†Çµ¯¬Uß°ñ>ñŒ÷ø‹2·…Ç:Þqõ‹ãùŒMÙ ~Ñ}f›ùñ÷¯·×Š,q[‡{Ça·×gŒÖ]ɶ…ÌÓÄÉ/! ï–ù+Q'¶`‚éÜŸ|“¿$¼ËžÂØP+¥Ã…¢§¥Š;|9F Ý:— ³›Jø`ºÁö<«ùý¢ +À|UpÚ“£°b¤Â’¼˜Á‰æÍÀÓÆs)€[píoò,ššáUx39oEàä·4ûq›ÅH{ÑQ$Äc1dí8dͰªL}…=!½¿¿ð_œÖ- Sèmë–†¶r¾'Øó S†âgV.ÚÝ-yò Ãůà9|-qñn¼ªÙtµz¶½MQ£1Wÿ*Á¥ [•9 ²Ñ`œŒ'°÷WÚKÎ_I–ý|¾L8’²À¹•—±´aà±ÚŒÍ Ï0 zÚ.˜^^L.ÀGõ±Ü-8i»U‡I~Êí5ÔW\ç2¥g†s*Ïx›(BoÛÂãL`ÐÌ¢…ëˆ"ÆŽôtà4•–׸¨^¸zN¬bp³¹EÀ‹§ßŒFnt¼g ùêßVèB»ŽoIÙlŽè2óï-j¿§«Ï‘kôdÌ7>…càÛ åØcúŒ+k‘…~AĤrøä”TŸ…Y¡)ƒóu^¯·Žõ¬}½M+a)¸°[‰ñm%% åbÈ ŠÖ–|A²J3"•éË'^ šóøšŽ`øÈ‚ˆÌq|ܰoq–Z~EÅ /Ó™!,¼.ÅmEbjkÖ;$þGêlØ}o¤´5wOœÂíš®W+bm0;ô ΃ð2|¶(¤èZëqë±56¤cakKCWl¼ç)ØpÓÇ0V¼Ò~5¥ –¯œT{è&›ÞKµÉPSíöo•òótºìMxÎé¥ñ,yCƒš¼MjvNž¤bž˜/âô5¤Óå½á3ÃE3Ä 8J§¸úŒÓC¼ñRØša nðìëÊ婟ÇþŒFJ:uÈÍÄËv+_Tºidé­fÒ"þ£6Ad:÷¾ÅѵþYñÒÀ¨¬&¦éÙéרŸýÀ<ìÕ²xHQ OÑ?}{ížðiŽÊUK ˆ Ir¤âtv1Ý™ŒOê{(ÆâFQÃu×[5}šæz“û©_øU ï§ccáÒZÖRC2åˆf×!8VfEÜ<[´¦ˆªõ9ŠcÔ~yx‰yšãL%YŠšZkYMW4ËjMÎFYÜ62{ÈV«T+)ûL¡Ÿ“¹Ú}8ã¡Ç’™à«Ä´š²–$ Y-=ÌlIýxâ¡CöÚ8ü¬eˆÞ"èI¤È VHl;—™9ËÃJ\ Ÿš–zÔZ4³ñ–ÖÌä98ñ““GÓK¤|<ò ò|üóc˜h‘ÌR‹÷¯˜gæéèBIW¥éB{¤és.wÂUC-ÃÍ€/&´}ì’ø*éÐq™wÔÕŠ-Ýó ?Wog6‡8@`örFR£XÞ%a±Î{¬ðÔ-[ÊsYƒ~6@ÿÚ¿C+³1ÉR”hvP.ÍŠJRg 
Ñ^?Âr+.§›ÄåܰgVJL1XA¸a$ö˜ÕÔ—vØëš*¬¶n=Ö?}}¤CÖªZî¬äöd¶.wÌsj¸×–Èíj‹ U±‹só¶bŽÇøv‡„,Î_àf/&çß>±éôþßxzþåôì—oÓ›‹ÓIãPÛ/ÓéxrÖŸQºÞ¸j!¨ÙûªlM$²OÀEIç{o£æÛ“-¥E5Û`Å3Så†{†Ò…²¹ð·©¢ Æò có4à*mT3£À¢Ô9£õ퉙Ö µ+I<Âe޽+X%8™fü {vöEáË…oŽwý|…Û<\Ïû SÒ%ï3¥'zð>›¿9á[rØþ]ÏœRÎòÃ2SE@¦¶ÉãØ±¤Þ¤Î« 4Áv»YÿZßd o’üYþW`0?ûר“q –ºÂ¼fð&7Xï‡()gÒtãÚ %"ðœOïœG …ø>ˆjº¿°½Ý•v-Wsu–B¬«w ‚d9ÝÕ¼öÕ¬ö^W^ûjV{5Y\Ú©(©Øvo“Ø`©¬ØëzÕ§4ˆ/»ZKÄ_-¥;åvÿ$K‹t–ÆUÁ%Múïå:¸Ô˜˜¡†VyßÊ{øuÈÓUŽá©§€ï#rg4gqx_Ø"ÇPPÿ°ÅÿÄsG`îa™v™OFyWÜ,Á2[á¨[fÕÜt¿Ôst¨}¤ët5»Ì¯i絩T­¬ÚôœÒËòÐaͰ!¢-6¯0¬Ø‡Þ½›»äÐz„Ù?P@/Ml9hW¥Žv©—#‰Ãš%jÿÀŽÀ%Åê0ìQ«RMAŒ£øÿ½]ð»Ÿ÷eh ÅÃ,ãePHN-;µÀâ|~ˆ›.ÄKÝùë±>FýΩœÈž>½|«xð$ŒŠÕ±( ×ó(1Õø ÂAŽíÀê*ahŒ”Ì(z¬ÙCX`üÄ5ù¤ý¾=¶u@Øñ¾zâEöJ|€L0>*Œ©~~+ 8¢G|E©'ú«CCcŠp÷7é™Þ~4Cˆ‚ €=f†pi4¶*­_‹¬÷X[l=÷&J{ñŽ(þ(UB6#c€Ç¡hbyÈìê^¾xò x'1mèqfmèlœÂZÍPÖµ Oxp¹8| Ý°§ÓæïIëòãÛ³<šGqx×]<‚1)}8ôÂc× Ýá Íòư³ÇD€ý¥ŽÞRÖÛës›Á8PétÂvã.P¹û2X r–évmþ”a‡Å ±Ÿkó‚뮤÷05æaaÂ47NäÔíò*ZÄj%2‰–¦±+ÚKæã–Z«¯ —’ŽáˆSÀØ~TO´¿Q•6•f±UŒ|:ÖaŠêŸ{zª²± eKŠ: Ç»3!XØÛøT.Bä4ŒÑa\ø%IÕö^vüƒciQÃ7 N¾ó "]Uû•ŠÙÙé%ŽqúÞh'öôÒV4mÀºY1e•r %丠]µdÉ|MÏNULWdÐ6• ¬êÙ÷¤[¶ÁÝôXËrÿ¦è,þW!PBW-µuHHUߨf‘]c¡£µ ‘]K޵á¹Z@ì JWá„™‘þ¬@W¹ÑYŸj]ÃtØšºVݶ45ÉÿêgKÊaR£ýQ×”ñhêóùÙ)h=•yÈëaõóE2\LyLÆ4ÍÛçiˆ áWØŠ:ŽéQ˜%ɪÞñ}$ †EkiÏèÛ°Î`{k¦è²™Œ‡Ï(£Ý«¬¢å¤ &+ˆx|âk«Õ‰pKi’…OQºÌOâÐÏn—b"FˆÖô’§°žÃ°U;«ˆÏ™ÌN«ÒEäxb\ÖtW¶šÖ߯–µ’ˆ¦x6ùžÈ7qÅXª­~làX9­1œ/ÿvü.]‹eQ¬ZIÖ[ïatËhëš(VÉ$Q…£„eεDK *ó¬–§ʲŒ˜ºi¹ãgÑàä {Sy¥Ù²‰žªy-N-þL%±£V\ZÊÖ ,ÒžiŸ§,{¾«é)04jåÁçù·—"L‚0à•¼á½ ®šø,•ÇÛ 7ËŒ2v~˲4ËoÒ ;ÍY§àÙŒüØ‚DµÁV©ÉtÇàÙ¸2'×·ÿÐÎ¥ãÖC¡(£;0uty«ïufê+zh§Çœ a§GÙøäƒY˜7i‰„§Phs§¢sæm¸KJ*Ç¡ÒAßCÔ× s»ÂÏcv_ÓýoïHP¦c#tb½¾–Õ|u”©½ÒåŤ„’þ ò~e¹™_Sgö&u®U¥MÐ&§;RBÊ,¿5lá)k°r>1BköŽZqôWÓ yÌuž©ª 3cM  ‰¼”À­Óñ¨-a­(`e(À¢•Ùx³£ã¡(÷QŒ~a°œP¶áª›È•G0Va†R7¶vå‹bñ—/_æQÄ᳨îÏœì>,†Ÿá—¿Àÿx®ãK¿ÂEee!öþ «šßs»i lÛóò†+ÈúYapí~ÿË¿ÿ{éǤ›ÄôB‰Gø TºÁuQ‰Ï—(/*ð”í+ßÑ`–è‚:é†}¿™àžzAÅùì-0«!ÇM5–ÒÕLŽäÜš‡‹ ?7*ø†é.ÙYló´²o¢Øf‘Ò´è`Z h7®GÙ›ž}³Íf·d•Õ <¼[¤gïÊêsd)2dZ\Œ«vOQ]#Ü7š‘뉷…ë±_ÊÇ[g‘Vº+³Hnœ5¬š%‘•ê5ô=Qh×Õeo”ðšRæ÷(ŒƒÜ Qíê ˆ¤wmÃÒ‘}ŠŸ¦uûU×|mÍ~Õ=üÇeÀI©ïuEœ7¿.øá@‘1giZÉÎ,Nóp§XBégпê5ç5Æ÷ßʼ…ºH»…<Æ|~°-ö“¯e5|£ÊðÖ”ªµb­ jíkÇ,E°l®I¿| {¥À‘ﺫ«¬ ßu««Ìý|Ç• ÄèW)¦ÚÞKâ[‘  
6ö8&•«",‹þê['-­µóFA¬aY‰m/álŽ·w®¸R™2ÙÙ)bKœøº&è:õç„=ƒÅ†²í+jØ£rœJŽå}¿Œvwo“è¿—¡?™Á©»ÈÈÉ4c=]%,ªt5×hÂxáÈN¯Æ9ñ×+çèjÌÈÕT~æÍÂ"ã‡:5ÕÆtW+Ä,]€7û=Kç®íÎd(Îw|5/Z’F”Àýfó[è4v­ì/ª|Ò¿¥cð/ÿÉŸÒ¨DwÇdöžR‘o'¨?!Ø_á ‚ß­Éýï徭9ÓUŒ:šÚ„ãqƒ2.Ì.îú}yÿTKivÐ2&ZtµC4jÑ­f×tz>žÑS¹B³tä]”–K_Êò=sÿG¸ã'ÁÎ< ÂXp~„ÈÀ…”¦Ø^VêéT²Û¨“r3‘ ïϹð‘v6(þ°MS5y®È÷Ž|”ŒôÑQÍþ´ÑÂö˜‹ ä½pOàQìa¥CN”‹):åðÌQ¥†<%[«¿¯-ÛQ<¯á, Ôۨ.0œ  O›sO­˜Äá}óoâ?„öªÒ“¨Y²Î±0©²¶gDðž>(] í‹ ˜qDÇd†ýy-øÏҤ𣄳³!•êáˆ(ó,Õ£ïC«U.®sÆ2ÆŸì%Ù }/ûÕŸÈ«³3‘üa§™_ô ³¯<¯ ÿ%š/ç'i2[fôú7´™ ºâÞ“t®©ÏK'JÇÄ4z°ó¤ÐPšý»öÏ’Ž¸Ðs0¼¶•%Õ䜖àÏ×4K´œ¼¶ÑCêÄJΔÆuEe“²—䙣ûû…Iv€Ÿ¿PDã.Jsv¹ ÖPë6™þ·:vøV7£Êè,¹O¯Ãûê$qКw<Üßc{´-§×‹ßSb­¾ßLNÓç$Nýà+Ø{ÏQ Ã_ÉÔ£&$ûdûú%ÿˆe3œÅ1ßC*Ë0àÅlë”k‹ÆÚ'(=ÖžÎRëæ¸{Uœ–T¢ˆ¾ˆæ´Œæ ò<4#¸lÆÝß…ÌòÏ{ëIf9Ü=–wˆÚˆüe‘I¾¼¿^܂φ5–DÃnFìj±BeMï×N¼“†Ï0 ³yDz>.gPÉe¡Ž±ûZI·f«ä–„wZ MaàQUv+HœâúV¯èŸŸG0ù4 ®ƒu­ÎѸ¹Î€q¸Ûänº¸—|aM¬&|aŽøÞ¹(¿4!.Ï·¨7¸Ë–rF•¡mÅãd§Ä¯ÓÔqJ¼ á<%[Ý®%@scH?m7ÓG›7JêØ¼ñí9ýk‰>åºð縖%S±kð[ü5}†µ ìpP˜s~ ÆY¼ B¤ú_ø?ùvƒžJÃÜV}± <ÅS|!ÓÂM¹àöÿðeî®*‡Š,LY[áÙucm-Ýic§KÑfe×Ó³_l·ë›– ·µw8rärmÒÌíÇÈojAË•Z—˜¼{àõ|ÐõHÚ­ŽG± õ)‘S„ÏÉ„ÉëN´pëTŒjœŠîap‚]Ÿ^*ËEZ9jtGŠ8ꎕ6èÇ3§ äUðò·GTøY‚UþËä9ëÀ Áí¨">pc>!¯m3Ù4¯í΀ýŸÿv"Ä„/  åÂ>éÝaºÈIa#ïµ´'Ö¥·•’íâñ5Ë$> ãîôõ öY(ô*×!}¬Î«Â¸ô¤½)½SÜÏfÅ7zü¹ëi¢ß¬{5‘vÇÞÁ®†'ÉÑtòÎ~ÀHø‚zˆ°½˜M}õ׆:Ðý"iH7Â_"úË(‡_x0)IúþÖªI5L­0íÞW Z˜gW¨kòýëíõ¹ŽðÃýã¿ÀOÌaåˆv8íåíõ§¤„röÈ—»eþŠHJ¿‘áà51š? ¼"¼Ó~|©†HÔu­[Ô³Rªì)°fÇŒî%¯Ý56/Ýønü‰}¥û勨5oN1Ÿ¢Ð$]Ô¦è`Ü‘àsÀåŸÅ÷ÑÓXeB²;Â<¹‚vÖ! 
8xãø¨±5ÉÒ`9+ª¸>ìCïžb#bœÀ¢)>þ™Eñ¡mJœoÎ4Pêÿ&AYkO6½Ÿ¤ª¼å,¤¿¦y±Â• ijåVså¢Dv›€$™VU‰›n“ª®â¼e@²ÎÜ·8;¦2(÷-™Mý¸À €U­ŸÐ­P¬Œ?«¸ÛYŸø³GsdÉ&#«»sa0º*±cî#ð-“&òñß°<ÃÊ,N‘dØÝùk›AR×FƒLr׬E5åA²óøô¦gßNtµ®ì?¬–Ѿ9!¬ÅŽ›“ˆû²éüfcùñ’¦F‹¤DÜ%G;&v戫¿ð-‚zÕÎ.Ðx©JÚv¨â¯Äcu"¦wQSÿa£È ]‹)µ¾ïI]¼§Tƒ6¾ …ëpøH®s’Žˆ ¬\É–`Ò`>Ogv<hÃÁ"vj@³ÔIHÑc.Q5ž€…½1Ó{ìã€uÍFÁúÕ;„çàoÓE6)­sBû™<6O‰}+Ê™orÊ~°‘à톧Ou˵’xf×lîû®Q®‘¥ç$Y«~…»3 œ“Ôc ®ÍœíþâtÄHÅ/—ˆ±vz8py£ÃZ|¹Fw3sþŒ*äˆôX'QٺѶ¾b’ÈxU(>|€¾…àúhe'd!µÔžä¼ñXµÚSG­§UÁ<®öä6èu||¼ú%óg¡@b`Tè— VñbÜ" ÐèÚ°F´ õçÍ&B`E@0A?>ÉBœr¹±[ƒGÎË«XМ ÕèI©ÄÚc-©z×o-‘8HÕ+ûŒÅ{ØgŒ{ª˜5†”ž!^¢«6^'Òõ}PÎpú+ OŒäk0è`ãTø’ÛÓ•lå¸$9BŽ&«Žö·©˜îIJ¢ðEâ‚טP p„i‹è¯77j`õ_põõ#3¿„ˆ\‰4ɶ˜Ì‚æ„þ-¼+j#ÑÂìQ_Ü\Ûf`(¡–'i/8ÛÏ`·޵yñ@G+ÿ«äfý }¾ˆ’ªøõĬ&êCŸ[ò<Ö˜¸ﯨæÄL’­¢ýÈÜ Ú¿™š@ xehåƒ&~6¯s;¬¬q g‚†»J$náϧRø\ÖÓq5”rš;;. ü¿óÿ—™IBi›8­/J¤gpPÉK 8È6ŸcÉL¿"{¼\¯Êà@&xž£ß±¨óáš½”’²Ž•A÷Z½J{Yjõž,ÁL˜ï¯Æö,¸#•¯mõÂùÔŸœO-‰ÝüöD$•nšVD7˜¼ø{5²’¶ ÍLÊçUW{r+½}ô)/¡?‡e0úïñޝöSDeЧ"Ú`<‘”‘ûDË†Ž¢¥*ÕÁrås*4èZÝóöhP¹¢6J?¨òêÙ f»åV^ŒE¶yúLuX.ÞpvC)}H‹ ÷¢þ–õˆj>U•HwiZÀª:´wêòõ=€»D¾j²)|O³gs3Êp "‡ê½q¬_è­Y¤¾Ôáά£\gãe4ûaɯÂgOÄ´{8rúF‡sJà8Ú×µ’±c«†ÆÀWê$ÙÓSã0Ê©6‹P_gšÇU^.Ÿμ\Dk8ç±4rÞ‚ÇÒ ó#Å"ûîxò¥z¬ýŽ'¹8ÎN+:t’¾Òòƒ—àŠžïÓëèGŒ´Ö4 ýþÆ‹ù¯žÅÐeù/M×”wûå ÊÓˆ¹Å`¥›XrmyVõåY‡W³Ùrá'…:×ïÝHÃÛÛ•ÑÛ ×&¬\“°VŽþ ž(üø<|Ò$D‰ãø4 8!õ¹9 #Ê…`^àIkâ«ùÞÏ}Å<~"’"ž!gÚ’¾¼jÜ­¡C¨S-mYiæ†/™xÊ“÷Z-ŒÛ1gü„ü1Z¨¿@0¶õW“Ûî»—¾Aƒz– ÏÅ d¡–Ô_˜ç’•Óš9#A­—°]£g30ÃQ2yΣ{P9&gj6+±±<™.Ç.Ÿ ˆé÷}W6ö”l±8Ž”½Rý?}]­¨3ÀÓ’§Ð˜ÍÛ8Ñkr}”´@_UHDX6}E$XeXþÊvÑ‚üf³§Úz{:n´§œ 7ú5‡S*ªÚc 3ä=“Gf߸˜ï–bGµ|± vbT!ð s\6Žl«g‰ 8“MgpŽhSøŒóE桘8S1’ëZ˜],…Iïïki×›¤œLªÊ’§KxHiöàË`¼ƒÏ(fØGnª%Ü:«/jë4T=¶™çµA…¢¼wÔL-Ý}¶[iü/ë).-êíAS[n5õ1J÷ÈtMÓ¢Ä'ç¢#õ#’ãx;®X­Ê$IJ~¹Tõs§—cF@IÏaf‰ô†KÂg–!& ‘×µ©dXtq]nѹ):W -`KQ5ÂYø¿9I}Ïú¦ 0„îtjm]r2"ùÃWL^`•„Ù¹1èÇÑ"‡ic;ã¼ämÍä2GŠÇj"Ucãbþ'„œiøj9¨´„5Õ%Yû¶èmtç̦Ë8ÀÊȈ]°Â#fV{&ÁúE£«©õ#£ãôüóÀR RÛâåŸòùGvï±”Ï7Œ¹â©ýªTÀ`*M©éíéUGâdÙ 0ÕÍñíå0¤wr¾Kà71õøÂ4i{e9"F´´ý…Ÿ÷4÷Ò9l¢ ¼£$¸‰JÂïÛ«Ðq¿—#xM%Jīۆ‘%rÒ/Ø1X\#´{:1bÀË7€ÒÖâ­‡›ë$s'â¦Ç²³Ò¡º‹ìõ„wÔ¸Ú{_•Nu{ZÇÊ7ì1Ié½Ã\ÞÀ M“é,‹E ¿CG?Ô“¿õMªë[ñ·Ö>œ¡¡+5æ›ÌŸ=„êéÜÀv‰Ÿµ¬Z´‰Ð„Îììüã)â7B`È9I—Bÿw ÿMÃÝ˜Ç Á"'À %—[:R‘~†ßhX*­Ë݇5š^jÚñ„å|B!/ë1·÷2Ëâ)¸$v=6¶l,ÇÞL#ÝòV’|ºãaºH 
nc5Ì#ïJX¢–sV±–§IbãAß-‹ò%+ècÒÃþµ(L¤É˜ñúòœŠH÷^ƒØäª[§žÜ7,•°“ú+„D5|!µO–dhÉZ]Þ”KŠ.N÷±\WoQ.¹L¢Yh°.Т?Þ†hÔU¨Tiz üÅÇå™F¸$)^eÏQ>{\ÎýD"7$± ¼ÃøDæz¯€J>`PœÎr'UûßmìMr'=9R÷TTEä3“uˆÜ?7èêáÍ=i­nlµæâzÃi•Ü¡)™,?•¤ÑðäÇNÃvx›êu§z©S»í4ŒÃáEè=Ê´½ƒQ_ÁyxR[(A˧ôG\%–½"ÚÞž1É¿Â(iFó¦Rˆ7m¼™ÕëEË{¤’ÏÖæ®"4AR&¸†eÍp˜À#ãÖ®¼ds’åZKéRÝ+\ÚÖÖÇvdé–«Hä%$fõR•Õ1èÚճ7·îªç”wýÌcÎôÑÐV úÎ)æÚY¿OÝ ´\æÙBdÈu¸Þzv”§…ªDüÍäøž.r©†'ÂÁ?Qb/É¿½„³²8ŠhÛ°@‚ÑëVÞQ•s¨¢¹àa]^LNPSjæ˜æ±|ç}i×äã,LçßBKá +èá·;I-ôÖ›A©Jð#úœjéê~ÜDä>]™û–¼ç’ÓÂ/03íL†nP!“Ý6ß$o^DÜ“€™Œ >ꀈ}P•@ÌŠJÒžpģϖü)/ÿ…q`ãì=Ö“³3q´pŸâ§’‰ä¤`v‚·§”/&WÁ›ÌŸýà3G¦JÁÓpä½ô˜#àFÖ§j˜¶¥T0åÚkšþc¶Ì0B›Ø)ãGzRõäàÒ‡‡zFü-»C¯ùߦ8>aŠ´šJÄjʳPßmf–ê˜ü2,ÄÑÀ´¿½™Ï»e._ݹVŸ;A_›—½s¸ ;û[šýÈ ò'•ixޤ=DÀ@?ôlœÂcØ6d‹ä )ÕY”a$|€9Ðb^#ÙT±+%ÏSG9О_r-Å—SÝkL±Ä öuešç1îÄTNŠ«¾Ñl†§ c†'Áe뽚2›Ü§î[3ÝÀ’M—Az½LÆàÅ¥‘Q’$uM)ê¨d’~(‘ÎçðVŠFÞáê‘-ØSÛŠ5Ölý Dm6àO®eÝ’¶„¤=‘ü¯?e­z¥É‹€ÞéÕ˜Wn«‰z5ÆoX’²C]pÛnfún…îí¢ ¨4ßr.?>9sšv®L†VªéÁUv{Z^jc™=†sŸ›\\@¬ø×á¿ ++Ÿ>~‹M‚ëÀEY¯¯«™‘¯¯oŸ©‰TÄ@„sÏ>XÆ¡[©ÙQh½Cv1i2YìbƒŸ—]¬.Aª’ÃèÝ^†Ï¿E íÚ®×–ûí—Œ”•™ð ~,¿èVzkÔ(ìZyKÒ$Å›° ¹N•(¹4½\zîç?.©^°ÚLöõG— Uuu€Fï¹,¸âÑpxáG1çØ¨€;IiiýÔ7Ji)ñݧøi:K+ Jú0:UÅhz1:U)ö¤ÑC€Kq™&;7¨åt¥1Ãr.Bz^‡ä;äØŠáÁá°HÔ]¥ÏV¯Çîà‹ œ™"ü[øŠ!ê°Tšà¶H­ö²,LΫÎgµêŸ}óXo‚­• ‰F0IFg%BWêÞÁVeäeß[8ãÏήÏ'Œ£à°^{Q¢çîô¸a¨öàIPû8ñ¬ÊôðH]m¦¢ýâ@{^— 8ˆAA“&8Ëcìpòÿ“4¹ÆYè· 4jX½^æv /½‰Ë`ÀS˜i¦Äìô¨ŠXïÂŒ§’¾nd‡Vnf°NÇõõ'{»û£{ÇÁñÑ€ÆªÖæûÍäv§~p?¤ä3ñû0ƒÅ~¾ð1Tü N¦#§ésò‘ §Kú.rÈF±¥gX9ÃÖ‰ž®‹N–Y¸?tŠï›Ük b̃Þc EvA 2ƒÒH¯ç"•_9“|lð§LÉF|l3ÙsÌ_Þ Èü½ÑÞºvˆ{£&Þ~L3d¿*ù±»ð: þ5ĨÁŸ¨ûÊPè5¬ ù˜R–¢½5¹±Ø–˜ÔZóÌÛûž*gR ãT{"ÇJu™ùt%;^וֲãX*[„³Ê¥³tìʃúnóüár9¿Èþ¾L­üNùPE¹ »²:èŸç<™äôpN¢ßü¨˜D¶zjéØOñºjöA™Ž 1ȉh:;ÿ2†w0˜Êôi]†ÅÇO˜IÝ(0 ÈQÍ ÍCõClŒð4ÍÇz𭃺$‘§!‚ãUÏH š£[Ä?xìÕ%_0‘Í—²oÄQµbÁÏUVH†Ò̹MâtöÃL ­(ntSÚh›Ë­ÎZ|ó_}¥ÎlRÜÝÀL”ä~àAÏØo¼0ø­p¾h¡µJôìºj‘ЬÒL”@P¯û4˜t^–‚ˆ|†'0:ÀΤC[Wº!¤‰•×B­c=¶yvc; vTÀÆÿ"—ÜF±Q•:pãØN­^¼Á¥~¾ÿà@cͦž„/°1ÿÉb„é çŠ0ú¢‡7…Ve•SÇü鈽7 mLè½Isuc;ä=ÝP5ð>d0úÜïa´zÌð>Û,D^Ë·udØà\fIû¢î1KÛ鞎³YNnù­ñrs7>J4Ñ—‡(X‡ø—ùý´Çk°ó.r›g'0ÃÄÆ—; C5;RÇ^á^Ñû°ª&üv± uQ®zà1ÕàÂ!J1gÁÝÆŠ\àÛ·“8‚—‡Ð†J‘€ò ´U4(ápÎ@°o£Ñ;çà’Él‰òô7™9ÞÐ1ò^ËKš†—,5`dÖpzvjPC@—Q™Ý‰)ChP´/S¿Jo³Åi•¦ïѯ|ÄûYá9‚¡Œtw‹4äuòToEy´ ·vB¡úH› \ 
úfòC‡œàŒò<À(Õñ·+'5§›Û—CÍЛ–øMõmÒ=Ì¿GJä´JhlajÔyz¥ý³?(½›k?ˆ–¹\ ¤3!‚>ü]0Žs\fõaŠÚè ç`^ßo÷’fó·m*1yaf’­nÈä<ÀÚ‚‹ü ÙfŸĶªbTnÂzJ½ÎuX¯Tü¡T*"<³,,Œn·P€>ÄÆ\ðZk¹+4Á”õÄ¡Ÿ\ßþÃ5äŦõß‚odTKIÎQa\uÉPºÁ`Ü+8`ÑŒ-éŒ\XyݪcíÇ|-€»ëI„3TE=6\û4̉OO?‡xíÑ6ñÚ2dîÏCÔ€Å|~EDÝ<üFòÛªï µ•|–áËÍë›É߸­dÜ׋Ëlz–¯´¤½:²*ò>ÞÞÚA걎Áäã¡ «hr,'Dç<É¢¹Ÿ½Žïï£8…ÄM‹cÌ:ؾÎÀNi=Åâ/_¾Ì£ ˆÃg? ?s>å°~†»úÿãDÓ_úé^+9(K; S¬Á)r~ÆÝd0°Hÿ/onÐy+Wä¢ÇJtGrÛ¬wöÖ„ÖÁ`t쎛r~Üðÿ„Ü%þå?ùßýÏô€Ä2Ž_Ù/±šmü€ýNb”eV*A>ûëåé™pؽ¼Âb¡^ôyÿ:ÑòÐqÒ™QýÌ‘È%°k‡¸ ­Q›ÁdIW!è 8„Ž7Zˆœ¨Í wiZ.Ì:Ã-?!*R+?t˜|•Å®!Únk1Ê{ðÿÁä‘8½$žMx…#•dƒL‰ŸÜ¤ð¢¢ìÂÛ˜Fáo» h§qá^…~“ÎïòêÉ2{ÏŒ#— á×®£Û)ΪYj›Ý³þA©tiþÂI(b×rQ/Ãç8R=Å’J† ÞÔoþ+F6]›kåwá1‡Æ5m\§ia2UsíxëÈhP‡ÈDønSÝ ü¼ªdšÊ8sœ@Û}˜Ê»ã±ÿr&`ÝÅ!˜Ênjp—iip™=•é,2˜G¿Íâ ,2},8EŒÇº‘ÄDy¾,Ñ É¦•›“n£ r Ì;’å‹™:'XãÈÛ)+ÜŸ–×"/ë/Åè­nuMÁ RÇ«€u¼BÞ«0jáß.NY·õìTÖpžÞQ ¤øÇM‡‹GXÛÁv¹ÞƤ(Ôa:hŠJÇJo£‘­1Ú­Ø´*·,¯TW¾{yïõͽ]©à”Þ*^±ßž¬äü•žS4¨^oÂk+1WXR ;¿ŸÍ@ ‰Ë äfî¿ Þk9X°Œ9Ô¦;AúÚŽa6(z8ª Wž%Qq¿”æ”»ÄðGG±ÊoI€ëü¯7çÃ[På3\Çcÿ¹ã±Šnõ‹XV1®4ÛgT}+Øb£âÕ,ÓòHÇœ€ö‘:—eõqÉ+óH?›Ðð铨ߓ}¢Ä>¡3ƒ—>d,Æð2b”À}Dà¡òíaþ-T6ì WØ9ð“ä_—QH\‚-ey÷’˜ÙÙ3;Ó«ñÜÜè?[ü9ý‰îk‘=­N;Ôúi¾2Ÿ³„Iø%X9‡h9+†ÀÑ÷VØè¨J+9ÍÊ ÷ʨy.VxLe‹,} %ÊÚ˜WG+h½2^®½f$¶{^+äÓ‘¹@Røû;L'ðÁñŸv9*?Àƒá°øß§Ùœo?UArM®Ž"w©«kè'EÝ%!N±"‰Öž‹Ç׆,lLV0 4ÛêàKà^w÷ÖV±Lë)à¶gƒE±¶à¹¬ZÞ_–rËXÑ:¯O¥Zç§’MqåIþÃ.`ÎÀ™=>û,ÉQ»&+Ìr',°ÖzoØ[©S ¾™””+²WîdÑ7ß“E¿ºêÆ–yô³Ð2I°ñDR{ÊÚYnqõö‰MiòZÿÛ ××µI_¿½PÆuw U&°ùÌ„·¶ Wy¬eIðz•‘Í¥ÄÃ(qZËâuø$–lSñEhCÐi㪙:\K¦TÓc•ôŒ-É…ë8–nÖ}ºêW]ÃùmbÉ-¯Çšˆmq}z‰9wUpÃÓH§ ë¦ÿà„Öe¿«…!0<ÝÅ4ŽÎ4q¢Ô¯¹–·v¿Íyâr† —lÎru.ŸhÅëgÆnЬÐoí1.'#´ÁÑɈ…a-œŒÜ„Xà—ý×2!È%'uÄoÙ÷tòèÇq˜<”†´Zú>Z5~U3ž¯ ÕxK…†{a‰ÊÊH€ÇDsoͦºhQc.&U¼¤z¹Èê;3Û›#%ööûÔò•L^E $MÞ€«Ú¾î fbÂŒ$¾ÞÀ2…%¾_~‰_¢Lý&,Œ_²¯ ÷Eþ;ŽV˜„ FVÑ,77¡«ßÞ:…çvóÊÃNðƒ]•&(Š.ƒ`ÛôœBC²”ˆø†<Á³š$bèï̧/'ï$"AÅeˆwD‡îL`¹ØiæßýEZ‡fÊò”“ž}KfS?¦ôjŽåI=­k Ù,<Í“cË/™dÑ“Ó V¥gµ‚6ëì‘ k=Ú+]cQ"õ(þ)0­U™eãá™eM›¢fO1´Ý-ܽ\‰.™uo§Q2ŒJ™ŒÜÏã¹ÿ²s·¼¿‡u5þ ïÏ¡H 2X¼¹HÉðÀš}H“pŽ"vÿ*H»:,k@Ï‹> ‰œ0aP$¯9ZfÓÛ$YÃ&uÌʃ%™?´˜ÖT2󤼳Jf¯¿v‰fkö£x’¥™??6ˆö€¬¸FÿЫÇÖÑ´õ®1ònöF#þÃû*T>óã?ÙíµR;ƒ'9<Ü;þ »E®>²0Â'¤Øá0‹~ñrM ×JË-ít”ý(™\xƒ«šë*Z´´óôòbr+ìúTÎì0¹&ÔâZ¼kt<Úû|L ŽB')Azd—üDÂÂq#¤xü7<õ:‹;äpÖ³–“ ¡{Mfæ#˜Þž^U‚OÚàÎepaç'iÞù¹Úo¯Ï?äYŽùÂYtÿгˆøjŒ;¡ÓÏG㺊­ 
F_«»Î|ík¯=/n“Ü ÍTÙü^ 2GÇûi7ð¼TMÄÀõL‹8ã&i*Çrƒµ˜q5™Òe6 Muxïm®QD“>tÂÁ—öU}1QçÑ3¢£^Êþ^µCJè¤{?Ž‘7©íÜjËnÑ#¥š.þyôsX¤À2BWkdF÷ìæúöÛ'ŒC˃,T ƒ¹ÿ £ÏNèÜ¥)¾7C‡eyšŠ¢¯†%_ÉrŽ}Àôï…“ žB7“Ë›éÙéרŸýÀ|ÖY2KçSSHß^ŠÌ‡·wš þ-ŒU. ™Äéìbº3ŸÔû§ÑlUoœõ@¡s¤ñ,*•ìHêHƒ^%¯-Ñ÷Iì®(BOf ,A)¥hf¥o¿z ¥u¬ý£ÜSv®P²qBx|¬§ŸìÈU}e-X¡5Qh•QYª»|#ÚÐ:²@i–p ò4L^OæÊôçÚQ°¸sí(_ñþ™f¨hËm`†+‰$5RȲeZ¢RkQÝØ èÀ^åå8‡]-J¦a¯ .µ•Z—ƒ¼¾m‚K8/ÕNI^¥㤬Ó>Ò&³ç„"´¬0Õo^šš¤å ´És€$zÁñ˜3š ‰ž³g°gÄAŒ‰sÂçF( rù ¬Ý‚1 Ï®/9·`(¦lKQ{o:é30nûWï[® XœWÏmó ƒ#ÎÐvu õ®™šw’›œ¢Ç:ù‰® ¼(^ÖDŸºh¨À-%\Âe]†’//_°Ã_žxéKe·=æ"œÞÚ|\c)³¸üL¡JÊãíëí§ÍN–y‘·*D±ôm#‡WŽ=2V¡Ë´øJ²ÆÆ•QTÅãªB„Q€Hîìóc4ãð²}„‡á_= JÊpX£àõÑ*U:ÞZBc¿DzY®mŽòVá4…ò62þ[< <ÔWp¹;Ä‘h  :Ü̬XÂs´²çÕô…¤NÐø>Á“j!è»u¢Ai¸§Ý;b4\¶ž ›¤áˆ±^J²~X6Ê€UrÙwa5„µ2ʱÿºRj´×)<æ¬L p9þWÕiÎý™rö\¥X«-L³·#Y÷ÀH¶ñ QúAšÄëü¹ž;Ø Y÷v6ç#¤œE„ZàãìÑ:K³e„ƒ-y¸?<ú<Üý eðåÔzëÌ!¬riAŠ|˰´^kµ×!ì¡ IæÿØ„¡ËaQ½ 7Õ+í0iBª·¶éÖ¬ÓÚ._Ȼ˿á9y‰‹Aµ©ºVý:Àœ IçþŸÇ¸fbMñÁO(“š\÷ü.Íáv>·/•ï‘s0(Å' ŸÂV‡Dò”‡$qÈh¾ˆC£V(Eæðí%ÉhÖ8Âöý½#Ë­‡ÞűšŽýß‚ÙTf÷LǾkÈšAYÂ+R•­EI,Î8¥ÚˆÔP ÜÄbú]tpa5™/ÞÈ'Ù ïÀþ¡âû%«oN†Õ;¥Þ«©ÑúDöêüØCËB6×PEš‰2e”æÆg‡Q—Œ‹’ê˜Xáÿ@yœØ‡ã–ùÒG]„Ë´€H<Æî½ @™†Ýó*µ« K©äj‰eчùGX+£¹€#oÀ˜4! H•<5¨¤iBO%gŸ¬Õ‹¼y6›>éÊD™ÛHÏïPÝ'B¯JwK†^¯ý Z %I¤ßѡד4ãÌÅü<ÆO xpR’ë=vò*ûdÖ<¨v‘×JÚ6é’Íüø ‘Ô'ãÉíõ™Z†%s:e˸ˆvxâ!;°¯¦„½†¯†˜žßÄcløeëEµP¦nó’·õcŒ¬2äs,ÿ¦A) äòvR:æ‚˵†¦ØaŒp¶Ln®åÛ£|+Ö£æö,ÂÕ NC*qÜrZð‹Àz´Â0²E‰¡á@‰S¢ýƒ‰øk¡¢j •©—0 ¼‘öW8+#b6I¥ÆFàµt„ÖÛDå¿«©5%R ^e¾^ߤŒÃa‘zˆ™õO;îó·ð•}“KžôT¸g8ÜÛí,–ÕJ¦¦e®hõJ»¸Zïê — LÇÓó‹»ås´§zyÂVšŽ7”fv2ŽŽ‡¢öAÕ‡ÁrB e¤‚!w›DG…À-óE©ÌÑ&žÖNæJÀ$‹bñ—/_æQÄ᳟…Ÿ#Á)4ü ·óþÇsÜ_úFô¢‘Bú•JPŠ]¨§”0Ú¢Ö7Ð];ú’üW? 
ð‰Mb¿À;´ü©òÁ·Ùè°@á›ú¬ù·¿ßL–‘Z¦T­Å …}H ñ”E˜ãÐÀã)èÈ£ß^H f¶aðšå}®ÐÜWаƒl0y©BXµ½—Ðp÷¨%Gw8— ý ~ªs„FE¯Äw$ê4ߑ޳EÁ*Ûc—B}{]7ÉùžŒ¤ ²G9õ²4-‚d'ð_ó½÷•bâÁ{lë¢f>ܪz•qÁo/‹H¢ö½µP¢ÖkÇ\äõ8 £Á"Âk¤¤¨øLÃeíà(÷¦zD»60¯Û˜«ð k akMöòŎRsIN¯ªQ¹îÞQcc½iÁALNÒ Ó®ý@£•þ{ŽÓâ1Mÿæ=øhŒ²Â8¬œ¸ëT:¶‘5oãÒQ)·_Y¶ˆÈ]¸¸róüÖ[ϨÚmL+_ÅÁµF¥©5 žwûõãmÈo‡e.+ÿŽ€ý=h¢]ÊOîí›É¢‰›Rá£âÇ&†<-Ø eQ]>ß ª ÛGŸò7µCÉß—°4sÉM2’ø£¦¤½ÔµF#WDþ÷{‰©ÕEŒ âôjœ…÷Xícj–gÃÂF&hìúŠ—7ñrZˆ—wc:P‘šYºÀHá÷,¯¤='ˆ‚xz¬+ȳr"b¥a]-$±i•ë!©qmM¤ÇåXW9ZñVi@\ƒÅè–ŸpXö¨¶_—4(þÌç(Áˆ=L”›øF–í]²;­I«”²j)lȲ7W $I¢X!µ þü ê¼k×Ì"ëMÈßÊ`ÍVÈîÀ&ªg+-»nõÔÖc4¶DºgÓ¾šXÆ3·ž›îªG¼•œe‡Jã˜g}u¶IÄ„*Lµwå)óØ&ÇAeô=¦óp‚†öŸù¿•á ¯MšÞ.¼†fD½â´0½ëÞÛ; {Šïð![\ä`£½ 'WÄÁÖ°Ý”u_ʽ+—f.•°"ºÝ·Z´Êà9†Ž„æÙ—žDó,³Ha ‰ üvvç±×æÒ¼›mr˜=ö—4ΆTp¥¥t„ȃº"Ê3‘À[ÀÎâ}zÛ €¤mꨕ”†ã8O›±*¤;VŦIR²’÷N¢Å#8àeëY¶¿[¢Î§t>ív§~ðÕO‚ç((”g÷•T­©‰}ˆö·¯_°Ü6åÜ‚lIßbù" Qê!m²ÇÕ)„vÌ®Õx7uëÜqYEh&C6¹¢´âyl3m$ƒiËëfÒÓoçQ²|µÁ§œ$HÛ ¢‰Óƒ¿f°T¶¡±&ØÞªÒ,ÉoIk^U>[Ó°tÌ5,ÑžŠ `Ó—Ç×AUA Pá_‡OB-Ò”Äû'EÇŒN`$î4Ì+@í!ƈöVÓ×R®†9.mˉOtæ;1Lº¥ÿ€ [Ž”äŒHšL·3ñ&¿ƒ2_f¢-’@– ÍÉ0 s,|ñXŒà[ž:Á}ÿÃÝ+$Z 0nC@(lôúUÕñ½ ¹&—je‘%ßôÍ*Ë ÞƒC­ƒšMÖÙ¶’¾Òƒ”†€Lç!Õ$¸-l,m¦’¼£1ZçTiým‚«Ðµ¢ÀvŸ¼žFX©‡`“ÕqY5âˆT·Ø¢JèØˆlŒ¾†à˜ÈPœ»ÛSi"yì-£ƒÝãC³bh~]毿ùQ!ˆZ#AÖ‡Z¢ú7õ¼ }n@GjÌâ^òg"Õ¾Ê&Yú«IXGvJvwÙ`¦î:ÍGN‚°5ïé`4¨Ìºíá"L0ü“X9*Ê®»o\OÝJû$ù?¢ðÙ*H°L¥•£®•tg,ùz§¤WÆ8™{ ÅÒý»•ðW—ˆYŠ4”â¼IO&Ï*Nú=Ífh¬Þÿz]”½§Š#‹´‚ôÒ¿ óˆÂ¡®€I›¹ÔëŠ‡Ž­ 1FÖ``ûnu¥l‹«ŽòÖ±8Ø`w1ù›Ÿ%(MJˆI‚øÀKU?1e`‰ÓV¼5º.T×êÃlŠš%ùx« ‘Z«Ÿj]³„£&”Ãt§ÇºómÊÄCøšºúr©2Ò§—cç9bî4*e‰ ,#!Ÿ›0pâ†äªÈå~=?‚C¯î±ò¾õj¼&û@%8ÞQ›0{»e5håØ\¾/×âZxüˆ+ Â+zâX#/«Žüä|Oœû­3Bšƒ¯s’O¢ <xÓFK·#GY“)¥#鮺éó4ýqK÷òᱤ•6´RÊò(1|Éiñ‹"”ùakTº6—uøhôÑcwÈ&]^»Î|îàyÂßC{ã ~ã,¸Í5£„hƒAy»lLY^“©D‘¯Ê¸(ßEM­¸$G¶{äz·†¢lü (‡*mïŽX©WåÝÞkÀ±š8VkCñÊ’8}£¹@1ôOâ浘ü3r2à†IN)ü±á?gt¶¤Å-ÙÚ0B§÷–M1ÁŒ7eN#x2ê­@Í’ä+¾=‹jñÙ©ÂaŸl$äy0)Æ0H{ï±Øàä)¢À k­`5E}-Ò3HI·íUê}¯O<àBâY¥zßçIY//&ª#«„¡¸¸Î©ÊÁ€¹ ·#2øŽšUÕ­ÊÿmU’€MJPë2›4ïNÖF>)Ž&e!Ëês¶€^j"¤Ð®$ɦ­à´dgëÕíwQ¥–éSü}«Î?“½ÕE2æ°ò]`ÊÆe¸rpüñxl×€ŸMž˜Oº£ËÏßÑÑÁ›û§6Ž÷Ì(2É1ò^Yá:è¦P¹A¯#CV”i@xÍÜÌ8Õ­mœ€»5¹IÉqд¡bx‹rŒ.tÑÖ¬T_`¾žÐθ¥ŒFý‡‡jÃ7Eì>dOëË\q¨‹š…j§>U D‘jê“¶ Ò€”ZΜ~øŠÉCBF>Ätsa¾~6êì¦Ë 
½Ò1Еs’ÿû”Fö%øÇž‘ÍTжµ2’r..X*m7Z×·cÉÔã7ü¯*ä{ôƒôùÂY¥¤[j³K®³ÃØpÏcfв[RY!DÒÙrnú4¿OR¹²‚¯EúxϲÈ&$Ÿ„ÙoaøÃé¦8jTÄáÚ(Û•ò96ÂiôÝ” `”É$ ä8Kô“ÀSêA%%•ñ²x¼Z)E@eAÀØ’¢XaT!%ñ% l ¯\0­”Vcš6ƒóLvPqž]eZþNЛI©:<ЩÓx'Äsæ±m3!ÂÒ  D””ÓHQ±½ÚÒÁç#idÌ¢ìk”'YHŠçNMHË-²ÀL»ÃÛG Wòݼ¹Ö[â¼=o¶„ ìýîò†‡N¹ÕÜ/`;{Œ73Þþ.(4t½%€*;21e7ÑìGX|ý‡üwä|ªÊÿî6ò¿ÿòŸüÓtfšŸ…&n#Z¡0Alóz†aüN¡ƒ’ÈŠ×{8è#¶„œ]Ù)Y¤/¢’b*Xä¡ ^ÁH?!Œ\W }†Ve˜8¦ê#nÎefv̸°A4wÞ0§™ýsE3÷âúîŒfëÒÌ}D¢¹ê¢8ïìô;ù†vÅqÀé~Vƒ°e*:dßëÂ;2‹óH*§Õ.œà5p%^¹ZîÓ4¤“äô®’p|r¯ÕÆÖY‡\“¦”_Š€‹º¨Á’ûwNOøºXï³6gÀÚ;êãŠ&ù×e+º1Vû»­:…åîàØ¨9§ÿŽâØGá€IŠHŠ“40sFV½Uÿ¦)æSk/\ð{‚A„iÆœ}MÓ}‰ &'2$TÄNdeŒú3ÔXïÉ1¸l¸7À@מ§Éý;ÿ9 S„i™AýTN¦*sg¢Ü Gtµ.=oÅϽîåNéVàr}¨ÊF–®ËE”Lžƒó0yÐÓÐÍÁ+ŒuÈ6¦ ˆþ¨% ±öèýä œ²‹¨À—R®æ©~=`;Tâ$ÊIžOÑv£" Xà9£‡˜Äœ3.ø:jR".(:²ð ù7¬ Ö“ÉÈQC ·ë±· … Á…ïÄb¦V9‡%&3„å“2ŠÕvž¼Bæ¼m™ßCÁ7r4×a.Œ§ere®‹çÀl;SQºöiÔëŠ'ÚGƒÛæ2‡µI÷÷¦Ü±Ð»öØ|ëã{Qfì2HÍ™„ÿþdÏB–¨l™4@Ô”¨ª«<Ö®Ämz{zÕtCQ)É„ôŒµÈ0 8#îÎ`©¦„e×évÿë”hrå‹p.îì§dɨAkKL}áÂWÎØØØçþ‡92æŸ(ßC3 ‘L+çî' þø~'d7E¬'…;BCQæu‘T‰ãnÄCYKô§)Á ÷D ØÏdÂ6H¯wä¾ O؆|pm™?¬oÖ~ºÒ6¬œûFäõð7°UÓçüTèL»eYmÂVÙq…YÁµµM9aÕ¸ÖoÀbß7’“ÙÓl@á}˜Ál:”>j»‡˜ú@ÚŸç,*Š0¡9µ …"âK<ºiIò˜+Ϊª2ržß8>ê²&Q@løY;"$v›‡¿óø< ¸ì‹h¥YÿísjÿÈÜûãæ~×Á.EqË9 Àm¸àÂæ&oæû”{!ë ÁeL"«ýíá$Tjù«Záö·Ixt㣽áÍK5ʉ—ê98Og?ÀÊ8U”#¿5G Æ_šÃˆ(@B£«ë“÷͇Z›µ·4íDÄvâëxå-g$GØd½¶B“rF'qBýJ=Ö‡'°NÇi`9ò¸ì7\YåÞwêΫÓÉJ[Úv%n Ãg¯Ë¼Š–_P_t«êZõÌÀø[õÍš ”ß òs6< m)‹²`ú9úö‰*i0fÕÁµæêæÇà±:S´³­sp,{ß$6=,-³ñlF¡C#§?fK¢«4^ *FcÐq¹ú.–¨øìvm<–Sqvcäw¥½i\›òß’ 骼@ÆZOR0ÓX©-2oÔªÚâf $ŸPÜ*Lf6†n®å¿ðÜ9âqCWb Ü™{{%ƒ –q‰n1é£yè+f+<ú‘éÚRmá$Í£—«ûû<,ôö3¹šžý1¡µºfðMÙ×fY\*Ž-oïÈË1ã|s¼XŽÜõb§NâÅ£cME¥sßñ¾¦ø±ÿDÌÒ¢0mŒy"®¯­éc¥7À#-¥„êß@5¸ºV~Ë5ʵæ1ׂ·Z»×27€Ò¶©eö˜+>±!¼ÁI“¹¼‚|_¿„µÍ¢™Ò/à+4x¸™Å÷ØXwž…+˜·Q‡Õ› \ #‹öÝ’x?Chèf0jìŒê ‚6z=-&m*ÖRÌ`·,h1{ô3ØWL 5@jCÇBœÚP¾bU¼‚;«íå+:¸“δ)$í–ö¤h}«æÝ‚UÄ%ï–÷ކ’5Ñì9 RÁƒñNÐÃèB°éØn"Dz‰¾ˆœ?ŽGRžQµÛÞ™ÊÈ ó¯ö˜Ú‰Ý"főʪ⃠ò«$~½(3W‹CäµKŠœÝgélGNn½ŒÌ:£RÍ€!ÞÇp‹ ¯FhâKX‘öçÖtcèªÀµõ‹ÊU¬ì'ùU°"Æ …,/¦òŒ² {å±Î”~$¯6åBT³,¾f}ß·o†nÏTó aÃüìù#¯Um-l£ÈŒQp¢nQZ!Oep§ò®si”u!Zá´-÷[‡“ôXߌ‘uäæY­Qåxï°aC+ìŒÅs&kh˛ݿ$.î©ÎbYÜàèGƒ¿)!ŸAíá¾ÏÔÏaBYµÆ’ ¢X@Sɵ,2nˆ©V”™%R¿µY 9(Ñ8°ÆèƒZÖ,¹íIÖ,$²©íp³¤¢Ô]{¬5À ÆÒ° Òëe2ÎÉ‚: ç!EzO‚ƒ Âæ•r#pSWxÓ>„SåvB¶ƒüþÊë$@¨îâÆâ¨N¹‹Á ;°âö\ 
Ð1ÉëŠÁ\µ¯¹¶5G ½ŠˆùéÕk]çZþZØ?ØÈWÇwB _hKM¤vT†8OŸ)¡¬ˆÔdÃOL¤V ;²$Èv³å“S,He´™<2·™ŠÁ"`œ=†s1PÈkdYN÷}ƒrÎ V¢kâH¹å¡¬¼é"ò|© q;Œ`šóžÌ›6âÐfë˜å¨»s¯Jè§Ã˜SÕlðú–€6 þ¸ÃWáóöÎvšvÃêáç.Ûiäp¬j£ÛÉ·Ä¿‹uPêkšÆ°û(n›™Žm˜5K¹]9êÑV•X%ù7ŒÒ•Õrdã»’(©!jSœê6OÛø€ñW€A!XÚŽ÷"Nõ#…CŠ2¾•`ø" |çõ‘ÛcXP~ãþ¡é7^,Á_ N#ícPà`Ÿa@j–"'(ýÒÂÏÀU™$tLªŠÕZhI8©i¦ zªtÉÆ{­é¢ëtrŽ3d;"^P²Ìž|³1ŠqWJùî\ûdLu7êØ·¿{¬¯ð¶åAzm…·å¥ŽXR•Žßo&_¦…ñõµÐu ÔÄ> ÙÌæáƒ‡? ‰2ß)«Å7Ë“›ÑÞ5a£É2 wàG‚žê+ 1ÌQ\~E`RcoŸòjÆ0—z`:)7êûx½<V"×õq¦Ñl ;¨¯ ýðL*þ[3í6hk7Y,Wã"-T]jiÕ$¦U»ñ³‡ÐLB£×RfU+è$ShI{YQý¶>5Mp‰xÄ0 ¦Ëûûȱâå½–3yAI‰m©lT:„²“R:J‚‹°xLƒ÷½óÕEè4^ •ß=ñ&4-Z<ÂÔXIF”ŽW€*¼Äv¢4Ü=é.øy|á/Ðê8½¼ ç‹Xl‚? ä†ïå#½?s[.dŠ&jty¬ÓÑb0$¡à²^·Õþ>è­áŸÇ‡öRT¦'Ù"ߟKóûÀ+5Š7ÈÈÆº Vz:BÖQÎü{4½ïü@¨sÃÁT)ò IîJÚ/±¦õ¢‹mâŽñ—ìáÝ2‡³ó\§ÖLˆ­@ZRÕîGªGQŒDSeé©UIƾÛ/¶Ù‚Ê`WÎ#‹†JWÉÖn=D¦LbD—‹¯Gè2ÔI–é,×a„ô/f\eškHLò3)ï ë@y®8úî¡H%¥Íâ³ K ´ÃlÇc\3y–& ¼wc“©$½w`©#·àôê‚÷q–CQj4T֛ŕá»Î‰ÕE-½™‚ëtzGvˆå&¬èNõÑîjw¸ÏÔ#¼R[ﱯ‹1%7HŠ…‚3ˆÇø–ˆÛ±€l“XT³/.—ÿum-'ð¹a«=]K©tĈ¨é9­H}9~.×ëúµ,}‹Ê—±:‹~ÌŠG¿ÀׇÔS9Fœ|¾Ü|XæKX[_ɾúø™cVª„½ ò’-q‚G¤úÏ©à 1›ß^¾¡L6PóÊ,¯èSMqçB,’8#7¢_R#@E½ÂÓóפð_ôRÍãžøä¸D²Zí‰ôµ Xy åªV0|¸ƒM÷Åóh©Q°Øà†C4RKl_\ì:KüÁëÈÒo¤*LTÅaa2àñ¬EéµÄè!·A°EÄã£ÑS|}5ÉpTãïÎ(e&<í¢5\^°TЇke°úüÙ¢3sA6Paƒ4OEK: ékÚFp²œÃǽ—¿H‚hq”ß`ÏLª›R.A$ìÏ­ûokB¸-i°jìÓ‘âJsA=¸šÖ%ûNÀ•°-§qZ1KþË¡ „Zâÿ&š”þ%ÙoX+]@–š‚¶ª³™WúW?|-œPYº`S]Ñ6ãç´'–»Dà«*‘/B\¸_n ¬'?6A;«·‚UÉeKzД۰ 6Ô¨~UÐÐ[Ù©µë‘¡Åj MÑw …Çæ±7†wW2^ß^fñ2‡}íØ‹.x('½ByËðD–/îæ5Ó¡®ò]ÕU•XüóƆxÀ¨-*à¬08ò»{£þøŽ&ÀÞŽ*|ü¯œ{П¼2Hd.ƒë63BDäM4öߢN¿„€“òZ—aC:`–ET2ô€c*]fælÜ‚6hw0ÇîᮊKÎÏN/ü…gü5}ÆW€¤É öõìò”^bÇ&ã ÅšÔöåxlåõ¬)‚ê^˜¨¤· Õ­kXïŸüDWl8,:tlsTŽÁÃs *Õå3dÉm«º‰/½7Õå2ÉuÂ)2—‹ åeae •ÄÊÅÏ$ß•¾¼+[èÏÁÁÒ[·ÑÎ ÙrÙ–Cçá[(ÂL}O ÿÔ•XXòãaÑrþ¹i–Ø3=Ö¦ k£ëë±.ÀåcmMH-c?;œ¹†Ðh9ã±>øN€Ë‚É^¡äÁ¹C9¸rÓ(6í1grÓ•¸'rÓ‡¥WtíÃh˧á,ãl'ÜØPbÙ¬»Þ“F¬Ý»®ÞN]þn¤!HAdá>ô ·˜ 0ºÁðÐNË3Ó…ç Ä3!gìZSÿ¸ŠHŠ'î±öxɆRÒ!Q>YÞŨ[ªB7v[%Ûi/²zl©»¨g–C®KHSv‘?\‡8oÀ„ª9¹Ðu’#ÿ-Ê4w—æÑ;A“ëÛ\¦´RÑ¢î¬"fè±Ã–€nåjƒ…—¿/C­)¶â1&{Iï¿ñŸœ3ØO竈ÊfƒóÙ]êҺѮ©ÚØ<ýE°³ˆ—08ì¢ gx/Þm*•ä¿…qü·$}NþêgÈm£ÙV޾­\¿ÌiMA„¦—Ýå$Mv—m;蕨‡9ñ—PõP­·©óäzþbJÙ™°dy¬Úêhm$ûYø™³ð†Åð3ÜËøG |é +×¥F•Ge]:${ôœ (  ¦MÄ0ÀÕ²xHEp@„7Rñ¥õÁ¯oAL#„£y-ð3àï¡ÈòŠþþËò•¹n«ƒz$ÇL¶åy:‹ˆÐsmС¿^žž1ÃÒßT_6H‚‹áá 
'yÊ{ópùf–V4 ÂE8Ù?û±Hi–4¬ßX `{ÌḄ^(ξ®ši ™ÄÀ|]#WËqc’=¢ºè’¸h‡¤tB¶WŵÍã“:›ÖŒl»qhy%‡¸^Ð?‡X«Ô¡—+bá¦Ú­ïQ¹­S¡Ì×Ñæ"`+ü`W{×iNgé"<½T‰ üˆë .YÚ,qdTMUÖL¹ÚL­Ê䀱“…¶íè¹ÔkP•ÞkB¾rEG˜ÇF·uq×rw˜¢Ø³oT5Ræ˜?ëhŽY$ôέ[…gieð˜K¥Ë_å}ƒÉ\ê;& ;Íüû¢qÈ׺[RÛSvšPCÒBZD²WžràÈãN£¢*3¾.ú{±AŸLNÒv|rÍÒ´”úHv ƒÞ•?»F¿hKô#p?†¦miÚEç1IGTÙls\“RˆµÜpý™áŽùsÿ »åƒHØ \º{Ƙ[]ÇR•e%gÅo¢Â–Þ5•DԲДìYË‚¿HRD%Vs«½ )3XUc®]úÄCF:#챇  Œ;‚q\¿iµÔ ¥¯6&„äð£˜7J:¤:ºá*ÐMó·ÆñÜB {(×§ ñ§à§'÷ÚHB’Ç6Z?ãå'6ᆞÚÒlZY|ÝKo *cÀ7´ÖÈWž‹ËMe±ÁžRÊFÁß"’ö1åSðß9¬½eKúEIb§à}bý#CÔ†fˆï¯ÛsqÇô§µ”–€à؆aö÷¥+\7ÊÇVÍ?'àÀ‘.¦ç<™©/,hþÀǛ݈=¯„h‰Á ½•‹<®¼P§õPUÁ^’°\üvz¸Ï‹È¬.´Úk¬°ø{šÂ 7ç¿Tßœÿò67çýǶðçó—Ñn’ïP)äç8ˆî·ð»ðgþ;ÚÝ=Ø7þ‹†»Ãƒƒÿ ÷G‡‡ûƒÃÁÞìööÿÁv·p/+–yágŒýG^¶ÿïÚó6ÿIÿü/ﱯßp¶\Mþ æÝ¯7ìëùÕÉß ý˶HÃß³¹ïÃåé}Ù•ÿÍ^Ó%ì C óa|žGJY|I36Oa4½bÃKŠázh!–ù ñÌ_.oÙ/´NÇŒÃPØy4 “¾f“ÛëÉÕôÛgØJÂMOêžž5)kà4ä¢Çÿo'‡ûа¥ ®> ±l œþYºxmò âì~ì½;ýÿ7‹îY’ŸØsáÚž®¾ãå|bgÉìó'6:fDƒ¯nû³ðxøý½½ÝOìkšxîŘíƒÁÎ`o÷½.ÑŸ³„ù¢‚᎟QqôèHKíÌ‘ Õ9=ªZTõpµ,zx,ðÆQc— ¹¦Ùiv”g~ètþ£ ùöp<˧«y~þt™&;¿LÎÙ \áOIã~Ò˜-øëäæÌï0Àw—Ãõ¢d/©ü[<§Ÿ0r‰¹zù¨c,Läçóá‹"0~æÃM/?3ówáz ,Y) ãÎátõÔØ–@–&ñ«xiíó q¥Ë‡Gø/‚cŸC˜&Ò™¡DÒ=¼×\­©ÙE²âhJìÛ?O¾M0 —»ÇQ"ÏÉÓe6¿Lðö›ÿ4^,²‡î™ú…?}üÌnåié=\Êì4›û¯ž[Pä·¼E9ãèz8a–¥ô)J Ûp¥e2ã%>ÄjUñËjºÏ|dOáï( óeLò¾HŽ‹ð.TÃåîU\­~®}f¨;R¾Ø‡¹ÿ#ÄRÜän.$‡q®ÇBN‚v÷ú™?¿wákù&pÚC+ á¢z1Œ6QÉ b±Ó‚ÞŽæZ3þ óhì¢Óþ/ñ0ù2¿Ê‡5Y/L?,qp%5@?³³{ÚlÄŽB#_ü'Þ )|)Bœ\öÀ†'àC“»ƒèÿ2®éx\­pz§ð(¤‚KÂpT¿ ŸðŒgØŽà N)xÔ/ý¢t§"/)î_B@%d¥/ÁÕPø‰°n\^oZÝ2>æX¼Õs¸·8ÄÚx½Áƒ¿n?;¾zÂÿÀªÏ—½'ÙpwwËoÂ."œ¢ËÅÀ»Íù‹ uƒ”T>W}dWÜqìà)ȤHü6»<]µ.ð/›ù7üÁM øNü…Í’ÿÃYÃëAC'Ño~TL¢`3¤9 Wø-õ`¡5x'LoV nsÍ÷¾n¸@ë}ݼ‚}Ôu@ð.;q•„ã“óÓ([½}uè]Þ8‡ÈžNùmõÎõ±w|ëß’ƒ¾†.µ7¿ý$çß·ºT i—]4\ár£.;øÏŸN7i¨Ü&ü ÝlYïÎáuÈC´ß‚¨H³otËËCy݉m2<ê­GËGû©V>õ½d>;ëø/qzçÇ)±S&(ŸÑu:”:X¶¤©——Ñ쇫î­í™ûN•¬kêŽi¹ªÓ¤¢ÆC‹ZMûå;åþ“ .žÍêÎnÚÚ×PŸÑêU]®9¾­ YûwB.nþd™éwIë¶usç•E>sm9¸¸ß›t‘ÆéÃkÍ}¯nµý¨oU>ù[Âe‡&„j¬ì ?ÔõE´»Ãß5ñ?çÏç/»ÃY:Ÿ§[CÿlÀÿ v£ÁaÿÿøÿóþÀÿüÿùÿóþçüÏøŸ?ð?àÞÿs‚ûÇò|bðMèlT`ÂëÄ›€g‰^ê²â›A{ rÅlÜyß^ áO©À)¢ˆ“ÑK®š6M?ç]ϧ+¬txô…ÊÕÝ-ê^$÷3i²ž&.nÒªÔwq“‡ÖÓ4KãzÝæÈñmY·™»¸G×·¨ê Ãçëà}¾ì¬cæ‹ÞU8ºS×SG1ÁÜ"ET”:™ãΟèžb-@=!C‰è\+Ëóžõltµ§=ãz=nV×à ã"øž¾3 
TÍ–ï|Š;ÎÍ»ƒAÕÜþQùö¿q1£ŸáæÅ<Ùuš¾3®Ú6°Ã÷£x™é_}þ™{z¸â€}~º©òâ}.­e†Ûß?ó>¸jƈ¹¦ßœsVãÏ;"Õˆ|øMæË;b;Ýæ‹þNN”4»7üKæÏxÎG|¥¦ÿ‰q‘ ‹z}Žè.ã)œ§`¹óÆŸw”wW·QpIN£‡ÉA„‡"?þÔ­ØK£d/}ÝQùñ§îhEˆ Jn *•ꎮ®YèM§ÏfGåÇŸº£•Q„)‡Úê®ê†Ÿ¹³RËËîìÑ\}Õ‡Ÿº“«0ä¯ÃEè«e×øüSwu5Ðpµ°¤!…zøšM?u‡Wà Ð;Òæ.þ¬¶Ÿ¸ËƒÑj—)™Š²M’¶ÙÉ:M&WÜ_]˜nÀüq»`ä¾¹EìÚÇS÷yˆ “Ûëó­cúÝï±u¿'aVpxwxª‰ Þu†*èñ?¡*K¨Àp ›¾[RMsùpÝßî±¾ÝïQ\8ÅMma0(l.>Þ4Û>Æ®ßÝî黵5µßépì—‚r”á8Ù Óùy3ãÃ1nÝòdná + ÿe^˜Y)óóϻՇeÔÕ®ä?„ļ»Í‘¸õŽUÚú þý3¿ºUK—2"#TØY³… ûO~ü3w}4R"Ûi>–_z×»Ôh4зL•É\BÊå-o÷‘õýó*²" ?óív`Ïè€(÷ œ©m×ëÛ³‹ ÏSÅI†µŽ–Þ.×$Ŭ\ïù1ša©zRøÖc9uHuÁ¡Pûž¥óE‰’å(gñœ‹êâNI€¹Q÷uQô4TpõX.ü—h¾œÓòþŽW„¡ƒ.ßù4ú÷{¿õ•±(ný$MfË,ƒ1õR?lŸÊ²Y7ÌËGØ›mál†F"¼›ôÌü$ˆãÕìÒÕ·rfGªž/Éq­nÑùN—þ9º:T]ÅÐD‹®Òâ)º*ÎwÞÕ=ÕÕ³ ¦H !ø*: ½IBÎ`Á©LÈ‹ÿ*,öôôÖ×)LÀ¯ö씳ÊäHïõÖSxïè¸eouÁöă SÙ¹BðÖÏßwÓ»#»wÍåµ\]¡ßMgíÎ6½kûº:”ßG_Gªæ7ɯÁâqë§v5ƒ-´#«kZÛÝMçÞ®V?¶‘ÙÙ®)w¨-ѳ„?°éÏš:h“T~ŸgõþýظwtáÄÉ`Ìû°—? ç©QÞjŒ tÔj…Á÷,»}3¿ r r`H6 >ðÿ”Žíj+ò«?û&Áty9Ö8z»OÝ ]‡‹¹£ãhæ8)üVìîî=:ü L…ô9ÇŽE3|üVQíßñìîÌþ¨kýÌ=š=º ŸãZÓ×dÖ6Áÿ.ú³·Ú’+øi;´_1…NÓ¹¿†ßý]öcTš:9¼ð~"§fëQÒ+zŽR:Åë®[+tÙvš„¿ù¯ØŸi, ŒeB¥iφ>b¶Ígys½ò‰àKö…L•†ÌEúŽg­²¿ÿC†v7ÔÕZúbï¢/{v_IJú3ödßA&~ •á7JÂ…%. 
,õ4ãä^*Ù´úë»Çpð±Ãs^t™Ž«,Þëï'low´©#«‚ ïgéF Úaw¨­&”¬q]ÙFZü@ßó·d–½ð{c)IñèØ¿ÝnåȱîIû‹`GÁ@rçîl>¼åÝî—îvá#açíî)Ä #¸]hZJªºÿáç=i:< SpuoÔô– [W‘Àþ“Ý¡FA˜ç¢àéšëÔÍK—øÿžÌÃpçiµhTÕ˜kúöì‡p{}î¨óp¥÷ÕS¹\#¤Ÿ8f•æ²XöDöà?™F‡À‡ ¤TE+¦Ézô½ ¹‰ªˆÓ:Ãu½dÇS!_‚ò:Híææœ}0q8Wg­|,ô(Š"¶îzЦá;Ó!fñ¶ñÞ4©±ú`P«6‹€Z+im¡½Ž,¡¨BùŠ3ù4±yNûKÃU?Î í¢‚BÎÎ`'mÇ2'·Ù(˜®·Õ«<ß7“¢ƒ¢ÚE׎ޝ ô\áùâµN‚PR :>gtίŸÆÿí¹`±·žÃ:0fÃþËt˜ä‡¯!¡_ó:ᤕɻp¾é»‘ž¢ööú³¿æ¦WHYà)WsØÚ8¥«Ñ¦yO¡±‚gj™EÓøº`3ZJuEÆ‹=Åù7(3ºNèù8yØÓ¤ý@OŠÊÐh3xVÅšg´ñGf4’2£EÐ ™?©Ø¯¬WjPGíü®^¯âTƒÎü5U~d4Zµ;öÓ°ÖJHyhœÂ¾'^)]•8Jì6NçQzÄ|a·IŠ»ùì‡*˜ìÓt¾Ýn”±JµÑæ áÕàîm_çúÜo囑‹‘‰ns?Œ5|õ2„b»ØÆ<4Ê9H¬ÿZBwl®‡Uáç6û®QSKgG³.]Y} z?¸ëÿéÊúï¾7F¹í$%éÙ-öÆýíÕ¶šéúþW ŸÎ»12/§SjE™‘0 P×´o /?ÆØí¾ºi‚f”Ý.A’/ ]DÄÌc‡•Ân<E‹¹ÇR>ý",Ó t@DòÌ&XàT½:B×K‡¹‚þ[°Œyè*ÈG_Ç(‘× ï³0\‰^Ó‰»Ùò‰þ ÿçŸûyqAj³a O“@­ˆÖLô“G¦Ð×i”Ìx m¹(…—å7û- jÑw×-–yé0?t–p˜—ã·Ï` Wÿ*©ûMqÌøÅ ¼[>dr4V=‹¯Ëüõ7?ÒqD}h’ðíÄ_æ¡q¸O(Ö€óäáJ¤›}° +ý,Ç`?§MÐX‚a–@amôa )ý“Õh }À‚.èfà~ÅéSúܱ7-DiæBûVb»ÝMŽoÿyv~6¾þ/µòéäxiQp~ãm!«EL§¹»;º•Éòæ·ß‚5Lú"Q®×cך³kïõ?þøóÇŸ?þüñç?üùãÏþøóÇŸ?þüñç?üyþ~–Èx 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz000066400000000000000000002524471421664411400273350ustar00rootroot00000000000000‹þÂTschema_ipa4.1.tarì]isÛFÒö×õ¯˜’\e»J¢yJvRû&)™JâË#±÷ ID ÀÅ!‰ÙÚÿþvÏÌ DŠ’ $D*eq<ÝÓÓ×(}zóìW®ÓÓ2üÛ(—OêÊ¿ñõ¦R­Ÿ”+õJ¹rú¦\©TÊõ7¤ñüÐÞ¼‰‚Ðð yN<ß2þX{ßCõÒ«ô餼ôûîøÖvƒ•k/hf»%Dz§Oôìàz}mÿ7*UÖÿÓÓF¹Öh@ÿŸTÐÿå'zÿ½×ß¼ÿß’¯óî%i]õ ºçßFäkïªõ ”æv@ú¾7ó?§>¥$ð¦á­áÓŸÉÊ‹ˆi¸Ä§–„¾=‰BJì®õÉóÉ ZaAäZÔ‡öÂ9%!õñ¦ìÇù嘜S—ú†CúÑıMÒ³Mê”YbI0§™¬Øígøþ¡x?´wæAËFh{îÏä†úüAª²mÑP îË!%AlÛe̽%À›!B¾µ‡L(‰:œ#w’ߺ£oWã´Ö¼üA~kÍËÑŸáÞpîA=½¡¼%{±tlh`ú†®ÒEgÐú÷7¿v{ÝÑ :ëŽ.;Ã!r5 MÒoFÝÖ¸×þxпvJdHéCœš2^û”X44l'ÿ€Þ —c‘¹qC¡—Ljß*ƒ˜ÞrµI8ž;cÔ±¾Kø3±§ÄõÂ#rëÛÐç¡—í¥sŽH×5KG¤ñ…Œ(°»®ï&="߯ÕÊGä«„xïE“”«`Ž+µò)›‚ž®K ˲yƒ  –Ô´6½3é’—€¾oFÈßHf@oÀðäÓ†­ùöl"pÇv¯YµéY”3E‘$ž×¹ðH¼É³ä4¶†.=÷ø¼ß#-háà# |¥2ZðíРé-&¶Ë˜@{¶k:‘eóÙˆ¹õŽHM~§f(YíØ ;ä÷sñÌKÃ7ôr^"ê{¡½%Œ9; äp{Ì5²H<×Y fxkùÁäÀ÷¢Ùþõ€Ä[ ÃÄ¢SÛeã÷ú0YÔ í©Œ®©í”t¾·:ýQ÷ê𛢔È{/òMñf¼? °ø ¹\úŠn7~ÃÁÇŒiy›7…¦T¢ÉÂÍã‚jŠ( üàÀƒA 7˜¾Ç~Ù. 
-E®É:õ½…ÐV9oއ»iDì#Ÿ‘â¯[Ïg,œÐX\&+ÑÚú±V"WÈ]|‘†…q Í wÆ CCRŒƒD°û¦yXKLøyÿNè* Ç¡.²Ðô(c+ ˜nÁ¡,ä°`=„ÒìA©ÏS-&»ðÓý]0“ õQ.v¬Hë°„YLâ ¥X@K¤;eÆFX&‘ØñG¼ØDïBŠƒKl`Ü€L“ÖAÐÁFŽ  ¶Âáí+f¤ˆcüfø…wÜ‚9‚JRÀjòô¥l0¤¶)ÔßJv‚E¦‚Ö@NA@aÈ0Ùc cÈÈfGtOL9`s(Œ¢‰€‘âí:ï¸öÄÿ[ ö¹ÞûÐúHªàx釆 ;o P¨â„ÅËvÖ=ÀÿÈМӅÁäħK|ÆeÀéPµÄZÁ N8ÎêxÆÄ<%²$ º˜•ÐåþDL÷Ÿkí­ ý8Z-iðù@ª¥ÊIés½\ª”*•Úi­\ªÁŸUˆÈeó¢CÞ[  ÖàØ¶Þ“vgØ"ï;c+Ó`2ø#¬ |1}*;ݧSµ¬­ÃœÛË÷døLøwB*à ÔQýþ¬4J•jQïÇWÀ*ð©Þ “¶ ö7ôüXtàªÿž||ë1•ÞrŒ XOeµTƒ‘Ö9¢’²8äÀö»p^”Œû„9þ¼…æø;8&ÍÁ0À?KŠ¡äã†DíÏî¯í®Ò§FÙ ŽMê‡\cѧŒüøõPüW?©Šø¯^n4N þk”áö}ü÷×>þÛÇûøoÿíã¿}ü·ÿþ²ñ_^øWõë’ Â*¼qÀ;‚éQ”ž?|¼¤a``‚[‰+J. טqÎ Ä bE7ÀZž;µgÇ„.þ{‰kÿíã¿}ü·ÿöñß>þÛÇûøïã?hq± y$GhR´bD3aq\ÌDÖá £5Ð(2€vg.¸†‚”ñ×áh aP*¨im<5­µ4­#{Aÿí¹9Ô†PCþðÜ?ÁkÆÍ—² <ްí- Ûý èÚ—’ZüEÚ—h¨À ú¨÷Bt«9ÅíË!˜"|jÃáÿöûÔêvtU2t Á¦E¤ëÀ0Q‘ptåß@Ô·‰Ö3‚ mˆ|°ƒžŽÈªB$ØH'±{_µßÂp9ôbÝÎ@ä7:A7L|@ƒMhЖÄ<=ìº{7Ú4M.®¥ÀwíÿDTÉòbÀ#}xø$Ö"ó÷X#ödd6rÉlÍÁA§H2™÷˜˜äôÊû»,* W%Âó! æžc<ì“Í‘åÜGe„þ'Ü‹îýZç^G4€èâ“ì>HÆ‹I÷I.ÿ´OÁ>_´öxî)ýùìÛt"¢ú%£áãqÝAs¸§Ü—F[ucûa„‹ÐØù†ÔJÛëÓ“çæ°Ù/°, º%˜VSh¾ „;¡(àF’ù±W\Q*aïiè¿9PêÐ÷Tr0VÜÅMª:Æ^f1'”º1!·8 ˜À”žÍYŒC/FM±¾^Ø3ÜqGŒ[OÖ­øB„*ߤ@,‰9J5ø1 ÷óÄ’¾2lHÌgûïŽDVZ9;,ÂÒbQ«»º…aJéñ›ØÈ[ Ø(H4¹¨Gj¾%§:ûš±š t ÕŒOˆú#BÃ/i‹{%í¬{Èà¬=B…ñF¥wÖ"ð÷„ó…dt=àýh©)XG—É8Ä› EŽÙ%z‹2äx+6 Òw(fß&ŒýÌWdmO£02/"ӤаÄÀ”I,Ž % r± äZÕØþ_¶ç™r@÷çª Z©üO½\Ûç^äÚçöùŸ}þgŸÿÙçöùŸ}þgŸÿyÙý¿bx2EÍbæM‚èdï§¶ÍT>÷ßüÀÔ øú ’¾Àó)å-ë^ Èv­4 VT$ ¹t%KVÕ}É^á©Ââb5ù¢Ël¢¦(xñI`]wê èTC˜®,JòbãA/ ²Öõ Àå®z}Üøðh {`ÖóQªw—-«áëó0Á§MÕ%“x.ïB3¨áS+ gø†I…àd-t€©Ê¢@^¿r'DC—‹ÉÛ•/zqUQùñÏÓpR\(ç†ÝNÊ!HŠ‹RÍ¿‚÷îé†ð¢¢8ኙaFÀ”ò¢ ]Úæu—,,˜_yúB¯*Ì1lǺŒp©îu&åEAÐY“ƒN¯*Ðù´ 'žZQ¸.fy‡/åÎØúLmQ0;wKÛgÚëkõªBÈš‘o‡«ì‘5Å@lQpŠM´ºi-¸î¦bA·ð€é0gî) rÏ2–|áFž.ÏÔÆYœ8.=¶/Fç§ZSØh÷}Ïïyúùs²°0m[iH¼¨0wÔ4i¤1Å¥…%3èÔˆœq,C¹iHÅóî(VÉäjn½ª(€##¸îêhØâÒÂBpê,Ó©*QVœ“@Í4$QV˜=ðfÃh¹Ì¤•ò¢ ýËðq.$cQ•òÂ2¹¦þE{Î Jbn]ÂnóS9=eK§áŒ];‹Ó2 ÀÍwRòÜ\&Y÷¨£BMwý¡ Å|Êîqع jV¤²p¬0N~mCT’•Z“¬Ú•Àtâ+C¢š|b?“”ÏV„$ëœuJ’òÝI቟”Ü û-sj¥ìwzÈÏ~ëÁ´<6`YA&^dë*Yû‰k&‡¸X”Ý­GnÉK¥Åe%ŸY9•6ª(•™˜`›®P¾š"mN”ò݇mâõ-P¡ÑÏx»‹LìKð FܧSÑCÔ”+&k[ÌÙÁ—>%úmu _>œU*ú²âÇ#Kz&,)W"øÈ¶*=ï$îûOáÞ_Z?¤zÖ&Fy‰rñ!W!,âêQÆ ìW®»¯¼#î[pE›]?]º{òÙ~®›”vN˜2ñ©¨sŒz˜Ä]KkÑ Z÷×VWéSùDÛÊó 
kÀ8ÿÙ“ÔúïF¹zº_ÿý×!ÛûT=ýò™çîé"ªåÓ/G|{Ô‡¥á‡à1}üøöð‰¯ÔŠ¯Ã§ÁáÆgEÉcnLÃ+#ß¿%Bßйm:4^“Ǿ|8çœ×Ø2S¼}ݱ„PõÐÁ„xË‘ܦïËÁnD\(óªKìc£(.¸®QLÅLŒ×ã:îä6¶‚‘Ÿ¦¤†—knÖ+ ?í¤kÅæycV&çc27-(0_†Ì¶ƒÞΩKÄól¬ ¯§Á‹qV=¥ìu2º³—Š{+JÓBëF Ù5‰*À.‘O-¿¯ŸËõ —ñ„Çls3È0¯åK¢ÕÃ^ÿš`ÝþÖ“r̽x“gÂ:ƒü«ß9'öç"6¢´ú¹hJ× Å/1™ÃÛòîB )Ž9,½køË‘AZ!Âó'†‰$áqlËîEGùðQÂäÀža´¹€hÙ‰Ã1ƒh¹Ä8{ø ŸÜLÚ Vë>"}¢ñ¢ÿKkX©&Àßä°R%ý³ï¤ß3‘¢w|Oª*.\†#lB¸b±ýE¬ú_k“óØ1¸™$ä[>°¢Þ¨ÖÙ§ühÒ+¦˜ÏO§y¾ã'!À6z·üÀжIý€í_9`;ËÔã&ØF—ÆÜ»¥7Ô?‚æøarOŽr"…í7™IÈ<è¢T´¢nÈ£z4½ã™! ‡L­l®UêåÿV+ÿ/#(r+~ÁŸJ}-mÒ֧Ɖ*œ&# Âþä|À£$&Ô¡ÖxÐ}ÿˆ|ᥬIçÎ0ÃcÂÊŸÊPü=+mwúƒN«9ê´Ó°ž…kzšÄ~šæ`‘™3äuX5Œ[£ñ Ù{KR'\Lp%ŸX¥~iþI‚r6G¤‡˜ä4—+ÁmºK¯ ÀOÜ)Ìç*åósoAÁÁr±–ýíá$P’Mf‡ÿN ¸!N„†¯a;Ú1 ob³3g‘À%/Ê8|Ï[ÄXœÂ F lúQExÝŸ›hæ3ÇÐváïO—4œØ^@xÅ‚úœLz éutˆ.'†yÝ÷íxCÚê Y'†/¦(h(ž`w‹óçAH$q’ Ïö ãþ¹˜RÏ|lGj*5ߌ`®,þŒ¿ú´Œ·Á¯“ÎTj¶U;õòSvÖ‰J^Ï›yî­ÏP¾¬âVNxÅ_O^OUô}סöp®±@”^ñ¢þ³&ß4Å‹F4ú±†ÄU=6|Yˆ¶qf/°˜ôhîèg¼6NTÊ*+¸->k†Zh^úJ-sn²ƒXÉø#l>KÐ7³Y!V(äáIYöo úT½ÄjbâÆÇ@±XKϲy“PxQòs|Åõùø€RVÇ|þ»P~×3pû†º‰~á ò"Ãá'¤Éy{ôVÄ÷²˜#"2áœBïÀ îûÚåƒû¿•êçÿmIO#+?Q8¿ŠÂ™#/ùRÀJ¬šV¦@Ä™Ù@¢x„àÔÑÚ¾Ê8:¹1Û‘t’KR×5½Åv$Ùâ‘âI:Ít’êÿTW’òb!TišÒC^à/LËçìòûîj:§^ÒÑ¿v¿¿±ìŒzÍèùô(µ:®é¯Ø eÖD|г¬'4¾Aœ%^8%Ui³ù×¶ |rM *Þ¦0½ˆrqÜ,ƺ𯷼äÙ?Ñ«Yñc°âÛQ¬ñ&N`޼îBÌê&ûñ¢Ø!±xòí„õ*ï´6Š·‡WMÁ£ÖÈðgÉHà¿È2h°»0 Æ 'Û¼¦l%Í.×'(7oØéÙnt§}Dâ=^*¾ýöº=Ü­(WDgÌûÑä;Ãá7~†±I®±tGºcIñ¢0Ù›8ág¶–àÆp"±z)[Òmß`Ÿ!}*5´#Š_ÕmÚÐþ#žø÷«†ßÐù?¸§lj(Ý6ïóĤ)îÔš~Ã`õ¼ñuP{¸^ÿÖN ö‹?Að¶n3ïPÓ|™a·ýÕ1Ìk<²7ímvîBß mvÂkÈÃÅ£dÙB™ØÏ¼÷›­Š”wS_ÖQžî¥<þ<”×sˆvåFÍ #N =¾XÐpîY»Fx;BWìS±*nV 6¦Ó 9[éÖM"æÍø‰¹[.Yv’³q.3Q¨~f9½7Ù»…ðþãnxµ4 èµüoEs°k¦úT ø,®VI¥äe‘’¢–EZÂ2·§+w$WKê°Žy<½;©gS”E"˜%§èäï$ï©÷MÞ|Ö®Ôd“5r~4Wî•ì_2Y*wç %Ë)éKåµâ$1§–Š„Ø:†e’L™ i<µ e&X+O%Bt$IbA–¯ Þãú#žW'Áܦÿ¤+•‰¤ïG¥ƒR1û®Ò¤$•Àÿiˆ¯jCš­”øU­*…&Us†iáì®`+ùQè…!T>¾/<Óææ6­ýõ§âïŽã>pñ¡¬¼ób»9L=d< d‘#~½mg³5bÔ¨rGæTµ·Žão°ç¡PDl·—Ö´—ò£\žý¥Zœ(ó—9ƒ‡upd''„Ì”¿ŸÉبÉôn[D=ÿÏÞŸ.7Ž$‰¢ðüm>Eܬ´/%3IÉUKÕís))3yJÛT-gl¬ @ À@-ÕgÞýs÷X…"ER"3¥žš$@ ÜÃÃÃÝ×tÊÕz¦p5ÿÖ4^Þ ¤³…‡¤ioœŠo©©-\ÙBÍãcÊ* Mã?‚÷OøŸ¡Úª!lÓiÏ0È£biÿØLÍßsé6·¹ªZÑ’èÝñ çøÿ¡ÿç77ªãUy€N÷ÿlî´²þŸûûoþŸ/ñ—‰ºgáÖtúU„,âz¢¨1NU_j‡Õ*ë¢~+oPu/¬.…wá»î³Ãæ~uŸB~s»q'¼ø{m¯J`®»Þ—Ûþß·ìm­ÄLdGt'ªÒU—¡/;'ñÏð/üý®d׋Ã]Ûö’d«¾ÍÛ³ÃID[µ}Õ„îï[°¤µf„(lÕŒ[LŒg 
@í(sµ‰s¯âŸ\g[jÑíéJ˜f¸Ô·šÛt4¯ÊýTð.üQ=´gkYäcÚfø^ï1H¬‡­VÑðôOÄôܬý‹šÌÄó¶ö·y±Ü™ÁÑÎä§ø0Ïž¹ì»{:Ú¼3†©é@¹éa¥ü|¼„á„¶ZÍ Ìx6?ͤïXã­ìãøgZ i ÅÊÖþò’h,±#ö·¿ñqšD¨s€¢Ù‚¿eŽÿT}p*m;ÊŠq-v´ß”ÄóÌ´S›WRrÀV;ÚÓ+a{{ø·èF©2ΉÊUdç*ŠñšÉÆ%HëŒ%T£GXKnì»,;n@bC@IwD½4êF ˶‚0ÀL3£M¼Ï‘¼v«#*ÿi¼$t©Â8÷DÁϺT-N¼Z6V(ÁLvhñŒÓ¡ímÌ „O-H´}}zŽåû/2=ÆÁÁ¶N–sá2mA©;êVŒÛïP=òà‘ÊROé}Œ`ƒ¥Å#]ca2Ùߣj¦øZ«Wç¢éF!M÷Mãíb'b>¾5Dò“âÀ,(ët”=‘;ÀÊd·V¬%)ÝA›Ü(•#ôžüT÷út«šýxz7Ø"¶´“Ïí³^Q/Tž³èE¢a°@ƒdo®yhéóp}uX7¾«ê›} Cßµ‚Y½–á(ÍV"üÅLféb†J7žkÈû…¤“æºÔhÈ I‚VJÕÄg&¤f)0ðÉ>y+|‚ÒÙÏmè;±á°dÝà)‘1czI$}#×z;»üýW—½þ †ÿ-…3ûÌçËîïíîIûÓÙiúT=ûTÿKÿŸÚ½Ó¹Ç«G›ÙG»§§¿cw¹Gsãë^þÑ)z´VÍ>zr}õÞ¯§²ì£õÜ£ðÿÿÑïüÁr6å£] ÛN÷´8ýGûºÿ5÷èaîѯ¿›OÊGaÞs½þŽž¿œf­WscíýÖ5e'þ(“xTvöÞi÷·ÎñiöÑ:>:Ϫ>ÔWµ¶ÄV°`uÝo4IÇ#߸ĥ2+SlvaàÌÇ¡ŽtXέÎ|eéМ H€ó»÷È»KùKpŸ U­š«+¿Ù^¢‹ìsÝ_cî¶*ãÝu-$?%‡Úú|r!å=ròð‡?»-i yÔÏ@«vð÷öñùéÞñåù^H݇.¤yu>$5u$Ñ@º–s.n3¡×%†ÎDa峓ö•È!.|ÿÔ~#[‘ßÛˆe¸ ²»—c%–”ÁQ‰d¿¹w²ÈÃûñÏ?ZöÈݳÃÑÏûýŠ©©ý,Ì ëËÅ0Ü„Æ ß;”N< °ðÀ ÞP*HDIᥕ”J絋ù¤Ÿš!þP&lþÁ½äµ™ðþd©Y•Çþõä˜áG=[†Ç:UÇYÿêØ™'Khú›cÆÏqÇj;,œü}rwÿþýáñ¯ùt #H ÀÄÏ(y4))xF\]Ý;3⊤hÇYÂŒ}LŒbé뛫‰¯^„ϰ(l‘É€1VdǤ ;R:L"+ˆ)=ˆ}Ih‡> oØ’bPç ˆÄ[èX‰) bƒ7þÁKID»Þøâk?ɾÂ~¥Ð ¿'äÏ*޲¶ÿz\*¾>¹Úc5yÕ?¾šo9× æ‹¸àØŸ®‡Ïa)Å{l‡c¾ãƮٷxænh:ÒÂ+¢Ãè@ÝïG®Îƒõ¹½+ËK s®GZ+ °Æ¶./N·u¶Õ»þô~÷ôt{N¬L°Gãï!<ËßÓqŽÑNø S ¬†§¹†^¬ ®jÇÍLjc¡PúTâìS°´`?L.¿Œ cƒt2¯¤Ä±i ÷<Í~e;,†ÿ&숔+ûÚaƒ‰ïã¯9éá0‡T@ -®‹ºÿ‘Å ìŸÊ¤IŒgÜà­7 û_Ú&P71Š¥oRôW?Ï­«ÉûÒ@4Ÿe.+æÃ&¶Õftã7Ìåð‘U?˜È#ûÖŠ`k <ÂÅ)Ö*æÜ}&4õŒ<_=ñƒcøÔòuc5%n0Ln•ÅøYoä~FÝ.}ÔjÆ‘{ç…“XÝ3ÃÊ2Ì8®5'<Í <_=ìåqå0ÞÚú$A§V€o7ã1°<ì+À>Ao íoá¤è€ID«Q¯ïÕñ8#3IÖÃgÞáÒ!ºr#/„mo€ëƒo˜7–ƒ e ¢£pvïù>î•h휚ºå;ìÑÆ ®.ŒŠ& Ä ë¢5Ø.$´âá7ŒáŸñžN&9»öÒÄ1C³RyL©[W¡ïÙbûœoÁìgh‹w¥d³ål†Ï9uÐv–g@4 ¥X¢êE JÔ´oî#ÛÒ¥á_ÝÇmÎúˆ$Éi³Rƒ¡™[FVŒ S¾}Žoí1]îTß…nÚ½ Àt:®<¸Ê«‡bÿàš I7Àñ<Œ}‹RèÞ¸°WJ3Ó¯:]ö@,üù翳àóôâø”ý[žÁÉ.vGÖ?Aï¼ Ba}þ¯ê³kXëµýü³^`>[Ë>û-½™ùû¯:¶Q—ÏŽJþ¯†|–]^õ;—í3õô$-~©ùß)œ—Ÿä{{{RpýÙü©b$Å–Ÿ¤H'{ØP0" ©O¹öñ¸þm ‚Í(&,\÷Oûèʈ!oÙÞ¦Œ“¾^8P:V5¬ÁP+ùtá÷ 8Š¿ €‹ç¾ ·8Íæ¾ª>6¿8*ÔR]<èvNÎÏL>>Ÿ>جæf^Ž—žHçÎÖÐCìxr³›D®+M-÷d-Ö÷…d:Qæ-©œ £wDg ¤RS 8œ4C4{“Š8þ×s4Ħ¡¤[Ð’õïj{px8-›ÃGZXDÿv8ºñîÈeh…0`ÙÚA–©=ÖCÙYaŒ*¢åÊ}ü™ú>,9¬9n¼kߨ»vdÿàöàχ0CRï`•#W¹+–b±O‘V”ãÅІ›h\ž…ƒ¼°t—<†oí× 
_WƒŽõãø‹Ëîyû,Ý€DóoMs_JŸ>mŸg›//ÎþÔoˆæÞÕéqGë\z>÷}O£©gÄY –æ=2ЇDáŠ×o(jÉÊñœ,±a.ßô»Ë¦Ç™D>hä’Å–¡šÎ õqÃ×Ç[þvR|>ø&òý4¯ÈÇvw™e'ªÇ”Çó›DøK„ÍcÒ²äÁ9`ô¾ôJ”¹µÆcÐ\終JVoºŽŠ×ðóV“ai:_¢^Å]©¹ä–ÐÙ¥\H²Çv±Ê¸gøŽi¾ìµÅBš…*dÛ÷ðœa‰çÛOñ bñ„âŽ?øLÆq˜e=õ (\©Gï ð¢=Ÿå™°eaýL½¯Nóà:¥èôðªHxÉ)0K˜ è=öH9G×±©42§ºa|8 ‡^ÀñEFýÕÄ m'Zð\ù…þrs‚cèý”Y36-•_éÕÆhüùãtqÞžÆ=dÒ³|O;VϽƒ}¦kGt’ж)Ðoáá9fèýO²yzHHd¢0Ž´ç¡QCvßæ¹-æÂ›aæJçìƒÀ˜¶ŽgÆ@ÎCååð±0:Z«ß èàûY*%ÒȬɃš?Èàœ”Å©•n{Pa¨²„]cL$Ü¿A¸æÅOwòž¶s€›÷#&›ÅäáC.ÏU.:Ÿ–‰L_¸Iê‡éOni±ÂâFÎ…GíyÓ;i ®Øžuëgæ-aµI4­i{jmõ—·*-2}QŒMR’ÜO'Α Õ–)úÊ|¤Pìñ¡ˆsæHAã£Yý¯|hÛqÉjAUTÖ •ÝÑvaÉmy.¡µ–FLd_œæ­8ta^†Ø.”lžÏ;Î;ÅóybÍØ[Ó|êÑWA^†_ WHÐ`å“Vncþ‰;Rlâú4™uø2Ðh#¾Gî "tnÍä=ѧfl‘« ¼¸ƒ gGu¤ð§$#é’l¤nàDGµþ>hÏl]_mχÓìñÇÒSšˆrõçŒ$uW,Nw>×¹t–fZSßbÕTä «nfÜ_óÓ¯³zX^B•S¤*¢se7”Ô$–}ËÉÁÊÛ°ÂÈýç›’ý2'ª²ýPO3ñžeÉU“„=Eú"‘$Š ÅÄ“t°D.Rá3ø°›¹+æAÁA Êúÿ ü5©"ìÂWQzzþ?ø¹Éÿ×Ä”€oùÿ^à﹕ZBÙ_³²—Loƒ™)ÓUnñ[ë^ýeæô¤‡Åà‹-#‘ç³6HäE¨,IOº´ùÇüŸcØ"ÉØyüŸÕ}XöéúoíSþσúÛú‰¿ŸXfþÙ.»‚ëÝÏý+ÐLDZÐ%ÿQÇžë²îiûäüt¤fqžŸ~Ü íIšèåñêÔš&½GýyIr`½Z­ïVk»õ&»ydŸ å‹% <×wØÿ °¿·ûgŽ{ãÁþ¼{rÙßÅtŸÿ«’-+¿<[¾p޵V˜Á¨ökÄX÷jp‡s@ÃNÂÄú iüš9[¥6¶ÓN—ñ‘m™^äµ¶àÂ\hŸå ÓØ%\¦ÌHM~dûµ‘VÏ!íüÓcRŒµ‘;´nðæŒ¯†Ž¯ë±ZN×TŠ/ºd[0oERJ…‡©ý$¼8ö&ô.žH"ý1ØÔˆè˜…×cŽ@G¼ûã¡°•§ÁOVàÜ{Nr›"Q5ÑÚýõÓGX¶ ?ú¦îåÔ‡ç1.:’„?òö‹èo^ôIÚûx #³CO´Í¢Í'Ð4 í?3WÅq½z1Ïá©cê†'òœS„¨CQÏI±Ô{Œw„iQØVxçF‘縱VSãŒhZÝþqèêHG×ðit ¿Gte•èlÕ59‡…£ºr˜(»+Zy®Ú@ñ8FùJt˜e9 “ÄÇÐL´´Græ–>µ¸AÁ6äiíš‹Š¼A“…xƒ±·ëϤœÕx.ÓŒ³M?`žÛ/=Oé{ªÞØ “ñª´ÿ§ôÿZ½º_ÍÖÿhÖoúÿKüýÄШt ú*‚s.;'?—•DÝ—Ê;¦&nÕHŠå…#°‡Q©áyæEš×’ð›\Þ¿&ZÝâ>62ÞÊÒMËL •1½–樦‰Ÿ¨g8‘ƒâr¬\FVô %0„ÊŠYúd¡1wÆð…aida¹“O”Çœ‘;Ë÷tfr¨zú’ÝF£YFsæ˜P¸©«ÂzÂÑÊÂñ›8a”%,Æ›…‹ÙS dÕöùÁÜÏ‚y:®Ÿƒ’Z7ȃ,=7ò¬JàΜ*y»0û­§;Ì‚]PÕû,Kˆ[îÈ FnùÃ0Ys”;½µFè׋_J oè%q‚4õ™|bIA>‹CQË@ч>lô²â%Z‹F`¨ à7Ö„z¯ÑKܱ1~lÜ©umßÈRÑ}nŒT-EK úNÌ~éY~pYò›™v-Ç›Ä84^HTW #P¤Çaà VÈŸ! 
2ÈW zå…Û*è˜ÒãœyÁ·© ‹,:\Pñéñ—˜…ýÂAËdÐÂÚÁä8s7GúDýøùkÃ?„ƒì(å3ѸN»Ví°pü(‘†“,ªumÍQáè».táª-+ݨdžWõÀºRÏä´*ž'aI«é%!¨zc.!’Xs¶ŠŠt·}Ò¹îI÷R3Àfåœ+c¶›zÝPÐûé@î·2pCÀŽòµe'B¹µUé[ºŽe¥ù¬¢/\'7¶#oœ‘Ow&íZªËZ“ÒOÍ6Rö´&¡ ¥ \—H¯¹ÐõÙa9cŠª†±ðÓ Æš¨Àïñâm„SÈÖѢɮf3—õ´¶Œl•¹#E–ÅÁÎÙ øZ¿ŠÂ‡Ç´yQd±ÉÑ“Œ?jRJv¯Ü6±cÊ‹C’3èßW[›„&7•Ž¢[Âeë®ï©͸%M]!ƆkœTkÏr®b|½¶½òío¹èÿOœpuÖÿ'ýÿª†°ÿ·ªV íÿµê›ÿï‹ü¡ÿŸš¶+ë}“IÿúärGRkàlWkµ4¥9Û!̳G¼ÚŠ·Ùý-ŠlŒE“€ácÆél.#ž½fjÏ­Œ•q¶sÜ}ã¼'äÃkp§(«(ÃÐÛex¥£ì cŸa`ì8¬@ó  [Ü3çÆeî$Àäc3boCÓ4Óí8¿ü¼Ñ˜‚e­#-#ü|#E ¿Ž·ÈsUèÄß FJ𱟧b&ý=RI Nò8ù…“qŠºüÁ°‚njZÒÓilæ0LdN‰¶0’'"QØI3lqSŒóƒlz{êÙ/> B—2ñKøà,çÄøÜšáö¨’A-?1WôvŠ™ßçEëSX©3âtÍPZ«šœJjÒ²‰"Aˆ7y²\ð¡ÏÑéMwWøíÏÀÛÁËáí A‰¸zFbGd| ù‘¬.øKÙ]RÇKxxšâL‹ƒUž‚R`ï™lÅO! ‰+b¸úoþRE¦3™²xðRšg Þ”^ «¤Ñ M<¢Ûw¶_nF^[5}û{¿½õÖ?­;kWÄܯ ô„ÿg­ZÏÆ6µ·øÏùÃÊO§ÈLŽ/¯þ¶òµÏ>]ÿZ ®¢pY#ªŒõtãpÜ[‘û { '”Ø!rñ¸YæÎNÐ û#k)=bÕä­ðäÀåF*×é—‹kö…oîìjrã{6;ól7ˆ]ô€c `£àŠÆï÷Ä÷+Xºz&ú/² !«Ë¾EG{ð\(éˆxz‹©ÿxv†´:Þ$v‡Á“ì÷Nÿëåuzk_üÉ~ow»í‹þŸ¿S:šØÝ;‘Œ„nJzÃŒ¬ yÄ!Ÿv¿ÂóíO~„±ÏþÅi¯‡€\vY›]µ»ýÎñõY»Ë®®»W—½Ó=ò³}S*0ÖqËüAñŸ0;1ŒË¹g"×v)Ã&¡?Î2˜QV´à~¿`¤Bbþ•Èã'š¹éÑ&g‡u{o‡µŽXSÇàÔ]ù¦–ëMðýF£ºÃ>Áv‹Ïž·Yµ^«Õvkê»îµ<@ePÚAê°x>¶û`»cÞÞø¾Z ÿ"Uê fÚXèéð8¨àÝÆdñ)¥P¼ÝpáP|ט؃ÎJð·õî" v¿\±cèáÝ6OÉj´G_‡õZ7ПØþÄ‘%®“ûpk/QnjŠÞµqˆ|=¬8Y0èñíÓ¿ ý 8Âétä”á^`€²0ð2ÂR|€P3D‘>ï]X&äïMë÷À¢ ÒS­. 
m¤Tî;ýãø”*I@w¤ùLN"[|™B!Ù6¿kÇQˆ¤ÛQ_x·½Ç3úÓcáºÒ&Û#Os™`Ž•³(Æ…‹°©>榰¡§I`s„âɼàV_VËݶ&±œ£È'>»£oŠ'ÉåæQôV¾ÖöØ%âß$_„ad}sEÑ,t$É8Ni!&¶`¬{Dü|~oÜÇì pš$‹y:}Lî…k+‰U¢YÁ,ä² J«}Ñ%f;#ÚÅWÿÈL3YÉW9ÙQ“1a)²ˆâ 'E {¬3 ÍFì(D‘8ñ;¼Ðä>$.å»4 Òäî àà/#OÇ[áòC2”à" Õ—á Ÿ¸‡ínâ’T‘g¿(iƒF*œ6Ä(që)e^‚Þ0ÊЩU8h5dD³/¦GAΰ2ŒÆ‰‘âë&î8÷„ÿ;®ÏÙÞÖñ6&K¨û ع‡ "ÓbAx×1ŸâqÌ-äßo™ôŠû"Ò¥(£²²Ñ•ß`§'yé‚瀧M5†“ÔjÊO{§B©ìl¸ºþ7Hª[Éh[dgŽEbYÊq"³®½Ë1M×É›utÎ8àiBµD‘˜tQÝ«ðóã4þ5±|ÎÚ¨ <¹ÑjÞv4FáãBÜE,Æ¢‹»îH¤¡8;”:”°ún`@Ê˳{††CžáöQ28?”)~*a°ŒòY¸:ÆÂðjðpo´ñÄÿÊN´ªÒpŸ)®÷|XÈ…ïy£V>Û8ìÏ–Í “šFT’ê`ü'•®P¾¿>T¦¼¹>•w±í8°ž•Ÿ¸t‹¢|E50ŸÚÂÐÔ5hNB[ùrÃê Ùɧ«!1 çìõWˆòì6[<¹é<Œ¸O/griîŠIÓÀsÖ¥RÀ˃T#C~fçý²ôlx Ž«©ëR|‘JìŠiªÑ°ÈaRŒÈ˜%•CÒœ;öžéü[\ef-†‹‚Ö*ä®@Æ)“Õ‘ž¢$Ÿ-ϱ¯¾»ñ„ùÃEÎ¥­åÃTˆÏ20oÁ·µ¾‡[Q| rw!–Gêæ*°üÚö¨·¿—ýÛûX­Úa侞ÿß~³Öjeýÿªûoþ/ò÷ÓæŸí‚¸%2N÷”{ž,ÉóDåX,<Öô6^nQPYà6œøgzûãIêÆÕ¿"µ­7¹áGÃd¶Ü„DÖ ¡÷Q¶:é¥Õ6ÞY¶÷Î,Ôhùq¨,PT) ®ï]4L£b ÝYª<7¼Í~b¡06qèÛ<àÅeÿôgüfD=c6vPÂÑœFhI‚Žî–äkÞ‰'7¢úK=]‰©jŒiºe}˜ÜÂÒ²ž£uê΋½ßå¦'´ÇñÉð=øt̶°ŠÃã(œÄ"Mü6Ò3| Ä àmC7ùû;êÏ?~T ½Û÷ðSìÿù;-­í-iujìU±lVœìi ‡¿û…ÏÛ¢;;°Æ°rÒYî¶) É Øß™úª<Âjz÷Ë‹¹\qÔöZR’0RW‹ 7‰mkì*."rícáŸ(4+w¤dŠ…Ô>ÌäDQC—¾ë^ûËiZ௢Ÿæ«‘(S“!•/ätˆkÒ00JíDúMhoã—¼þ¥‡+uBž`:u1]Ahèôˆ«‘»¾,E”ÚÒªøä†gø÷WœëÓ2‚½ܒд‰5 æíirŸÙn®„µÃ=ix¡*a¯tVA†Ù½îvwÏ/O:Ÿ;Çm:f›‘ý¼Žj:Ž\ôñƒ q4~'Áï™ÒÅÏ Ü7Š+BR]GÒãÅ=™uÌ #ü1“hb'ÀÉåjøùw€+uè3Äïè¥'°æv'¾;ÍŸ÷y?7I¸T•‡¯Ð£¤šõƒò±BÁ·ž–nWxÒ*)¿f§Ó7h M×xó9DW¼÷\‚À¯¸KNÈÆ'á)s² a„ÈFãºö’Þv»oåU£‰õ/O.Ù.VtÍ–oNCbJTYù©t…îçeRôÿi ûìE1àp4-7à‡aøü \íŸ+¥±ŸÊ°7ïàçûrÕÚ—œö4Í •Žœkâ•uÀx`U3¿ä‰/ü3óç‚õ0ä=óM¼’˜u޶ªy¯.yâKÿƒÎ;n:sÍýaú “UÍ~mɳ_:üfþù!Ï>=ÛÌçA¬¢@XMÓÚ‘aŸÞ_h5—E ó‚óÃPÆI§Ï„`ûŒMAZ@zÖ¥ã‘%U_&c(þ5ýJïyÔR šê“IBIþÌ/[˜2îÙ†½¹SNÞr¨ÖÎ7ÓÒaL)Å«Úý[Kžê‚×Ë{º7¹ù)ïcŠ ¡ósáL§êÆ’W5|•±¯Nq<{‚1g½öÚØ÷JÁl¥” R,íXI<ã{}³`U¾ØñdŒžE®sÌ] f—,áîAÚS¾ y^£ïÞf½Šþ% EÆ ÖÏ®…ûïROAÖ&©ç‚êÚoÜÉhFNv°a 7³ ÷Ú½³sƒ8½x4+Ok­%ÔÜk£YÓäŽ\6)-+u…=Ymevlä?zy7IÎ7úëÆñ±Rt˜9–úΑ $’-öÁþÿa™Ì葇In Ñ*àŽU3ÀT›Á-é赞œ^uOÛýÓ“ÜW_þF @ þÙ Ñ_!@ª˜H©Ñ¬ïÕŽêÕj£ \ ÿÛ«·lN®Ž,/°¥ÊÀœRº¡ˆÔIäù Ê(ò˲)Üi!X?w¨¤ð'"§E'p0>Œ^Íæz ¢y”®¥¬Ÿ.­ ³šuf˜iq/©À:ÁŠðÒ§È@9X¥·r¾UߥÈ%ã #Ú 0’Gý—‰çÌèb^¯­$õtoXvì<ßí»¾;¾f|!ŠÖÁ­±U> ج”-ú|Ùy“’>wDNgøtIàÅG+O0+R }B×kŒ-©X܆XÙi5Çhmªl&fùñúMS]Õ,£=‹gÕêôN.Ä×§+˜Œ€uÛZµ¬tÆŒ{Ï™´Æþz`ä@ñ=&Kñ½äQû 
Ó„ÑÐ ¼¿ˆLŸ=E¼,Pµ¨I*Ë¿fñl´¼«—%2¹C…TurÍiL:½ocÐnü×÷0\ør0ðl7sñïZÊ×dƉåË$?Å8ƒ)-ÅÞ|–4¿&2°*,É1éY~(B8ÔÀçkáSøð#a ®0@ f"ב¬áÜMnCgÆ€llК'×yäa¡bÊFm­#¿6ÀF­Ü†”¼£Ðw/m{2¶‚dÝ·!uÚ4·Í`M–K#…Àmûq¸æWš”N©@ßùÞÍšJ ñô#žDÏ;µw_„ÄAÀä«—Ñft „äö,pŠûyQÐŽ4Ð"×åàáKf],9Áy‘¸e™±½ì^¦Ò\dL”:Š2÷2xÊÜ}²ZÕõXØõº†Ä}ècò_4ëV¸àY“ݼ^ÓàyHgvJŒÎ/¾·«ñ­¬µeçÇ©‰äà=˜ ¬ÓIõ²‚ƒä6zÞ+ƒÕè7æ0®‰`Ð’â÷ÊŠãû02Ë|…vâ&;ð I—×¼ºS`\EæÂÎÝ”“üЪVyŠº’Sšoî©~aË’I©\Lã±/RøƒLgk*nA1J.e£ž¨%Æ•­ W8Ñ.|íwyu¯×BÜw„—Õ“`kP+%tMÌ‘_Câ:v.>~éZÇ^‹9RŠ?椴§•ÀË“fª¹êçò‹fø2 7†š¥¡^ϵùº2•ù©—eÖÿs9x\?Ü)cv>+éœ(4²}þxˆ”¦Eu4<{|•’j<ÅÜ6N6feJ¦Ÿ?>EVaJQšœ‰u4ul`+OiLåà*gÑ6q—sf„WJ,üÚ‹šò Ï—ªÖ™74“¼Uâe¡½Sæl…ˆtÄŒóíÝô˜J<¤Œ÷x]z¸™¡8¤´5¤0u*IE°ŸfYq Áeò/ƒÚÊg+¿^×- Ïò¯ $q”3'2H¼- e- ÜÇä*Z9,$'j-gÍ'©£„*RÌ–Q‡IÆÌ—PÇ”…ZJS¨¢`•뇆3QvaM%¨7SË ¨uZ…d†HOŠEf»é“T6eí—“†?ëTä‚6¹ 1(àIâ)ZM%'„œ<™g“³2×”gD0}Á¯©D’0&^“tÆmiÌŸFb?ÂÖ÷ªŽEE&+}û>Öp w0ñy­ðß;ý¯—×X[«}ñ'û½Ýí¶/úþ¢Š~»w.ïÉ}¬¢ ÃŒ@|Ä!Ÿv¿ÂóíO~±ÏþÅi¯‡€\vY›]µ»ýÎñõY»Ë®®»W—½Ó=Ösݧ05 \GX¿(±ûže™8o2ö&Þd츢‰XŒü­x&oPË—®ør¿9ùðÇødlψ§×6–|‡hÿ‹­ë1\‘ñï?ž²ÿUYû_c¿Z}³ÿ½ÄßB}ƒÖ n: %¦$ÚgïcœÈ^#SK®ì¹¦Yƒn‘‰ŒÖö@ÐU@µÐ%«®£1“Ûü®D$.è¬xÐÃ×fñ´ z™Ð¶´Ãuphc"á/ªÀòP×ð`“ |÷3Ðï€â&í¦Ð‡}¢…«›¤õ…÷ïL~eng°,Ï(É &jpÀ'R±.äèÇ!t?,5PZIyÄÿG& ´uÊ¢ñT`þγàù­äqŒç² `Þ&Éx›M"?¿÷VXÉôîa ˆ’æ_¾â¾îž¥çih½K³#”‡‹¾u³sÝí§W=þ4<µ=q‚fQœŸ@ ÀÙ/¡zÛI„{Àå×#«†ÅS.±¿¼1amëþÖyAZ†ý0DÓÝdÌÝÄþè¸ñ7ØkwEçñÇ÷[øåí=èc{‡ŒMI†ÊàmT+pžnM¡é…¹§î+cÏÏþÕDþæ™—zɼÈd5éÄ\”ákZ¨;«,-”å;˦¿çÑ–K9TvR>ûU^öHò%aÖ™H› ÌLŽ@~Á…FîVDöC×r8ø*aœæ›V+ú¿È/bC7á¦x¾Þ=ôôÞËIweÔ,]8˜šì|°Ùýñr‹qªoª‰åk”˙Õx’°<³Ÿ@ƒ¥$P2Uñ#>{´L£ÛÔCÓ‰3—czH½ìpÑB{ b ÑìA¼„:YŠ1I2GÃ:=É?¶—5+éÚKH/I¦Q|j¡âaîcÜçFé~ ã%£{µ«ó°†«0Ú ZG&‡áɑיpšO'å¸JêÃZ¹¡ÉÏ4Úå‹ÿKqJÚt<ìÃîÄ_ïA×kû5Ï*Zb[ÁÚeåèqÈlYÜ8ê ¾í–í¢u…yøúG„åãåO^ ¢–Fì2©ç yKQàriØòòaÕ8šÝ–èŠyh3v®,tz¹Z: ©ìc)áÀÉÅ %ý"5ãÜ¿E•”e½4<8h $¦+Ï“EYšŠÜÿýÒ®,<( žçš—ƒ”ŸôZ]—Rßn–œÔ:8*€aÉrR½Ôï~90V `èGV£ka'„4!‡µ`>Áž·ì5¿ô× ~¹"ƒP¼9{Æa£d ̧M ¦f$Ý0\*‡Z­¡¨uØ*€!q¶ìåȵ~[GEÓ¡&–¿ÞpîÀq=v juÖ] i¦Êž·ªÑän­Ç[¯jÚ) V`üÌŠ“s:,_¶Átéø.’6Jö>,’6눰uT$-ßš´ZŠÄ¢.¥Ú[w®yT$µ'I¸šá¯rö«:,Ÿ}k¸L¹nå£7ØQߊ¿­5åìWôíê“;ô‚”ò#7Þ ¿‘}M=X¤¢9‰† åûÖãÌæ |}%t_µSøŽÉ ñ³ n>€ûáè]]šI²«ÜÍÁÐÖÖ–k”Š]BØåûó9ä¤Þ_®Ó÷F"þKå¹ÌÜ5ò]΂’æ V¼¥ãi*žNƒÙE½ïKXâ¬!î¹AÒÃÒ§DS“ñ‹/—ÁÀáSë)™¬Rx 
€ävr如­Kò&ò°Z´‹Ÿ¹CË~<ƒKömŽLR¯5ŠÀÙ8W¤CÍ/Œ';î-ÕXÜ2‹_ìÜñ°Z´#ß'/PV‹¶dŠèKý&bH,•mˆ¥7ƒã¨hçÑ(ió7×Zõ¨hµ|šÄ¿[^‚3µéü@,"õžKÁDWÖ$v¿0aÓ-Ò”±îíÏC6ÈžtÄÿth÷f23—̹O½:<õê¾&HX±n»@:¼(ñFÒÊòÉŠÝ“‹¾iÝ(Xö³°ð pKM÷å°\E^Q2Ðõó7 âc7J0vt7ô5rÙ¸jãqÙ ~uscm¯5¤^ï¬nŒ^q0ì`¯5˜þY¯f ^g0q싺s/ÐÆd´¿úЬ‡â¡Y+w$ŸBOǾçI{’Üf +½óŠÃB¢ÐŒ³C4ï¾æªœ>ÎÆÚ ´~ìoîr,M´¿&K†Öxý¡õx.G×)böþë Õòýðþw×úÆG¡ 2sç1Ù¿¹A}ÔøŠƒâ5 ©;\eGgÞ}Åa¶íÄ»#sdvˆé×^™€” PŽâ«r÷sõÌ 1å¦gM’Ð Ör¼RLóů٬KµŽj;Nâeo¦C¾9¬øó)y5é) ©úÂY1Šu§3‘yÝ™-E|ÍQ9HLã•ØZ3ßqäbÚï<Ø>¾wøñ¥nwD³2•/ úźÏz`~ëø Ô*08û®t¯[k0ûþ”±_„ɲ€s,†ƒÚ7 nÈ$ÔäHw§‹LÅÛÙêÝRj8Õ‡¬¢QfªŸZ÷7\€ê‘ lNÈæ÷Á¨óSöì!Ó†í%Y[‚Ea´ÁðdÓZÇK6žl’s!„ÃHÔjŒ{×Ý´h€.+š)êû€ËæCW¤ø=—5¤–Òåm8Ù,Ÿ•&ÊhrÓ;ÌÉ%ô¸q€• ÈF›¥„f¥2RÜ4¸²BQ⦕“<ЍpÉÇ«‡*+‘àÆ•;Š8áÆ•7Êèo,U)^¹¬q˜•5ÊhpãËŠet¸i€M1c„CÐÄ/þ-{£VXÎ;R§Ã*gÑÐhpcÊ ýyÖÂÝLȲR†Aƒ› XÎСÓá&–6tZtÆ^´‘òÆQVÞÐ)qƒÁÊ64:Ü`°r¢F9n˜´Q¯f¥rJÜ8Ðr¦RjÜ8ÐÊRà¿!º2nž]´^-=OÙd JU6¨iuë9îî$¸µâ[×ÙßotY©Cƒc7¾÷{ƒR¶8Yaƒ{ëmNñ!çzN lYeÅb|C‡Xº]H6ÉÛ¹^+;Ù( ²B‚óX#ÏÆŠçGV"˜ÄnnT!ïZvûG¬’ËjÙÍ>¾‹ëf£@Èmíä‹> ¬Ir‹E€¨æÉî8”ä àÊnê‘û¯ ô¸»öþÙ<€rg4QV£p `£@Ênó~h[~/“¯ÚZP¯e·ù‘lõìÎÎAØulr=I_ñF +õì>gùºëîÚî&•Hr)p‘à^¹QPd7{‚bù¡Z+†"»ß7ªæ"ÝíïÃèèïËu-^ýOàPΪص"ûöÆZ®<¿òÅ“ó ½—BÖâ ãÙ=Ÿ²Ï,=¯âŠÈîødTYn>èÕ‘Ýì1§½Š$—«…£Yê@¸QP”9 nÙ-ž¯6LKÌ'M"06SÉRù’‚€“Üb"+¾¯oÔ¬d÷ô‘õÀaÁà`L˜¹QÐd7tôŸuc;òƘ”q£@ÉoéA “džEÐÖ›ù”o”9nS'&»Ã{Žï.¿ÀËÊáhewx/¼ñCûÛ&‚R+&‰Â ª© d÷úaNÆîånœlZŠz+»çÖpÙ•IV AÎ8?}Ï~äg ŠÜêeÊÞNüÅo,ÿŠÚ(Pö3 ðº£ÈÔ˜Ažk³êmÕ[ÙíÞŽáйQ`äíÜd»îƒe'»ö’-]«†f?»Ùƒ 6ñ“ÝäÞµ6)Qm}?·ÕËWao¹è]àŽ)€}£ Ênû ·Ü`±£Í’ô÷³Û=€¹þ¼pã Énû˜­p@µövQÛ¸d`õý¬¢Ç‹iÅɦm0û¹œEÁ¿&!0€x2ÞÝ,oÕýœ’ïFÉÈïâIÊfAìOÉGä­{îäz.ƒa5´þƒÏç,T40øì†¹÷‘«9صúAs“–s.9¡H þN°›D–ýmØm.=!Æ4xKaÕœ)—‘ЃMÏ lwÃüÓr™yÂØMƒ"çeÚß6 †ì¶ŒÆ›BNKÉcÓ`È;Öyƒ ƒ!—ðÆÚ´ÕËþ‡*ߦ9iæ2ýE“`Óæ!»U÷zgöŠJ¯’\\ìïÂfg£:,µY1®¹~.¬ e¦Ê¥í 'ÉM …<w½pwsiûad»¼F…ûÀ«:nÔ$ewv«5ñqÕ€îg¢Õ&Éï¹$}˜…jwt¹aêT./_ò#ŠoCÉ…W U.þ=Õ4_wwìF^¸Y€e¥‚,`2S‰yÔ¼QŒ"—£/Àšyì³›˜M&——/…m÷›v‘ÏÆÇ+ŠÚI®}k^<Ú(ñ!—ˆÏaäîÞyQ2±|:ÊÛ,€r¡v!ŠEsÀpÀ˜ó'—‰³…]î&.èl”žKÀ—VƒuÈ6šF.åžMÚȲQà””SÜ<Ç‘F.­÷pßÅ’Àx&³a;Q#—POpmTõvãoî&¥Ïjäé‰LM‚Þ6i÷iäòçGƲ¯¶·QPäSêÐæ)´ÖÝpŒ}-¢*/ ºâü:2 ð^Ü,²K¥U¿ÞŽ7k·IE½ì­k{±Û—0]M¢õØz~ªüÄ´.§C ¿>æ×e Ö÷TŸ¢uÞígׂEV¾ iŸÀ®¯XŽÙyûOøPè9ì=³ø#XŸÞ¼p""9p mÏ Ö¬ d$ƒ+bÔsŽüº×‡РMãÊJnsPï?O;w 
+ÿ´“kú»V´NåšuôeïrMÐÊÑoÞ=¦ÃøväZ¹³•¨—?1éVÚ™š\d†)é=‚ˆ4¢n¤N+tWÍÀ¹Ê,ÏîÄwÓÛ“3¶z þža÷Òa®iœk1‚;™`>ð%üÇË£–RóÜIýÈ]£ñi2ÐobÁ(Ê÷÷—]º½ôäÞð¼áé=tÃ0:5Ú:jY‹eðäf”yêØw­ {ý[¦¹}F‰q/ý¦Z@iÓ'/pN.ø:L¬${û©¦Ï¾5ŒåEߊ¿ežíº FG–ŸinO’и¥ €Áþá[™èn³ÜåÞ>¾µ‚¡{ŒÉ2wx@ÐW<¹Q»F…ûÌÝ´^|É ëa‹6Õ²äÏÙžM¡Œ¸²"7H®ï_WlVöø³vœ¬bý4 Ö2ñö¸ÐFòÜ"Ýó+à"L¼ç:ÂûÆI¦ Ãæ³k"²‚“®u‚AX¾8ÒÆãÈu0‹©åÇŸ»Ém˜‰XÿR\§-eHܶž¸Ý“ÜÂâÅÂ{6(rÜ|rW¤+”àVv?…«ÑäŽ~ÿrÝXqr:)2µi®–H_‚qðÑó`%“qæ)ì–ö4pJïÑ»Óî&“8s›ßê°¤¾âìm|Å9í«)X?_6qO‡ãÞL†‘$ó" }šÄ¿[^‚øË ¢ó;ÆYÎŽ» ÀD]YØòïeزWq6‹Éñt§Ï)üÈ^¾FÐÐ’Öö¬ØBÎBò'ï¦ë݇DèÍŸ¬Ø=¹è»£±/·Gqç3ùp¨;)‡÷¯"/Ä 6˾S.Û»8¿Zvx?¯e`ëe4´ï/K± l< m³d8;Q×YmÛ” ç°ŽÜH±æ´‰Xóó1ħ=Ò—9Á*ñ”½¹ø6tìF &$$0~uÕï^ï¬.4èGÿ¬Wƒ°š„nÈ¥ í„ ñ†Xû:»Å~Š›•ƒ—úò1Ï6¤^Ì\÷&cÜë\'m'£Ôï®õ7MÁ“‡M¶ Œó›‹c†ßAë’°\ÁøÃÀòa‰*ekÛN¼;«Ô1«QÉ413àÉÅ•¦u/ Ï5¸‚–N¶t½h²ý0v3mŽõË3à´UXùR’OºÁc¦‰?æ3AÃÂøIµ¥y£?‚]¢/É­VõÂwõËôá¶aå÷».þÌÜWâSáÛê.wf¾Sùýoïã~Õ²í„'ØCäÿXö_þšÍ*üÛªV÷›Ú¿ôרo4ÿ£Vo¶šÕVkÿ?ªµfý þ¬ºô‘üMb•û”NÇú«ô¹§îoèßOì8?FÞð6a[ÇÛ¬^­±¯î½ï&Éî(ŒVä°  ǨüÀ㣱<î°³½«=²íöo½˜Q~µF ~b¡E‡ƒä·_Øc8a¶°«/J“³—0+p>†¼?Bqý›&ºnrë2ØMG1 tñåâš}qµï«É *;ól7ˆ]x[„²:³`x&ØÍ#½úÇÒcaŸÑדkñÈÓ: ö7ìàøü°ß½çûìÆe Ô&þôϲß;ý¯—×}Ö¾ø“ýÞîvÛý?gA‹‚»€:Þ“B*%0Ð×@¦ ÐÁùi÷ø+¼ÑþÔ9ëôÿdaÄ>wú§½û|ÙemvÕîö;Ç×gí.»ºî^]öN÷¬f‚_Ž6€¾Fa„Ì+±^³˜LQð“Ò3‘Ë–m œz¹½?‚ôù0ÃáOíбŒÊcj|ýC…‰] =1ŠDܘá¾iî”1BgéFÓ¨â»&“gÛ;¸±ð[½É åYÅU¼ ‡ãÐCÛ:¹˜‚cô8f2ØìI…ü×R”´ ¿7f²ÂK ÞîÊz þq{aâ.P®TMC¾•!ª˜S`3/¢a¢AƒeT¤hL ‚íë?€‹·» ‰°d–·g¼òôî½øßÞÇV5ˆ±ÈÄ]ô÷”üW«Õ[ÕŒüרß“ÿ^ä÷¿O§HˆÇ—WI~í³Og—Ç¿JñèꙂ]V¬ƒþæìžæ AâM }‹Ž`‡g Ì)éérô6¿¤7EÎC@J%=!èÍ)æ±eÈyš”Gs—"p9¯HÊ;bdiÄ©»ò-ÛÝa½ ¾ßhTs"^ E¼!á!<ØÿÇãuX|³„a«”I;¬ ð}µþE6xcB‰uâmˇ޸‘óݶCÇåHÑ(…D\ºà 2øBª¦öP/ÆßÖ»‹0Øýru"§ã¾ÛÆ…ŸÔV ~:´ÃÑ ì¨”ö úó¬0…û8­˜ûp÷22TӞ˟çä‹B¸Y0èñíÓ¿ ý€'ÚÈáq…56ÈÂÀÈKñAt…“!Êý!€xïÂ2‘¶©0¯0&:^¢3±ºÈ¶ˆ;ýãøôªß¹¼€îH%ò™8œD¶ø2>â26¿kAÁ@Òí¨/¼ÛÞƒ5-#MJš,à<äÙx(‡'ÂÄãƒE ØQHW^`CO“ÀæDáHp«‚/«ån[“XÎÏF†WX:Qxã*r¹y½•¯µ=v‰ø7ÉaYß #~DÅH[–d§´€Ó7(ë?Ÿß÷1;\‡&É¢ 3 àŒQ¶$Þ‚ ‚YÈeA3„ÔBkÄ/©Ñ.\ÿÈ”:bú*';j2&,EQô¤tu´Ùˆ…('~‡·š0ΗIØ€ x ÒŒþ2òtì¹.ïP1ÄcKZ¤!£ú2\á÷°ÁM\R€j òì%mÐHʱ%N‚ã‚Vš} z#]‘”S¤=´2¢ÙÓ£ ‡±ù.ŒÆ‰‘âë&î8÷¬ØyjÀ~vîáB ÿ¥XÞuÌ'‚øí÷ŠÞo™ôŠû"Ò>3™#›O/NòÒiÞÊPIG1{G‚(ìÀî蓚vFóžÙOR;dú »=îôVžLH²ËRHærîÔÜÒø O—9èúþjÝ2Ý_jn¹Vu5ƒÞ7Ý™âÚñ¬¸†• 
úÀô§)¾¤ÏÀôjÆ|hŽùäb¹áÛ+óQ†¢—]ëh£Ör¢sLó „ xÍ89êmÀ°ëY|+ß |f{ì‘ÑpÆÙ!¿ºþøº{öj»ä¬./™=²C¯?ã ^ûdHý ¤˜CžR|H/`[N/:ÂçTn#é…ðT|/½â$¢=™ñ•Ñç#mH½nu¦Tì)S†ä×¶“}¯xþ ìúAãhUæß§Îÿ÷÷[µ¬ý·Þ¬½Ù_âï'fÎ?,¼îçc†×;ìØòA­ô(_n¸;ÆCUÔLÏNÚW•Ÿ–ü—?k]Å'rûM…ÁžP猻ÖB¤Í¡yp78ç¶-PrÝí¤GB€,ÄÕÏ ZѼ€v/²Årg â §ÿyÝ&Û/&ßïPr§Ý¢@ :CúÔëwÍ{½É Oo«§fØ»à1~D‰'Ïí±r`c'1é8°íWFð‘ŽàÏŸp//Ư°x!\±B1žJÜLâGæX‰õ†çr<V Bn_•2óÛv8MræOMyýo”=ÆkÖÑv¢r¤#gÅ3¤„¹ÿš¸q‚$'â\à ÍSÐ\×Ñ|‰6iΦãLSÐ/?ïVXk߆ L ›¶ ïø ñSßÈ!žØ÷¬xç¨FþñƒÏ‚ïfžÐ‰Ï‚p`ço¤=;ª[E<…óï¹°qf®óòñY+IÞ[{‡:ÚOM'>…l¥—œf<°à—òÁª(¿|)µ£µD˜ü§¾Uh-œ‡ñ¾¾ôÇù‚Ë8q­+Òÿ„þß8¨Õ_Iÿo´êÒÿ«W-ÒÿëûoúÿKü ý_Í?èÿ¨Óë>ÑWwÎÅÏÆô-˧îWÓ÷{µÃ½*šg 5S iÌÞDžæÿI¬=–hÒY‰‡÷Jøz)SG¯áÆTêE+¦òÅ7PÕƒ"oWá-E(ºðLù~HO–?É[Ü¥Z#Wø)§¢(Þ^‡IX3¬7Z9¬S¢yÙ1V&^æ6³œ‘ 7HwÜ=K¹|½,:ÿ]«üÏÚví¨ÅÉ$²ü]ß †kèŠúB“(Kæi;ï0ù:MÝßzö-£ª]dXyþÈckH^gÜ!uH®[H÷[@øÂ¶ýJ‹`3f­±Ÿ›5_›S4¾}Œ1 6“·ßÃ4´æƒ Â¥êpð)$‰Gªuƒ.rox-Ãk3‡WtïÎ ·­Œ(äû]‚_Åéß$LJy[ßÜ]༔ÚO}Ž¸È‘©ý#LÃj#ÏÆcU¨µHì\]¡/-%ƒ‘.¯[ ÞQ†Žh)ðÅ/ŒñuCq=O¸hyûî®Út"ÞýgxSŒìÀ¡£&n]2Nû@¿æBDsÆÛ`à2Ù;z3ø‚97aˆ™¬fÇðÁÚSs5¿¿Ù·V»É,2žxôD»Yˆ{¿±ܤVŠÿÚ:Š'bM?TaÝÊ Œ¬´tÖÃãdY6|–¿Bab2Åz.÷ë†ì¼Ê®È1¯ãàÿ"£Ól:ú³º$f¾ž8Ï[› çÒ\™ÉdƱw㫈)Å›ùk±`9sãò¸¬qäâD ÿá{-ÃÉšn¬õnúa4Û>jl :GŒ1!¥äwJÜ {äÏ!›Çòý-ÞïÖÞúcC~÷{l=/‡ÛáhñTaE¸?Noò†˜îv¿º9ðßõVk©:ÏÖÇ(s`)³Ý‘ÀË&g ÂVüaÁdtƒòá€Ñó$ôñçMÞšæ”Í©HǦ¡It^vON»@•²ý­¿€ï957€°ó6§,â9y ý‚e¼MÂs&!o ½ó®*HÆÞªe¡ÌðòÆgÊ0·YqÞŒŸD4’äqzyðâZ#AôIˆù0Ø|2FžÏ0èEÏû«ô¶ù%‹]DL˜¡‘ëx“ÑK,ƒu›¼±UJò)·ñÒÍ•Þ`[¨ú£ð~Šü<#uèèÏGa²ðÙ`ñ±À.äg•¡ ÖA'z ~æ]Ë£õŸá ¢–’ŸO7$Æ ï1ùó1ëäÌûá(¡×Šôÿ±W.ªŒ¬o4 >ÏE?e£ã#¡IsÏÅ…Gœ¦©ôýCNFž¡ã2ø¶‚8<9%(½àqÇ7ÏoqilÕvªõ&ã¯3ËÆ"Éþã6Ú»PÏ ž„“‘?ï£S‹_¿MM-/^Ú“Ëâìrå4kHà¾r‰Jó&žf·“žç‰w(ÑlÃ\ßÅ€ ®éRV«·Cîâ©(P®¸c,Úvâ¬OÑô‰o2þfn:øí·é˜6y˃ãúÞ=býr@µU&FùÓ^ŠSû§Eý#å S èi”ze(ÌÛõ£ˆKy³¨³ö7 °]rå}úØJ<Îèñ7,b9oAævÞÝ/~ú´SØ…åó?¤a¦š—0A㼊ô¤stœuÿ¢„‰"!£ŒNç1`2“·¦^ ‡ìï÷œÕÈLûc0"ãG(…*ßþüÇ”_,ØLÑÅþ^½•#‹6"dÔ”,ä¸#×·xž`åc¹—‰9Ãk÷+2äLŸoö^»,uמòÓ Uºc­v©üAõ6ÃQ¿ñ„˜öhÞ¿©àfÑ‹³øŠh—º9èŸ3Oß;'ÄÚýìAÛ”[ü+ÚEçCúæ5´»#»>¦-8w'c˜Ôî™Ôt€ŠÍ?ÚÅ– ¦Ây7¯þéïf”†P¬OþTñLï¡D¨Ð)Ýóá™+ÞŠžæ89©I°Ç<Ã)b4.…­ýîõqÿºÛ>˱IäiH2b¯Ö19k½@L{ò@¯=AùHàÍÄONì\]é–Hh Ü„‡i¢P-‹k²-xp{Ú¶UŠ b·íy6•õCcNo?»êj2àUwVLñ,|ƿà2_óÆÿî}Üoyc+ž8á+Ń _;PñßMQÿ£zðÿýEE­:Wm 
ý“K‘€›mŠÃ3ù㻺¬É“f…û™©¿¢ä—‡{{øÆ¥¹J§½qHoŸA +N0*áäB¸~ÈŠ’4\ÌVE‡ç³Ø0 'c|J¿§*c"TƲñÊ„´#E¾6~çxäÈ„T ÞEŸ“¥Y}W~’۶Ѥ!+Ê™D”Aâ2XX÷„ÊP±Y¥ æn‘8obd>q¼œÒ„b,έwã%+AiÝ@é‰<.ŠQÇ ¼õD¨?Ãè1o«(Ã’ÌÊkœcñ²ÄQ[•0a”VÕ #ƒDχRëBzX’½ñ”ÑaÞl¿O¡‡Šâ¡AF :™šBÃ<žvvÒ’)1k@ø¹Ž±Ø©ˆàCYEXex|‡–ø–;a‡÷1 ïò±¡¹goP‰[·ðšc-Êdİ÷÷ ƒË±J~ɻŽA˜r,üL§ hˤEó¦U~ ,^VEòuŒÂ+^p÷W^¦†¯3Qêè¹@0ûbÛ7ÑÐí¸„#Y(v$,Ý”)a),þyfÅ‚ qN‡Ä8ä––ˆ¡x¥@{XÇ’ ¿÷?æ>¸ö$­òƒfJ‰¶½Ù1rP€‘Ó‡d) ʘ1_ë½ÐÜ28)fN®’µžçhA×>›û‡ØGÔÏ·P†NNíå¹h0Ò©ŸÊ0ñ™âôÿNÁŸÌ±µ@÷»+\r/¼ÐJáÃjgdô炸y$›ÿ˜Y—³#£V-^iBæ]j|ˆßÛZˆ_ÉŠ«Õʈr¾5Gc\lÑQÑû‘›côR¦ÆwÏáU9ØÎõDîó¹¤9uÝ$4¼$‡2£XÅ S¥”ãYg«nΖFºøµpÚG>SÈ­“ÐÍuàýkâvN8#¼PÜæu9€{ŽVÂcv 9·“[çó@nä@þK¡VŒ!ý´ñYNÛtórpA‡õ¸ØÖqUìk›ã^üóŽÂ¿<ß·^ËþÛjÔrù?á÷›ý÷%þ0ÿ§>ÿ•Ÿ˜¸lß\ÑÑmÛßRJNv¹Âä Kåëa»»ÿõ_Èž¦Ãÿ«~ý÷³Zýçzm‡µØ‰kóº‘[W½þ6vH…MÙÀzÀå=°ìØy>–)rÇ·aà^p/o¸ÚˆO¼8Àž4V³­ÿûf2ü¿úÁáAýþg†4[; $>1’C9Y!šjkÚaX^S"Ô»†´‚ Å*­ƒpÎñÄ@.$‰õS~íXÐ6&wdÊ_ £ßÛÛ£oœ¸c5Œd„K B&O52ì{°44qˆÊ/apO0ácÉ¿†¾C½wÃ+Ö•™2;i¦L¶…ÙŠ·ÙaòäêÁÑïPwmÖ›ß$ñšú@€èã[GûÛŒöß^Ü%Ô[½µÿßlK›ŽHF…y/{‹Sú–:¡äÀ‰n±‹Zý I]Ä^PÔIÞºŒ†œõëG‡ð"¼[»ïRWã`ÿ~=°ÝtÎ>±”ÎùK¬›ï,£mðÔ¯_,íc`zá¨öbŽh‡Sýˆ‹ÖÑ+8 f±V/ÆZý kÓ°Ö(ÆZã kÓ°Ö,ÆZó kåXkäùÚ×päö’Èu“7ÄMC\޵¥ˆ{coS1—coˆ¹3ôÞô’Ç ‘nQ4Án |9FÄ)ÃJ6°V`W!(þqè¸/DõÍê&ý~îŽÃ fðûhþ ¼ëÈ_NPÌF²¹Ã,J~£ooÄ ˜;*ÂÜLLõTLÛRH¹ðìoTœ‚}xMjÚ6ÙÉÚÃWÏÃ×sí0pNG–çk ÆÔêòÖíb‚XI °×[û›H9H#“3¼X•ÂuÐTëRÚb»a‘‹Ž .¹À‚Øhó-5 ïq‹&žV…9¤ªI‡¾äwÐÖ 3ˆ=Š©ˆS}\; Þ£ü&è‰rƒ5”ð|k¯<"ƒ ¼êlXûŸ ~°-äÁΜ"¾GÛ±o‘lW¥vý÷lèݹxìr¼ ¿1~3þÞ Qþ½œ0a»Ì6Ô³ lC3mÈȯæ ]›3ï¤ê‚ÙNúQ¶ e¥¢¶ºÙ¢AÚ ·Å´EÛHÒF¦š„f6ò/v¿0h4œ`,5¤Ië`áÅå`àÙî§ðŽq>éà?– %¹iL¼Äw3¡./ÌB_ûmSÿðü×u&âe5'ÀÓÏ«û–ŒÿiV[ÿÓ¬µjoç¿/ñ‡ç¿æüÃæÙs]v›$ãŸ?~yŽã»÷Väîy⯾/|T/}üNOÏZGõÆ^ÿOí–µØ'Gú^¦ˆX¦qî:‚³ãü…Q /õ<^p “6!QBkùî1ÒÈcä2ž\¤è€KÐuÿâ¾wüÆ,¡GÓ´•MCR³I×—”!ÊòÑÉ!™_›‹ VAW‘‡î…lGÜcÏc?ß Yíb-°½±å›,I5³9Ówƒ«ƒ<®Nã1˜?ÅT¦ñ»ç߇¥+¯€CÉuWÀ©~6u”GVÏÇ®SÈ¥ø­g2©5CÒV…£éCÖ¢Â}î äJô½ÏŠUz ™N¢zc~ãÈÜM¤~C[øì}éZÐßÈM}Æ °÷ÚêÖÚýí}lUƒxWÕe[… `ºþßlì×ÿïfkÿMÿ‘?4‡:Åz|yõ',Õ¯}öéìòøWé}…ÃÈ¡‰z¹Xrnàêý…=†Š»‹\L© –AH·ç#Õ%v0è xDÞO¼vˆTÜÈ—‹kö…RÇùìjrã{6;ól7ˆ] Äc &iÉ>ã÷{âûÐßçz&Vó‹òE¯Ë¾EG{ð\(éˆiµ¿öÅ aȲ&ç$vŸÛÛïô¿^^÷¡·öÅŸì÷v·Û¾èÿù 
9FcÅe÷Îå=y£±i)`˜‘ðÜ¿ç§Ýã¯ð|ûS‡eAŸ;ý‹Ó^¹ì²6»jwûãë³v—]]w¯.{§{d‰ySª´ã&–çÇâ?avDPü­u‡±ô¶ëÝaBUL™þ8Ëøa0$èhîRþ‚±A˜ì°ûÈKT€°1=Úäì°N`ïí°Ö뻀œº+ß²ÝÖ›àûFu‡} ãŸ=o³j½V«íÖÕvÝk x:i¦Í‘ËSl°ÝLNí]€ï«•ð/ÒéBLˆ úŸ*|o¨È Ü÷‚o"’ÔqÕa‹¤òy§;”¼2*Sñõ{ÐY þ¶Þ]„Áî—«3†¶õw¢~N¨Ñ}‹Å†£/à9‡ñ4HÕÕvv0p“Žª}oäÉÅžÌcoE z|»ÇôïB°­Âã‰6rx\a&˜þ4ð2ÂR|DádˆX’éÞ…eÂOÆ^gró*óPúS±º(²Ï6vúÇñéU¿syÝ Jä3"z¾ŒÏÇl‹³Ûãq"évÔÞmïÑyŒbt élŒ—…J<<œIð¨^ƒˆfYvÒ•øx°7˜6G(fܪàËj¹ÛÖ$–s¹1jãr £o"ÄT’ËÍ£è­|­í±KÄ¿I¾fõÄ¢¶Á†Žá%‚Œã”xöÚ‚±îñóù½q³ƒÀuh’¬G•(9PõÃÁ,ä² ¢ìÄ7(Ïê^Ñû-“^q_DÚÁGb$!Láã`óéÅI^º ƒx-¤^DYMÄ·/Ü$¶-Ø‘UmfÖ£œ@¬‰‰Á29¥•qϵ'‘Ë_º £d7ô¡ŒåoʰSõYÉÔ´t Ðõ‡y£ºKÁ@ü¨‹BeA‹¬Ê450÷˜ÿ1„k=à¦Lc¹ ÙÖÚc¸ydøz ¢‹ûÄ)Ðh×{è­ª9ôOîÐ .#{½]+Âw×E¯×Yï¡×‹†Ž±‰eXXç¡72¤âι {Æzcü@.NÛ‹º|äÝ0\ꪬ/{̇Õt̘8c­|XK»ö¬ú°ž ø$XoBh¤Ã¹ M¥ ¯ƒ¨×s­È¾]oT7±÷b½‡Û2)ãô TC-?^â¸$†Ë÷~:î3+NøŽê‘ÖãlÎ>xx˜6—ô¬5ç|Ú>Ø{ lÒóï¬õ^”GÚFÈEÓîñZ¸vt`JIk¿}׫‘t5ŒoãÎÈ£ä€ÿ™TܵfØõjÓ¸rq[g2i*+B#û¸Å$/“ÏTò`­G^¯e–äñgζ×{w¬×2fö8! ç=Zæâ\¹dÈ…bLTj¢µ|½™±Š$¸»‹wÑ`¾Ö£oV3„ƒRa×õÝ;+H8í,“?,}üRŸ (‘ãI8²¼ ³äŪ»âpÑgD¶Q(]³æ’j ŒAµÚ¤©¨Õª¦ˆ±Q„Ôª›û&]åv“(©UÏhs›·?·êæ·iBR«nnm=Êvre%K>Á]5æî†éAbáG¼Yp˜ûÚÇh§Þ,   6 s›kÛv·¸ £(S =·zIYKµ´®ˆF5/¼~ 'ÑFqÚ†¹_²œ«ûã%ûVDvǦdS”iv“VE£‘Ãq¯6mM4³ö%Œ`Ý@9¶Ñ*’cO"ïnÃh*cyU'ŠÅ¥2è×;[ëSÅVæ}í}h[™SÐ ðöÜ—RD<¹Átyíã΋{²@Õù¼Y”Ÿ'žôƒj ïÙ¿ºiÝKËVöÍ}d»ÌžD( €$ÂuR`ÜÀŽÇ|== R³Z¶P‹{ÜÎ¥^(„ª¾—ê›O MOXó´l¡dL™öR»gÔÊ›J;¹qf,FdÒ²ÇùžB¡°+*ÇÓMðòêèÙ&ò>ÈgÐý[\šRÔˆ+PÚ±Te€ŸVìÂÏ磥xMíâYØ0½Ì”¥¾ÌŠêõcL’™H© ±éªka‰Q×dÖPW©‘@5©"Õ’ž·¤ŸÄÓ u¥+éªQi¼¹–Á ýXª‹©¶T±IG 6¤Ý)ÝA5¥’¸jR~ó=ÁˆR,)éWk㢤> ²Qëˆ×$5­™,ÁÙ)!9HàalM¿šñƒP7 ¯Þ*ÆSHÐÅÂÃŒ$}dÊ?/LÓ:>3òWÚfžë§íÚ)yÚˆû‹2™¼wù¨SÁAÌý üpØ<ƒ7œÍæC<€-mã’¸vmÄ%dÛeÈa¶]†~jßÎëi‹–­9ïÀœ6&ØXbgž±rcM›².¢é¼à h7h×£” Ï  AÁFÐ…æqü—˜tËuÁJ©|ró!>¼1?#ÞËA&„BJ›z-fañiŸy¶^4ÿæöÆ–MêUò?×êÕF-Íÿ¼_ÇüOÕýÆ[þ§—øûé'Uߥ*LøäÀ8=L"‹g¨À\l ‹d£”Ï.;'?ÿ­°ª·x2)~þ[ñƒ{5P#«¥µÅ%s*y¼®Ÿ¡ï>\Ÿ¹ï&<“rzÞÔrÀEÖGM€xØgÏõ¾–ê¶*ÇÚæœËYC¬<œ°û[7`~~Ü2¢o§:}2M§?Ä‹´¸zv̺3§–Ȳ33Y!¶d$ý-/è>lõ¶Ü–ï´ï€{¼@p$ÐÄ0a²^ÓgžÑëeïÍohcÇ\JCQ«e6~ÝŒbÂÐ…!FN,ÁYÞh2b¯Þ05>Àël'“(˜šfýkKˆWòPÄfÓ%p.ºFR» ïe6©ë¥3fðî8rw•ä¸óLZ+SÇҚ^3™x%X¨ÃÆ(¡ma¥+kâ'X‹Ã{ º(LågdJQp‰QYn\N6êk¸ž÷žY\‹¼ý<}, /¦ÂI<áà… Yõn1ƒXdoÁdâ½e‚y‚™ÄJaÕm9hÇ"0ñA„v+o,äÃÈúXŠ€ž| 
!>·pÃAîæÃäVc9qƒùüÂêûú’²£²Ü7—Îx®îÑ–Ôvî.ÂsíòmÆíXb—Ã|h˜T+¾'©‹Æ'ž 7À·D>0vã0]ãmTc|><<ò7æâKÊ5/?Æeâ@Ê1†<HˆÜMÈÀfÔØzžÀ’‘XŒOÍvb&J”=ÇHç–?jYäYcã{Ü"¦ŠPÜü·øDËÖrZ= ¬ÐeÞ€õ»×§b‰âF l§UÕ&£ì~¸«ÞZ”§u,Ì®ñŒ‡O‡hvЙ„´;ðËù†°7Ë<˜„ÎfŸ”FzÀ„"‰õÕN‘POš™h¹hCW/ÁÚ{(ÚXìü>0‡à‘ñŠ|"­$¹Y±©¢!ǰu'·øä §1aRÊ;ËŸ¸NÁy`d5MÐG%gƒ²GzvfqâEøL)1^vON»€í<•>Q1vÞÓrÔSïê³Òlj0÷Nϼ`òÀOKÆbÁIàÅ%1¾Ð%g§„Ƈ¹pP†€ËÈIÞÚwÀwh1ê@RH܇„…ø¬÷šdðFž§× >Uí¿L<¾@`Á~}ƒ¯ÊÄ +ö?}·™Í {ˆvXm{‘_ø †Õ"ÓÂûbµü}‘Bû¾DC|ŸS´Þçt”÷Årüû2‘÷}±dø¾H´z_,l¼/ÙÚõoj»¢ødvWãÀ²k¢Å_t‹–Å63Ù7RƒÁ#WE<‘±»QŽ JÌSÆþ¯G.Ê¡ù1ñÜ€ì ÇØl¶ yçk&ßþ^äíÿI«}E¶ü{¢þc½Y­eê?6µê›ýÿ%þ°þc:ÿÀ¹Ž©B±tíB&B·Aò¹Ê­7Ž©øBÄ‚8þ‡ïXcYòalèn;ößípôÿÅ“›ÿOÛÿNáf±½õ·¨ïQè¸þßß·lÛãí-þoþ;*ͽmϰ¯¿V‰É–_øÿªª@aâ1¡Õ“$4ð*SöRã§•:““þQÎö £2˜–æä4ÓsèµéþÖ’SÌEVPH±-óeû:˜,¨X¥#²ž¥D™`%-ßå&±`Ò É¼`@ÙÃd-¯i%ÃSúÖJ‡Ë9Ë”ŠzIļ6~忽ÕV4°›­zseÀôý¿Þlì×2õŸõúþÛþÿ?Á†cýôûøÇÌ ë=ë|–Åö{.«îíÕMЮŽêÕj£ úþ·×&à‰¡ebaQ}­ ¡ûù˜áJüðbûi9rsÈ‘µRj°À;¡"]xwvÀ3•/×pU»âfâùhoÎ1\‘Ãsh¡Öúw½µÿ?k‹a„Øbìðû0ˆ<7püG›;w”þtûEðTŒ"j=9½êž·û§'¥#|e<*ÿ`'´'èö!aôý/,uÀ'ïÈ"aÑÛòBÓµ‰³ÐV‚õŽ›Ö~7W¢2ä κâECK¼ô±dïÁ¤s‰˜ßx1µ7ÔHG6ØÅìnäÖ]8Á:ž'¼áeöð)HÊoä¹¾2ëU…Eôλº -èwßõÝ1^_{e‰y7ƒÓÌÝç ¶U§%c}m­C:uZCPÍý¶ãDn—,]Œ¬(¥L¼ù$6këºzSôÄÉ3K± â ¡£Ú<\[|¤*„GŠöa½Ž7áC)k_‰acŸækËÒ«rdÖÐý”3™²I'¼¡L›âǦlh¥£}mÌJÅ.Œ†– y¶ü^b%“²]í‡âáuEycZkâßM¡»²Á¾6Z% ƒêƒ÷¦rÄH[B†#1[oHQ9rc׎ÜÄŠ€-OJ‘¶âÍ©p#Ï@ÐS‚|¾[œä|{_Âù¢ïMfh¥Þ2«à÷ú¸ÝmŸIŸŠ‰ç(G 3GšwƒRqÈt¨5f<-VŽß™AÏÚx§Ãž?CP¨°³àÞ`Ì0⋦ÅPkáç4™×¤•^{N§×ÕGT÷\ ¥™Žp;(¡5ƒÂ¢ IJ ¡ÆëŠÍ\,ÿ§Pž­1Ñ2MðèKÿ–¥kÐÜÍ,âøØJÜ!†ðrì<ׯ-Ê2Ät'‘ë¤m€hØv‰¾:ƒþêŠþÆ‘;p#xúÄõ½;7zTipé\°Ñ¦!qún4‚ÞütɈ§5ÁU¼ñ‚Q 4¯wr!ï²eÇÞ(¯h!À˜ð0‘ãVs9x¶û)|€›c²å‰ `cݰ'_¸}ŒB_ÂÃ_Ùé([ŒŸ[Ö’ÎL…õ%³ƒÌºèPë:•_£¹œó\Ö?e]á“Rƒwú§½rÙemvÕîö;Ç×gí.»ºî^]öN÷XÏuŸÂÔ€paRÆVq, þf'†qù»µî\Lòäžî0Lî6~œe ü0t4w)ÁÔA˜ì°{ôÊÀ›Üôh“³Ã:½·ÃZG¬ïâ>Ý]ù–íî°Þßo4ª;ìÈ"øìy›UëµZm·Ö¨°ë^[ÀÓIs9í uX,» ûÌ}°Ý1oï|_­„‘ Þ˜Óo[>ôyÃÛî{Á7ºm£XDHÑ(…²KÐ~Ü¡ÄéYb:+ÁßÖ»‹0ØýruÆPäz· ?©­ü:th‡£!bhØþ„R]Њ¹wXÌã”%ª)ý˜H Mä cƨsôøvéß…þÆ(o%‰6rx\a0¦‡Bþ82ÂR|Dádx ÿ†˜• £¾eJ-’TA’œ{R²S«‹² ¢ ®ÃNÿ8>½êw./ »R‰|&¦d£üËø|̶°ù]{<ŽB$ÝŽú»í=XÓò±p]é@³‘œ'ÖŽÉA‘D‘ð,ÌqÌSÓÀvÒ•øÿ0˜6Gè G‚[|Y-wÛB½žÏÈ©˜j—K}‰:$¹Ü<ŠÞÊ×Ú»Dü›ä‹0Œ¬oБÈ"•d§´€Ó7(ë?Ÿß÷1;\‡&ÉbÂ4˜QÖVÂÓã`ƒ`rYÐ !5‡É-epxZ¢] 
Cÿ§@&_@ú«œì¨É˜°YDqГ"Ð=ÖÐf#v¢HœøÞjaâ¤Ó/e Òäî àà/#OÇ[áòCJÇ‹‹4rT_†+|â¶#¸‰K PvžÌ%mÐHE¾'1Jœ‡ôŸÌKЦww)<Ò Z Ñì‹éQÃØ|ctSNˆ_7qǹ'üß1p}Îö¶Ž·Èñ5`¿;÷p!<‚r7Šá]‹¢Ä7bÅ^Ñû-“^q_DÚÁGb$!Œ¶t°ùôâ$/]í½'R™©”¸ç óYC\K"‘Mƒœ*Œ4_ßcÆr/Eé‹>0Omƒê‚èÎâôˆòn‚‰Ž7 ic3„‰¤7Œ†æ©5ŒÆ¡9h¥°Ô÷WK½eÀ"M?\oXp¨bÇ”vˆ´ ‹%sÕ=U±ja¢«kç4±Âv^µáÜŽ++\KõÕð¸Ãš9kŽóÕµœ³ªÅT7aá >E¡åØ`7¨FÕªK¶ôs Á— ºExâáÅdtÿs&–Á%²7×”’-Õônûü¼Á•Ž÷ÒAõ0'ö€~°©Bn‚3$T))vÐù}¹ÂaÕäÛ]w›í °îU­£º¹Ž(µ‰LѺà4Mp&Iˆu\Ï©†íÂÓ*†§ï>l¤ Q7¥¹Ý~ 6 É:W¡ïÙ›TÝ\<Ýp‚ÆÁ ÞQåI¯ˆwçb%ÁÕ2ƒí=-mK âûÑ)ÙA¢ƒî`#稾o,¤Ó( £xõzøªÕÖz£H%Úܵnò;Pº.ží´íõ·2’¾sâÅ”9K8y}ȼ¿‰ì¡v`p»ÏatoEÎfsðF=KvX ÖûkåÂÜÊùÃQñzZ¹`·"â;84ºO&¢¢àñ­k[9õ=wÂŒÊSêªØZŒ\Û{èí?D…e'µ4×zìcQ­[sˤâ´üŸ¬µC4‹•yÙM¾¦)w²IW²mH‹¢Í(3¦J`‹ Ãæðdn¦ázO&¡ºv/khåfèy†&M͘MöÄ (miØ+=úœgˆRÃ÷µBôë4>i¾‰o-'¼?³âä˜v£õ¢Á}c”çx0¼NÃ;0‡g=¬×ðáýnEVíy=†xd ±X ^Ü­ª¬±|§c/Z·!ÖŒ!~ö­5›gUÒ`D×kÏY";|æÆ.¼„¡~ìñzCSöátPýÈûË#»gMnÞXÈeWa”¬±íçÆ…Ih‡KÛƒºôs --Ç.Ç´Ž²ŸJùvwl¯å&ñ\p¹C|öüJÿC•Pe–>°eˆö5}”#+^ÏQª=ò•ƒÖkϯÜ4nÂ0¹²"kä&K\ÏåÊê@‡õÙ[ƒ}B:­âvn©ØÞ«OÞ1¦SJb÷¢ZY.—>Àº2XèÕ´‹8ÛÁ¯­ùI#üj¿ Z¤g2`¦J,¾ö€ÌÑúöóà5¥Ö…FH‹3xf”\ÕtNîJ}S¿…®¤®¥b¢¸ .Qà^Jê: |)—F ñ‘f¨3( ï’ùUBùr`hdůÅA0¤MãZHQ ´%Œ¿™“Ñ–€)ï-s¼-CX[|¬ºÐ·ÌqîÛ"ŒM—ùL QÙ%çúãOóñÁ_‘g¥Só,ð²¢ßB3‘ M(3r¹6I¸³ùtŸ Ía^];yN£UìGSÀ—´»TSDêÊ…—†&9¨+žœxyËDs<×u«õwNšY. ÷©ýËC™€PFµn|wipI¡srëbù«@Ù0dÌ…(î™äEþ?{÷«ÞØr‚xeî_Où5´üŸMîÿÕ¬¼ù½ÄßO?±ÎU›}²b—]vN~þ[‘'êaSçþÄÚ*ûÖÏ+~dŒ]ö[\ôXÄœ+%)Âò™z%ß ×á3‹zâWWÂêÍúá^½ºWUª“sÑë÷Ïä²k佂£ã!yr$ÞÈåBxÐ ŠŸˆç¨¬ÉlF¬…³xlµtl¼èŠ:ºÜè¿`áÑ:w”mŒÒM)é7›F:ã5óü¸ß튯ó1ŠLèèáÃÆ!¹íБ~­Úh=1ħk. 
u0Œþ& T$ÅKÝ Ö ‡‘‡BÔRDÿ&Š— '¡Í€JJÿÉ}¦ƒø—×xøR¶>÷N>™ #KûsX‰Å9®ÚútÔë²ö¼¡ 4V2‰\>øzký&C¦¦ûð«ûhÖxØŠ†Úí¶9îÎÕÝ>º¢p.ˆæðpíg.g¡mŽÿÌ$øÃƒµ{CiƒYþ„Á®ûà‘SéúL-ù!îþfŽ?æ–° Ó©Ö×¹nƒöUVÐ5¥3^ËKÙ•.mÔjë7#rGûöGŽ ±ÓîÝËN\ßêójÚÚA"MW¶%&,ØÂ]^ÓýàpíÆ¯ë¾9úö>ã ;4×Uº µýÌÂö$+ëâD®ëЩMJWû/µÞƒ®™alªjl=Àt…0”=ä’oì·ÖŽÐTmѸ÷õóUFnê}e¸ò?ÃÇÝh c€Ýĉ¬A²ë¹É`7víøv× âÝjk/Ák›ª Ûíu¾˜°u±ILËAkí8qSìöN3«ZÖaà¥z%Ö­î µþÒ¯)ĽÆçeæâ6þ³´<ÅŸJ±!·‘ѯ2’ŠG'«ü|‰}OYÐ<`Qº»n9Ÿÿóäâ56—ÔÀÆÌ»z¡—G@5 Êøpò\y²X cÎc§Í&t3f¡ˆ¼û+ t áMú®Ì K©—Û3@©k üUÛp“æz¼‰C8j …µÃ ö ¾ò<ñ_P(½ËöÈð ºl3~½a¤Ö4в@Ea˜l&d-2~D¦ƒÆ[X 7 ¼ýÌĹƒÈo©ãMìÎò'›7}Yøt—7‚O²7¶C6׌ËàxˆBw”á“^à&#<Ñ´¡ðÕtyƒoË2/p*wdwæ±xbMa-L÷CÀæd‘ÿœ¸éJüÔ¹89b”/l÷_xƒµÏ¨Å#J¡·\xAHêYHú‘ÄƒÔ Z&÷ÖžFžÞc`_õ»1†ÖÛ( ¼¿¸¥"0xHÕÆ/+Z•£Ë!"§¹¨¼Q,¤Ÿyzˆ0b/Š_Ê"±L€[y€ÝH°Ë í΋€¸ Tûq¿kŠ%$áK¹„Œäw(“-àʰÌ)ÑEŽ+Lv'°ê{®Ù ☇@Ü)o»«]H9×­CθC±°(ï€J58™Ånɵ̑K*ÔÒw3§Ž²÷h×í÷Ïø™ÙNâ/u^ˆûêçEOý»=f9V8Öþ˜ÿoäŽwÇþdè«ñîÿ‰^<ÙüÍzµùæÿùoùÿÞòÿ½åÿ{Ëÿ÷–ÿï-ÿß[þ¿)ÿ_­ºÔúíç\@eóæ±^‘@™ËŇq$Ëýƒ.g.S=”&yÏ¥Hý¡Â„^‘âÆ²¿‰h|h–ǽ(Lãðˆ¥Qó2sûö«cá ‡žä ß#S$ôp3µ1¼©1=£UÓ™ ÍêFàâH'ˆñØu¦ãbDÏ|Ÿ¸8ª¦¸èž\LG<0/j­×ÃÂL†Šú^£v4/ƒÐ¯é1aÔ€_ʬ¿E€±Æ|ÔµGG]#‰KØ ½à lð‡æEß|ÔR|ô…dýFäc³á„úÑù2¿Ny¿ës6”½¶©e-ÿö>V[Xÿã¨þZõ?àâ ™«ÿÑx«ÿñ"oö¿7ûß›ýïÍþ÷fÿ{³ÿ½Ùÿ~$ûßZÖÿ1´òÓßÞw@»Hex ‚É‘#ñ÷ÞQ'=†ž8N+'éÑM€]ŽÇ˜ôv‚¬Š–:ê%–¨è‰y ¿­iÊ{k_²öŠ¥™q>CÉÒˆ'z#>s¢eŒ;¡×‚éƒ`bcGd¶Ø›£zI:¾*ݪ©O•ïXc>¸xV£nµ_N™#‘~9ÆÊ¤ˆ)C¹’³ô¦P}èÿaM’çÖ[ÈþuÐý2úüÿÚ›þ÷oúß›þ÷¦ÿ½éoúß›þ÷¦ÿýHú_­¶Tÿ6‘ìœ¤ÈøÖ¯©ÿÇ‘ÔPæå£íÁvè¦WY8b¼ÍlÜè"ÏšÃB3ÙˆsïÃj>{¾pú(ÆÍ€îÏ‹œÖ&"ç(‡œÓØ¥c1ºîÐ}(G’+Ÿƒ5²hâo€§Ą̀©U«9Ôt‚ÙPãß9jj9Ôœ¸ D^±£1Š åc߉ÓU­ZÏa„P¢“éh“ÅÈP<5¯·M}ó¸M­ÚÈÓc.|g ͈'žå´qj±ŠúV4tŸZY =4÷ÂZüÌ쟓_ešº7<ÒN7’8ë¤ñuè­£•¼ÈHTF $Œ&}Ù“Ï÷ü)æ”Æ›r)l„§T~%ÓžØ;]ñDÐ# Ÿyæ hK¡·¦ìb¼jîÝÙ +oŽVßÙÖÿ¸±b÷®þjõ?ªÍ–fÿ߯£ÿW£¶ÿfÿ‰¿e×ÿhðÂHRÏ*üÑÔÞŸ·âGɈÒðý±uxÿš¸HίS{eqf2ž[â²{rÚ9@»q‰ñ㰉͚|Â(I9w2¤F?¶Ž}Æü·©J!i‰Á·oð§MÏf­`ø}yšÃ¥> ˜‹ ›eþ¤(‹bHî·n…Î'-O•Oq0  Ê‚M‹Ý߆hDz OŒ¬`‚ç ô*™ã¦Ò¼h»p&t.EÁù VæáüËQsXšCss&Ae9PY ÒI Ã?Wx…S~npåÎŒåyMZ§ÑqÊKx~©ŽJÖ¢5²å2„•MÅO €ù5ºÁ4øQ=Ëq‘·näî±S~I5^‚ç»khÕjy#eKƒÁà©ôË4¡]X×ubq–AýΊòš–)>3LUË: ÜXíc6ŽÜEÒ)&–µ@ ¼Ÿ>Xvò|¤5Šòú”ì@ú~Á PæêJâz-+’šåÍД2#UÅðr€ÅváMŽî확XÛÌD)Ø—QE® 
D¦ÜEGQ¼Îô¡è+--‘±LMDÿzÜpîÒ¢‹Ž½•ŽýJ¹½éCO[K*Jnjà:YpdÚÔfªäÒþ-jzL’Fqi™÷Y²Ø0U9C7N\Ç(HÌÕPò¬~y1ÃÇ€ópvOƘË2ú8›¿G}‘Ái„y ÓƒŽE¤%> H=©“‘>¸EÆÓ2Ln-GºÐFvõÏ•^z¡ÄY‰oÃ{Y<ˆ4´=N£¨^ðD†H¬ÜåTr^4SgÙ†›V”·ÚéÈARBô·ûyaø$S.^mŒRôKX ôȇáBgp\ÆÒ°:¥^ùs¸Š\PÜoNß΄#O9Šó"¤‰ä§¸|´Ïr ¶àh¥V…¾Z°:}¼çè=GùÉ¥õ*ÕUIÐ Cæàé.A…Þ„ñÞ†/?¥º¥“øîê”̆}.ƃ/ëêL(rÇèšú#p"aÛ $Á!•n~$’'Ц¾—V iS §~Sõ¬W³ÿ|ýÔ>¦óNž#×`Á¹<¹&î›® à ù…‚¦`¥¸²2У±<ðT˜]˜ «b•^|BU°õª.Ójc¯)eÎ:«ZöZìÂM†YÕU;c@ÐõÈsOóœ™#¡jJuÚŒS'Âÿ×н²Fá$Hέ±>gªÁ¢r„šꉑ ²! ƒ‘ R{,B°mª,®ƒŒ¸4MFS׎Û|Ë级§-´7é%jÛ{N>OIH«rº}öô©ªõíi“wÜ~ù©+Ùô}ÓveyÑ“S×”RëÆ™·s3Hé¸bcùXÑLbóÎf­ŠÏ#ï™@ÌÃ÷1ég«üäz•2®†…®{r£'v‘Ÿy _‘z”B£K§;šwúæŸýY ™˜‰´€ÔÛ0ò’Ç'¨@=·v4ð$$3òÍKš×O¬'æžžY·yŸÁLlð|÷¸³—ë´ý!Róíèé)W§a¹wãÜ”«g˜e>¤0¦Ú;ÊwŽ o>gƧ0Ëø7l‹"áZXÁOiÆß<ò'Ëõk‡Éö\ý;±–QhÖ®z ×q»*]5iKF> Ì-fˆÿèxP&*°÷ž¤•ì$ør”™ Ü{-ä)?(»{v"³ Áȯ°ºHNí鞥©’!ŽåSy³µ „´ƒ§‹à°€_¯ ëÆ–8*ÞGÄ2’+€o- Sg©0ía0Ä僌›2ÇF€Çðçh&Ó‰q”‚t²ŽœEÍààz®=AÊÔÊ2 “='Ïç:SyMÁÆ´&¤­ŽÙÒ¥ Ð,´LM²žä³êX«xÅK–0…kd¸´ «.b·k‚N;PŒÐÝßê3át'M®(Ñ[Üáú±™×¶®¯ÿæuëëÿÖkƒzæü§YÛ‹ÿ~‘¿·ü¯où_ßò¿¾å}Ëÿú–ÿõ-ÿë•ÿµþãÕÿ­)ȼb  “‹¶t‚Î甕¸ü÷=e¥Tz6àà*æý`bADàŒéÍ-<(Š¿ßŒ‚Êó€½~ò›„7EL€Ùu¬;²0¢‹c„³È8ö†%;˜-)åÁ&"è0Em1¿‰O´Âæ—Ü» †f¬@³™89Jqrn =“æ&V8™bAZô=ׯ”·ŒÇÆÌDŠù|ïKMEÖ²)¦c"­4®,,I ƒ¾g¤ÔR¤dR’#N(!ÚÉÅHYŸ4¦s ¥®¯¯‡ž<²¼Ñd$WY(xñwÌsÒ ”[^ãÁðä"³‡Ó ÅÞp"˜ŒKõ¥ù£ß+Á4Sät]”Þ`¡ü¦¶¡A‘¼)6)滃äÇ ]¾9ÿ6ô9‰l&v3tRmIü‰PÅíê»ßÛ뚘Œ‰h¼0Ñt+ZXØ>¤ Ì/¸÷ÏCÕfî]šì|FÉÅddbŠŠ1ŠJZ"²6“®49šŽyÝB”ÑÉ®û†9sG¦ŠÖEÈ3lÝD†Ä™Uî¾ãuØÐkBN—GEb˜{8I2Lž7‹‡§¢Gäñ’¾°ñã÷­Ct÷ÈÙÇ‘^s·³¸Ál¸ƒVnËÿžY{ë°+º¸©ãä»–-[GYT‡Ap…Ih‡š)[]nþ‹›?SâÖ?ëíà™C¯wöÓÌ~µˆfÎÝä6,YK#º÷3@z~uvºƒøÿ×î}<é|9íõwÏOZqØö¥×k_u63׌ÐtnEç…92"Ý15<³NDÞ÷K½Sw'¦LÀf›2€šÍR7[¥10û¬4‡™í²ˆÞ& †M©fkÞP*K~Ä´å™Í» {/ïj¼-Ó…±h÷ÐÙtJ´rö‚râwD ˜ÙÉI*F’âmfFuY· ÅœIÝÕymî¾®çÏ:)¯ëw³.Xÿ#¶F7ÖêÊ<áÿÕ¬¥ñÿªþG½þVÿûEþ~ú‰ÅâÏ•Qîÿì:ad}ìºÎW+É-Q äâ²ú³v Ž`äó0F§+f:YáÉ?‹N¶ËäyðOÌ‘_ø¯ì1£wù»ŸOz¼nb„gãèîcì[cg÷ÿ•í" ú_ùHÐôƒ^ÃÕªeÁ~…{Q[¶Émb:lç™BFÖ˜\kPn£â%“˜<Âû õHã}ô]t¡ïÀfŠ™h¼ýšø0b¶Ë&ž#Û²ÃØ Ü=~wß*üÛEG¡±o=RŠ -x>»Q3¼<ôœ n6€O^¬½‚oé"ÓAm¿…éƒöà=SÌ‹Kž­Ó³ÆÄK´è Pzš—ôИë{©_¤ih¾Úœã3ÍÒ=Û gÏN>}DÁ3‰B¿ìé¦ùô)9¬!]ª,EeCjµèU È1>tèð£”ý@[º•®í"a\Ž9ñnYö·|霟–&‘P`Z%ˆœ‚p­ðuÒØ«Jf“ù[êxˆe=†[+¾u㟊ԗÂ% 
„obBgç²#•Yó×¢§KÄÒ[Ù¥æÊRöïFýÌQE†Åãméã½ègÇ{~Ò$H×§Ià‘»Öx¥ƒå¢8[åi’‘™ ϩ¶þëú÷“?hþÿ›^™ä}døHò™çaV2髟K“3Ï^m¿Yfïž?2´ŸgÿÊÚ4²`r³Áj‡#X‹úèSüQô>Þòä—Q „Òýyd%c7ú†>«²wʺj0ù™š‹\›}„†lÔá—d 1?Ê£PCµâyõ²¨zrnŒË¨>2û@÷óÅýºd°<¼hÄs¯-Ü g•4ª!鎯èáoT¦O-ÅÎ:k—õl— à]‘ïó§¿)Á‹·ÇLÝ™"ÇÐxfefª±(ûÑ$6!¨%=!XÒY~êjÙòÝ`˜Ü¾›y܆ÆxîxÚ@]¨#/ÁÖÏ~m9îÀšøÉϬµ½TÑ‹½SŸºåêÔìÐdíõB3!âWTm^~H<§‚ÕSàªìïÿ µË†‘TXJ M"­ƒBôÌPïXùÃãÛ!À/Aþb" =ãÔš‚¢BµøxðV§_Á²1 #õR£ìÐfCº8·Øvz*s^Ð÷8ºv8úïÖ¼ÀE«%?¨Iµñ¥ƒŒkéÙ ×óKU¹ ïijy•Do4r§•7(aÙ û¡ý åmGøÎvänêæDz$*‹­Ù; È&ŸƒnL+ħH'|Ù@b’ q."??;œÍ8/o0ëÄmwiydü43k€60‰ßÀ— )ž¦ÈOŠÃ”Ù!m@šFf¦””`$ÿ*ò.ñÍ—àÔŽÛ܃Ÿð=;˜†ÔFøLZŠ£’´cΙÜ“Èpoñ (Ǩˆ#WWÑ#w R!Ýb¤ G˜pØۥNÏyŸè`Ÿ·‰ ‰»é~,3­zvgÉ“ŽA‰ŠúîC’u®¡,ASƒBÞYtq¯MP„ßÂô^¸ËdS|Ï”à{.HL Ø NâRˆäÌ^Ъ9ܨŒ‘hyǯBEµÜSß8SSH­¥ÜB•Áðë ¿ž>wÖ6bÕ@ ¯‰ë¼BveEI€¾þB&žøþ#û×Äòyb:¹Úyùi7¹ X@Dô¦×מçtlvh á´ÇĨ)ì&Ÿ:—=•§’—e+Oß“äör’ C2¯ªâ£zrn-S5ç%·d™¥W(ܹ”š9曨ýB :ŽæÊ¯¬PÆ­’‹«ªÊJÛ9ÑFYÄ~^øˆofóö#ìø³¨kª14‹Õ¶,àÂüŧë(ÏÌÂØ{¸ âÔ-ôê²×ùe$T ^™ýšÅ2Çüi`Gd™¤×™©Ê'®z€Ž¦ãUÁ±:÷LÞŒïKwxÇd¨˜Ã½Ê+±0¥ë³-Ê8{ë‚ '›ypרf±õó„d.â"-má9'ÏK ƒCÐaOìd‚ K 8÷.ÊP¦3dº¸ lt1à Àh"èúÄl›ä «êHFŒÛrÐÁóXdãqì—“mZ˜ˆÈ$d·®?3Ç…Ñn}cÙßî1Ôb‡ÄÁ¿çã)7ݯïÕyÌ‘ï £-wo¸Ç|Ç­þƒ¿²ÍØîî?Ý(zÄaå"+ H¾¾gé­Q[:oäêHªW$Ñž:ôÚçL½'j`¬Ð{&ÏïE)e^VY4§Q ²%õû—-©¹zKºë Ü•U¶hÞ­Z?Ê«[kK£e£òÃÇò)Zà’¸-ý§ôkò4†ÇýòÔwRï7ªÉ¢×’.t7K_4\6dsê %[RÏ#Ù’õˆ-h7Ðc:ÀSç¶t.øbxGàÆ3;©Ò ut.Ý*§ÿ¨zXPX¦Ñ™ø©|.RÂ3g³`„› S°rFéJcd‘+Vá­7Žq‰?²o^àlÏŽŠZ³@Η:lA={桌BG mä×TzÌ£ œgѧ-;…ßoCßÝ »B¦˜oî ûDˆcOÊczñ˜'€M9xD¸S(dhN!z›ðnQL çРVšvÔ¤!Ë8°19•8ÒPH›¿Ù£Ö±gšs¦ÕÌ}-¯ËZðdS±m+ieñXèÿÀ=wâ‰ïÎ>߆¤~xç* ýò•Žwe¢n*´ìåú$CØHâ°Öc}kQ ?eÛð¿gHÙOÐË'XOÀd¡{åØ ÈON'IK‡9ã( é¹ç9™1æM*åP×BèqÖQÔ ‘Xä\x‚_éЩP¬]žnAã¼s #ï—ó$ÿàÎ:éçuHÇ5[ÏV=éßðÄèL$É—ôA¦î=&SKgäŠ6œU3Ô\CÌ6ÜæBÙž@ä&¦Ñp&"…¶pU€T*ž™=YTÞOý¤Ž;é³x'G¾Ô˜bÄp²P¨;IWß·çØ¬ó"¶¨Ëù6ít$³îÞÚžœ3vmÖÌ@J‰•\Cbõ«ö?„21bòF&‰˜yr),v`¢Ì↣…hëÍ©5WovTµ/ k¤Ù¡f÷Ëݶ3579û“)a™†s,©DõVblØ~Ëñ2ÃßÞÇV5ˆwïÝ›Õ%€™žÿ¥Újî×Òü/­}ÌÿÒhÔßò¿¼Äß[ý¯·ú_oõ¿Þê½Õÿz«ÿõVÿëGªÿU­û ع‡ áTŠQ,ïZœa߈q{Eï·–Z?ìÂMbÛ‚-øw÷F¥šË¤­š5í¡².¢×®Kø¼ÁÓÓãɰRí÷¯Ø‰‹-ùˆñ»f]ñ…›Ò¤”Ð~(²çôTyj$xЋìD¹ˆðþ%sæ=ÛØsªa¦:ã{ļÍgšúå˜ÁY,5Y]>WRÇå°…b+DQ x –gÃÌ}lÚ\§æ6­üɧÁzm}ɘÿsÒ âWÓÿêµVNÿk¾é/ó÷3æŸí‚[Yƒd÷6Ä£õÝh`×A¿ÁmyɯT²Â§æÿk404çC”¢ Ï~õó3•,YkTΤ^™z™5+ \Böò’IÂï¾PÎé 
›äјâ®-kP¡œè¸Âƒq3ÝtjýÄ|ÓÚFU4=”ò¹à8bíѵŸG×9Ç„ÂO«@µ_|’NÕ“bSŸF ¯‹×fto…¸ÿq¼‹G«’žØÿa·oe÷ÿ}øçmÿ¿ŸÈ+ò š&k+K™PÐ@S»M’ñÏ?ÞßßïyV`í…ÑðcZö6þ8ŒckìíÆ¼ JÈ—ioI~ƽ¯Õ¨ã‚lQ犷aäýå:bpÚFܾh³/½ÞnûªÃÒ瘓õÚÊ|âgÜ€)[ƒÁ;/z=Ž4癚~5 ¶Ú4Ø„š“2tôÐOP¨û`qêÁý§vCÏ‘†·ÜMÓŒXaO5W-(j\ âc7JÈdÄ_ÝÇWaâª×;«Ë úÑ?ëÕЯ#öãdpîæµõ ßè¹dšîW˜ªëÄ{’7ÌæcßÂEwõíco|‹qØòEº®°´E¹–È'·IÂù§àœœBK¦€ß\â$ýð›H°®H”ï¼ä®e{ÛN¼;¡O/h}¯…+aOám uÒæ’Gò¸û[]ˆ+GŒ«0zЕJò¸“žísh‹»Ë˜x×O¬.zòÉÁMªûù˜5[õÆl@U÷Žöê&ˆ8GõjµQ­îÕà¿fê¨'#劖xe¶)ƒ5Χ ž m<·t„£Õ 5YCÛX!E˜æy¸JÄr'p;!ºU ,;öF°þú®ïŽo*×Z:¯…7`!-tz'êÆC çÕøö1Æ\å'®yž/€xWŒjŒâ5| ĵåWPˆ`°ª…øà"rn¹ŽìLûb‘;„ÙÁó¿ôUÎÒ¿L*x˜L×nÛÉŒ\¿’È%°$WB§l}<½B¸t/3î>¨gqæ5·ÀàðrÛ³0\ènnŠ‘Ûàa½Ž>Û>ºØ©°>ÏQ¦Í‘ŒЩ†¨…e׬1à ›iÝ׫kªsw‚ýxF22øB2~½D…—.KeånØYóÞ9¬[ìHÔdé—eõ@­‰Â@o@ŸFuteŒÕ"òyø‚ñæ‰[7¾ëd0W\0Ng6/9bi㮯èÛ0J…ôÃgæÂ¾ßÉ hSÇÊéƒíO¹sù7ôW;Aæ–à30ºüÎð4®æÄ”Ê;rÇ\“îû2 Û`Tî.ñTEævööé1ð˜%‚ ’&ÃG/#oè8h™ð‡f€ËÁPYî4¨<¶ðݾðÉCÜOA!›‡ T趇L6­Ú÷<rư¨–îÉ…XÔË!9Óö.giŒ.Å*³LB·ü§Kè2¾YÂòžª#Ðàq­ÃðUÈ‹¹ù!Dォà}×ã\¼)ÊTØ1Cþs¡Dn-'¼Oë*ˆ‚B°øÐç>­0j‚>ÿMB>ÿý»Âuœ®;…Rª«Ny7yY¡\‹O`ëù¸ªëDCÌu É$0ËÓ%>A3zS!âFÄ™¯QZhr‹òÆÒÔ: 2ófLuF^#úµ,åÊ!v¨µ,š $U¥{ ³•öfLš“ Â–‚Œ  »;¶§O tQaOŸ÷4}èrà°šž?ô}…}¬<×Ðs¬•÷¡Ø¨å2Õø²úPEé0áÿß½aIÕ7ò¬t Ÿ 9Xz³Â} à½ýA\HªŸ"ðéæÈŠÕM+R”ø+ã24ðbÀð)NXa3›+Òû0l2qöw‘Þ W˜ Û„å<²¡ÂÀìÃc* Þá-·+ÔÒ“àÏjSª)Ši£‚YŽ\Á-3Dó¶û|0~Tíj[f2<‚/Ž{Ó×à'¡ý´¸©Ã;£Ô’…·¥ÁÛ#)t2Ó…AþNúä¤˜Ê 2fŸ'˜#®@I-b4.¨PeW2=ð©“‹Òöò‘«IR½¤R'6¶ì„‡Í<#ss¨\ &ÆU<#uûuæ#ˆ‰EpÆ8WÖÚË‚ƒŠ›Bm{´üëÀK²OF×´¦Ä TسNÇQÁ7F®[ ¦Ç3UžV+)-"#BQ3ŸÎÀñ6ì´0¥*æé+HÎÏ|å[ º›Sãe,Üf:'/?-($;b~Ò ób6§@Úi €‚˜ÚuAÁ!8é^çDæÅ<Úã±/ìM8Óöyˆó @s'Ì[„‚ø*¬9[Ò@_xö7u·ñ¦œ[lú4ñüÔzÄ]Ð(ñ¦fm¡¸¸T£Ã§:<á$ÛÄ×õ$”(»õ¨d8>Mlj?AÊsÓ«‚Ør†ÞWä9¥Ÿœ{CÞa=UÄñÈqäfî.6 ¦Q¨6pec:µö%M§T›ËF;‹–Ä}+þf –ÆZxwgÖ+¸ˆëbn][þ> ‡=Ð’Çã÷‚yg¿˜gªulEfžØtÈ›â$|'Ì3:sG]øßV„.gjèt_Yï¾sEsEÔ„Šü £Ÿt›^‚TaB‚6̢ч¤ªs²‹¢0zãlÍsäOÐÊà%yu‹m`#Z¯4½S~G1 Å¨U×bÈ–<ãN¹Ý}n øäzøOe±¨pÀåW¹MCkøly>f" o±÷Ù¡ãJ60QyÆVH{*a7¿¦qϺÊ"º3VO²Ë¡÷»å%Wd½m¼¶N%màD²]œmÚ„«Ý¸ ÜöñL{ÚtÀâEsrïËïF+´ÎD®:>$ öè3oàR±¦;²DËíöÔñ€BO1Fœ“°rãPΞ.À`Õú|TÌ‘vž¸ y :ãÙO):¤ìý,„|ñÃr¦ã¶ù¸@¦Î<€-~æŒ O‰×@ªîôµ~Ö ÷Á3Ÿ?ülûëTÈ‹€^’k1(†ÿš¸* Y„¡5F®ó™¢s ~J|rqŽÆa༑çζþîúþ¯AxðMý8 ÿì>{3íN|¼ÔTÁ‡À´#³sØ„yÚU€)y0¦ŽÁù(dÐòæ’Hu*i¹Iª,¬ãIœ„£ß<÷Þ”ÊTó<t)—s¥eBÏOaiAhúá8ôÃác 
TùÛ)t@s¬â\M®CO½ùyF!øEÀw¥,R=Jç4ó ÊŽÓ\H…P*l~Χ‰¥YÓ\…)[LFíÁQY°dÐÆ)QQaó#CÉû¨ÈÜËÒ'lËB¢|\ ô?ÐÈÜL£¡âÈ©þð#o#ýƒqÈ´D·ay`3-ʰCh étøá#<ŒÆŒj…€8üú®¾ü‰>J¡L'ÉŽE¿§Ðú礼9fX y‚796|Õ׉¶Ÿ¢ ;ÎÕ˧>M(§%Â+0‰Ôñ™ÜQ0jÑw^î@¹“ÏõÃÎhìR"­Dе»NŸòÚ­†‰ë|ïôÌ &ÈÊɘ#¥&t5+”›;ÓÞSá)JLc 5Á…ǯm¥½øö‹ “Éͯîc,” #u€+×ð¿»|‚Ð-ð™kÁ£­ÂÔè4zùCiCᆭWжuN²±?üÃ0Czì`a®“²©ðgáA·ôAG˜]$£Y’.8Xx*&¹p礋÷¼¿Ä*¦KQ} vŸˆj*ª¦ó AŒ@Mx¡ùàØ»rð€ßÃ{ekáÑOe!ìj¬Sleí>v5rõXŠüÂ<æÐ vEmð•åÑó‘iÈÁýg÷˜k$w€²{¬¢‹Íª?°/ašº˜ÞË„˜eMüÆ?ñìÅidV>ª7¸_Àá:“+Úwò6>ºÅ³;¶Œ"ÒzG‹xPm—ÑŽõ ùá2Á›…pXÒ³Ú3³Ýk_rs3ý_“÷(÷›£ʤl‘|„9qßÅ=žÑÐMê{ÐÍGøoÏ! C÷û2ñ¤kó°ÂÄì÷(qágÏõ!lóÎ7óÜsùA–ÖØÅÀ0rbí÷ÝÑ;@•)Æ`섟ߞé飰YÞ0¤{ºun‘€¾™gn0”JÚÕ½sú0n;wa‚5×ÞKè ?5WÐp3†sŠ'ûBœç•á*Ü$‡ÙÜ8ˆš¯ /xø‰GaædyP]pëÈß‘P¡eÛqç“p›ÆF-Ê–Ç4C¬­DiãY-ú«»…¨]pzÀ{áÅÄŠM.ÏέpLDÉ‹èØZé3h`¼eÈwO@|d™¨Ê.ìG ¡å;*´‘ì{ò\ „Ì.zýþÿ!Ò,þEüÕn·Ó‹}õ3詟6³¸Šëq¿«îÅÝßÔïä¾ú=ú#ýy¢~Þrÿ}[Œ´køúçÞÉ'uÕó†ê7[ê÷Yh§ƒL¿ˆní+mdßÒ`®ZuAÙ\Ó«̸÷õó•Ö]Ôíu¾hø8=V'g)Ôý³žD`ùadeÞ·Ÿò>˜`Ì6 I‡2HSèsW,%¯¢­_¼Þ–¹Û°¡wÙI½š_FtYIbЬÓp (ßê <£Ž¸te28Ù?lØ6šŒÔ Þ@ M\á! ïNÜÈhèƒ(8/‘mÖ{ l˜nѪ;Ñ1zm<·4 ŵ;TfHK¤³ÃYm\ªÖmŸ?9‰‹La]›Â"†urÑcà #qųȦP`Éždàè]&²ºÆâf¹ƒ„ï|…LZ%ßšGêÓÎDN¼± ÿgSf ]@­¡?LµÆÝ1ĸ¾E7ZþÛÂr*K®0÷¶¥}¹®¹ëZþ(÷ù ›b_ƒ·Î¹û üº¾§iüvë~äÒ¶W4“=xú^h¢àUá;&ÞHMX=ËO¸ýŠnI©!{£Ânú ¼ºÂ‰H¥àoýêØÆ@‚1®ÛÎȼbÈ«Di5J>j~úXx ³Õ¶ô/‹9}™/_–ºM{òŠph=W“‘Up}cí LŸž6eŽz WV…·M!rõx3.n'½‰§»ÖrþèWàÅüP­'Ð&rµV2ïµA/‹åšÅG{2"&¾¨² Ú1¦4¹´ µ“ÏFÍÇäuFí§Id‘ ®^å}Úß’Þµi=*œÖi 87·8åÐÈ·Qm¹•Œo¦B„'YÉhúŸМÃ1ø“`£æÞ·’%XkfV¿táз+ëÞÁXLNQç^`\Ã4xƒ8ßѬ´acadZá HÝ“(íQ\]w* -w$ÂòÛ C| 'ÉÉ$Ê5ÂÔ¾Ž)ƒ(†¥Nv%zøSҜ㋷i(³º ÐÞ#Ëj3ÉÆðAÙ€Õìµ,Ÿ# •`{¢üš‹a‰-T~­Ô`žÉðÀÎÔ/›ßZ˜ÖaËWa+ªÁF¾æOVa+±xÒSÆøA?Z‚ý£Cr!OÏê¯`A}î_9zd¢lc“ ˆØ½ë’ë-°Üþ5 k‡%·Q˜$>ÍUà0ZsJ ô£R~FŸ…ÿÄ×1Ý'oá×çŸÙp=öCËéb¼á$¼2Mü™Oüô½çÛОüd6ó Œðcèe5Aua=/h¯ºZ£ÕB´¶ö¥¤#‘÷óqý qô3;¶|ODa‚iü-¾îvÐÀaùŸ?ñøy™Ü¢OÞ‹E šÂ݈žMÇí+õ.²)Q°*- ìCèúÔö[} OZbktcõ¬Q6¹=6c¥1¬,ˆk—D¯}ÎÔs¥ ©ùtñ~µØ^‹¦ÚÏÎõÔ÷ØrÑÏ¶àŽ€¢íS 9ÑȃXÞ€NŒƒÞö+ðËL jVæÕmç=Ù>6‚ÖkÛŠí;ZH«xÍÊÂ]G]s‡}ˆÊϧ"¾eøù˜>Mô.ÞßÃè[ÌÓôÅjšMº‡'_òƉîËG-ç€Ã9á&!jùd©À7)Išíh 6Ð$žNݤ¯á$*WšžÜbÕ5uª#¸Î­ñ˜Ü™uºûcC;¼ºYJlú©ª"9ñ“ú'¾’F8“Z0ð>e8YÖš¬pŸ‘QH´0­t“¥wËÃeJ¦_[>ÙÆÝgˆ˜%ôY iklN…¸â–°é¼ÄçUœtT˜^?~~hu¶ p’nÊxð’'Q0Û(ÛBÚÔö‡X!íväÙÐ*NTĪ"±”j%¾²Ò®Æ™ÂãÛ!<¡–$ÉØÀSRÃûl5~aʼfóå 
úyÒß½À ïÓщ'ú·h0 }õÑÏad»8œÁ@6ÖznÙ·0Ÿ©ÒÊ'¦b?Ð'ü:ð: ‡¤¥$ð7 NªI‰‡Æ n\_tþÀM"þ‹wZјbï Ãuè™ã>ÔÇÝqF"ÿ|š×–Ø 1‹!Á%!FvNJ¬H³Ô÷ƒ¤±ÅË‘±u{f¨^Mìèɬk÷CùRSc_˜éUõÁ™y8àm6Q ìiŽ…QsÏ\ÍÀh¶®96jÂS›ì˜Šñ…OOÑïs µžÇãåXÏÉG\aZb ø²|fêÀùCÚI% `µVD’wt@Iº‘ÙÈÏc‹ÚpçT퀧 Iž;{­Ü~*dŒ}•ÒHp숇ä¾ó¬ c¯2Ûæ±ïZQ¶“)2+ߟ„çÞyá$¦×*Ìxqa¤ì—"¥PÈx†÷pÚ³°‡Š-·ŸóNŸÔ½‡uñGzó½JyKy›74]>+EîÝ]Êùkã1ü‡yþÃF-sQp«° ZB`d4-7‰ÝaÜ1zÌ H™ ‡^:×—Aàè¨jf âø*Oýqlþ?ôÓÍ™‰…µÖ½8Èïêç™®È|¾‡·CÝ_©7qÂcÏŒ*a—‰°BcϹc2|…NxGÜia$ÊmÔÂ쑣މÁ\ÊÐ>² g.OI|ÅÃ4³ýÚ öÓoiÞLz7Zs…™ëqspã"L>¹@‰nzÝðÚ.xy™ºñn*L 4’6—rhDý‡­ùXpVÑLw½ë“K¬>²çC±¦Wó>ÍàJÀ|.(iÙ`‡zŽ€ìK4·…p –¢B"BÁbŒÔa}"ŒŸðcÿŠÂãkû¦› UÛVÆÖL•… <`Íaît¾¼Ž3g½ê¤:¢Ô 3Ñz´„s±ÜôÊ彊_¡kUé6mÒ‰žÚ*Œ·JÒ—ªlÚ²3ãôºÇt2TÂãî^„Zm!Ðû—ý+0¿Y_ ÷nÅëxÍó"jŽ£þÕ7×@ZÿÊâªr…ÝŽÌŽ7ô’XkÃÏÚx.©Gæ¨; %øvûQ7îóÀŽ}[ Ë d52ÈêZŽ7ÁÜæÂ~^aÜòK7ÝaiìCa%£/.Œc©&B¢´›ØQAªË%Ø,P×€A+휠 ZÄ uEÿ ¾®<-̃p)Q8Lk>ІÈÃswº./¢›¶ oÖ4µfŸ´ fsµG1>¥¥3-ªòËy/C)V`sçu]á~"ƒÒñM¹î<#:ø¶ÏzÚ8d“>™èÉLñT|ˆc‰D~[ìgžáD.ÖqEK—6 ´qúJ²˜…“4›°Å ÌY`´â ‹üxrã{ñ-Èú £äêÍl FÝ Ñò4:ÑP¦ ‰N¨zúJ^¨„€«.aànáâI{…¹iC×¹¶¸–;’¸lA“‘šìƒæ¾‰-˜ùU*câ ‘ºA\Q’sÆó@œƒR» 9£ÏˆºVØÄª$Oã3AOËΪÐZ§á•ü`§j™ÔQ'[Ã÷=ozýæø¤Iº¾ÏIväwâx„u*¦ðAÖ—Ñýôì{Ù”f®/#” #RO¦L#_Ä[%·žpÈKC]€”‘Xê º¢wÐö?ð€ù\äܹÒÍZtÝ;QtCäp§s­0ÊÔÍý­áî7×AV„Ÿ™îV¬.í¶žlrN?@c¨ÙIÉÞËRäÎ)窢6™ÄÙoA©Ãh9=å™Ñ {…~xë:о>ïL‘ÐhâEDß>Ct8¹q‡"†¹fàÈŸy\¦Ê§ÇTÝs)=âi”=„%ýf‘`§Î3wÑ=çš|¦¤´ip¥g؈„š*“«1=ìÌ6oå‰òàWĸE.¹V‰—ÛG­f Û¹¨é`´R0ø4ä I›Ñ$Wj©WO>÷¼GÖ^¬iž[¼|Çц#®ËU°L-ùŽ#ÌÙòRR‚¼&óö'î"ÝmMs9¹nscƒp‹2ˆF‘öÌbThÔ6Î7ùY6ô1S½¨A…a¼ 7OñV• 6ZYÃãs„#ˆÑFA×Ü+œ|ãŽ~ ôIÇQ–æÞ‘AÐŒ›?§°y’¯!cz„)Uð«ª.DGsX>™¿:ŒÒ;°hJê®}TÜj~[Oˆ‰™mß§­ürŒÎ8A§š«!Ú<<Ø_4ç––ì¯é¯ø“=ŒÇÀÞbnJúF™PJ=Fôž¸Áè÷ˆûrð†ŠÞtîš1uË3ÅlZ*n;ÊŒXÚŽ„µˆûi¬f$z6W%JfºhÂæ xNûÊ!šVÉK!Z3€‹C—XùZiš,Ð…g<Æ@n:*¥”áQèóÑü¿µ÷‹¬¾Ú”¦:r~xåF¢*!&L`ðÚ8¦ÃNNAÏ…^3ø^I™½l q¡~úÔêýzÁܱ˜f®Y°îêãÆŠNàoañ=äýÀC€Áí„'ž¡”‡† ÂƒÕ Rá.…µó­4ßñouýÔ®ÂÒ;:ëÝQV‚º:Ñž,\yp»Â°Ž…J…ú^¶«ªªÛéñ¶‹ÀQÌ);íO™í˽¤í§²]oízÃÛ$½ä‰¼>{¾:K[1¬k¨>0οÕj&©‹«)Ú½$ GHÀÌ7ƺ>ƧÞ,Ç͹òéA§˜?èBðZB3\’ÀÃSKÙRa²Æ*]‡ãGý6 ÀçxÓ,3 c»¥ƒ NK?‰¶+,‡‘B€á E×pÙqä¯^bEɉÄE…AÓiàœhÈ9q…?>]ázPh^¿¸îFÈ·#Ù,Øø9ŠðüN6Þb‰ä{ ã(ÝCÓ39À‘Ü%CàC/8yD„p;¡Âšö ˜ß†™ÔÞ0Ðw¹MH6]÷‘5–Wwj'-UàëÞzŒs_ºÀúùj(üî%·ÒýÌøzÃÆ¾6¼wMfUwÔÆDf®Âª°eÌÑ¡±|å•úkÙ 5™y†²ˆže)ÁÚ¨ÓÏÐo@Í"Ç@å E 
—*gž¨ã[×þFµnÌ©£Œ]æäe.ñ¹ì<òñ©V#:r¹KjßXR’HÒ`¸Ÿr=oAe&‹æ£ÂÍùɬ!}šfÀT…`É8ã¹h}áì'J ãYX4œ\ôz§Ç$cUȰŒVÞi¶¥´ ^Ñ;½æÆ$G¶!7R§Fæþø?ô5º Ì8_ª‘—º^i6óšªFåãyo CŒ¦ù£À:|BV6®®E>=goB£±‘=h*¾µü@Zþ ¥–϶cæ§Y•N¦¥DÔŽ§¦%HýJð…BõT†#Þª¯LMK!ˆy޹éé–šîý äš º'ŽRa¦{ 3äÞû^\]Ǽ”8¿"oIkX*™-ªpí,R¹m™¡—°_1^ wV_ =½´òâBC×ïèÌ£•ô[ñc`«0&yãkúNþ†z£ÂVSTˆÓÊ ç«æB÷u:¥ -Á‡¿K¾àÅIa À+Ï&f-û'¯Ü¤ÕçÙ| óŒ—`I«å-±†6—°«FžãrµROòQº¹Ø·at}]äú”Uóãßœ}`sbð¹yt5Óü(‡«–;ÕÁ2ªÐ!@É¡hÜ„2ï/Ê‘øéK…åÓßfªøzòÍ!M‚ä/# ô­Oãû+léÓ§Y yVÎg†ž¾–aÜÒC[ù? ¾í¦® É! .ÊHjù˜qÏož€ ´K U½Éz‹/g§»¿µÏ®OKüOg™]:;Ü«¢÷.B(yò8¢”½»¤ärÉ9@Ð3*2ë&œ$ä0ÆÄ‹{˜6¦ôa$!¼F›‹ÖŒGý ¦f †ñÌÛ*GA4°€®g’¸J†ÓDëÉpÒ¬n˜ÍñÉéhvkÖñ×÷ —e…ͼ0«{G{õFÓh׫ÕF†ÿ¥[–ôÊOL»ø`ÕPÓR:S7öRwq kwÎ=Ì¢ÄöØòF@h® × žñ2 ‚T¬ñ-hœÉ•¹ÇÉ*GT•i¶ª¤çá_žï[¬í8Ù| Ão%yOfæxŒÞ2l´\×았¨DܻÛ2ãùý­¹˜·…|ò¬ó[yTsYÇ{@&‹÷¹Î,Ñ(¼ Cßµ‚Y§ü`‡©0Ns0ù™z³~¸W':•’Ö¯Yÿ6hØaT•´Õ¨°V!+I©7ËMàNÉÌÃ8`}=“ÆŸ½OÅò£4ÈmXP¹Ïäm ô6Ô zϱ²§î$òX˜¨xú,‹a¸0òó /RA¼¬H܃‘‰S5V%à%ßj”ù&¾ '¾#Ä}[EýâÆgFÂTb§âVóP‚r«ÕkÔ˶"ç)-™We¶­¯Ügú¼È ^ u 2leK?“?%PÒÏÌy1AÚtD¸š†íyø ¬¾©œÅȧõÌ5óT=’›vǾ5vvãÉ`à=L©\®úf W³ç•,+L–è^ˆ_4ŒCu£ ¯=yᩲ ü8‘y?)¼.»'§]À½vƒBhIòš*›Í%™Ëž5ŒsF.R3rލÄJj>ûL”áF,À*¬€ÎÅ(æ®{rqŽÆ –z8PþærɳUBžŒbÑä/ ›Vqéï'UF* #×ñ¬]̶ãïÆ2›AÊå{^âîÆc×F™†Ü¢)f˜Þbêyvó8ƒ6QaË£ÙV¦"©3+e¢xCI¸Xhãu{X¯ãÅMø€½1Øû—&Üì‘–8ƒpƒ²U³Uo€÷ÇîÉéU÷ô¸Ý?=ñVXfÄ…dÞ·âoä`зj5Û$ktýXˆ°M®›+ä=«0Ú”tœÖtP£†.Ž‚ôxÐZ¢8 sõl•ëiÞƒ.:'ÆŒð¦)ÓÁ{ÁxÁgnƒE› æÏ‘§2˜–¤æR‘©ÂDÔe‹Lk ‡ÂútZ6“ÚT)oføl© ÑÔí2.R)oE§Î[îÃØ÷lá'^—4§K XY0''¤RÂüÛay{®dW«ÖG•W<Ÿt;¾wÈ Éƒ dóv±«ˆ-Qñ¹t‹­°g¡ÔS4ÙŽ€EÍ) ‰ÿ)MêÖrÂ{ 0Kó‡%fˆux~_è Íës±Õi:P÷3&µª7ª‹ÉåR*ÇÝ„÷æ8Ñ9&튭!ž Š¨þæÞC‘¢4+ VX~Fx ²ù™ç$“À·¹×<”~&Ïn6‘/¦â™ø‰·{G^.˜j-ýq`A—MH ñ}Ì¿Kãi{M…=G>(Ýk*ì© ¿e8QÇTm¯«ï¦+5#,HE—d¢W*L´h¶mWÖgE €ÕsãÍ#ega ý]€ðOë΢S'ÅŸ'X%ì_6±|TvTeœ­°ÿ /‹\7@~$`,;»%Ðç(¡-l'SÞA­Q’%»HxéºwîbÜ’k1æ­ ©,OiÂ>üÅuz•ùQÏÛ¥Ò}"O¸³* ™_¹úÐo­„ Å䉮2À“îT~̂ܡL߆¶?}|¶¶_*ñìëê[¢ŒŒ(¥ŠCÐÎ"Ö b§ ‘9qf> ⯮?ºEÛ2:¬ñe©LÅ,Î~xƒòª§Ò«,‘–¦±}ðF0K#†I—范s‘çIJ(§kUÇ»Áó ;Ñ‘®Àƒ)ô¹íóЧRÚ©°~U`63 ïiî-¹ZžòL6ˆE­TúÐ)neI¸iîáÊ•7=§A_TSzçYvai®°i+kæó&‘Ì%›'&5¡–#8Ê [™0¢Ì™-#•lšá^ZØW¬x?]œë„§Â¦`v!jZ?:Ì)f_"*ì‘3~[s¦q!íl†©2ÖÉu¯ýå”9RKäLa­Lq›Um+^OŠ]jé7Õ™‹ãPùj¸d–ù"sL³)γ»c~»åÙG ]j‘]^ó¯0j~UŽÖ‰C› 
U½a­‚­œP’Ð(‰n…qnA¬U‹ÙÅþ·¸Ë‡K~Ïf~hÖL3--,EˆÊ3„ï)ˆŠÐ!µ@*Ä0êûêZNN‡@Á²ÈDð<3D½Âö‹ C ™! ¶•ƒ†¾­è)¯ –À¿r"¹ŒðT¯‰ŸÂ["3´û d ÷™™"‹ô„ù(²ÜQ“µ¤‡»â­Üùk.ºlÂo•û¦=“Îç £ )£˜<@Ô‰þ„òÜeÏÏ>Tå‹€ùJŸŠµc3Ü+¨$*'´'xD—ž¦UØìÞy3욃bf®ç;M›âsY+J8+sº¦ž4”z›wcjöyp…=á2»§e1T­Ô,• ‘³Nž–Ø,„¡ŸYø¨GQ²½ù÷“ {–p wžK0„iRºy£PX‰fnðÌÂz§iÛ0¨×PÌÍ[kev2Ó"ê ðŒK@èó•ŸÖQ½!… i s&Xd< ÚƒV=Ô³x¶+Loœ‡·I2þùãÇ‘ç8¾KÁmî1¿ÀMê{ðåðߘ¾þqæUžpÛ  Þ´ÜÕ§¬5Oœ¨¦y;uX5NÕÑ™ué˪ä4¨ÕH±ˆ"½Ç ± d±%., v>A±¨8¾ËøaŒGìf¤ª™N54ãÙ3 ’ÈÝÕ@Âä`KôëAmf1·3Mk635}E­1Ó×=´7á1޳nÍj±àWZ8ûybUž‰bbÔs*sAYEbïÎíÂFÿæToÃŒ?ßzcÄs JÁƒ,âÅ8ž…œÆáÑs-%vUuZ`æÈ2óŒÀÔó#œÏrc&'g‡ü)0%îÙYu‡ª¬á þ\ëR‘å´ ñ\œ‡R ¹T¢ÑpuøÀ“>b¼2¦-Äìì#žch.Å,C–¶¤™TúؾQI+§j¼ØdævBQp1 'IŒ!`>™o©îå–È÷3Û­±¿ÿ/ÿ;¬J¿‚íœp ²â²üžç H©ƒ4ÛT~ ÐG/†ÑÐ ¼¿hñ#âŽkiTï½ðìo\6ýð šhª°)ëüâ*hÏsÒýÔèc39Çìöæ4=Ï?¥b2µô[öëéŸi¨åÏÒ- ULf9î(ÍhzFŸ¿ÅÜS§0û9_dù36ÓHÜPêRa/b;Ê;¹Ÿ>Œ·"êýÜщu©4»h_ÐÑ–å¹RÈ4{‹ºRäFsÝj9X¹•êÜzø ‚ð$rõD¯\~T©Gƒµâ[-ˆ/A»ÜÑ'äÄi«,g}Sr„v'ã 7ßiVŸŽŸÑW°< ¾¡Cn”Šî ÏN®÷³¥¢Ÿ/’\,ë¬KúM©<»s`-›"IO$$"æqE[‚+—f n䙜xƒÁñ-Å$À$TØ2"l¥a ¸ç€ Š`iì\µ-!ô`qv.<)µŒüÙüšiÊqXÊ»”a÷Z¬¤ËSšÙ8±4…¨mdU7¹¦ˆ]Š•~æ¸ÊÒX|v!bñ¹Rh„äëçKÉWÙ»– bÎåt¸TZÚL˜ðSÞÂßí°w%½Ãß ¼s(!¨óNV§¢º"8”§ÀÁ$5ùm·Ðþ³WÄrPBŒÏŽ9³°Xh¿VÅ È~}áŠÊÝ]/ …6†R&sCÏí!¨`.(ÔyIèyõ€|.õ\^|zäz5£ü¶ ¿Qä}̺ÞÈ‚\åCg©€çXƒ …÷˜|ùXü$~*’8{!Ï÷i—ÖîièZ\D÷9ezFã‹ðd‚¦€Êݬջ˜jœÓþˆ†¹Ùƒ-Ÿ4FÎë_ë_~p±H–£Îs¹?å“ MO&—c]¹LvO¼òæÈ¬Éëâr÷ºÆùåIçsç¸Ýï\^”Êå@¿Ë DÖÄ*H¦§áàÊ*Y˜3ÑNÍÖUää:«d–õÃËf¾³C?Œò.xÄðxC3ý6ø ”H™Í‘Û„üM=¦þy<®k‡T ϦÈ.oxëS¥ zøyf¡ {*ƒ×|Ì ˆGñ Õ£'%7Å–ÀÜ݇‡%DP¡D”Y嵓–.lkX•ÛbV bÙYز#—¥6ªE.·(¼\a£ŸsÁ˰ßópe‘¬F€ÜM/Ú€Èå){ÓÈÐÇËz”dèC¿bܪïÄ ÇJRc=ÃË8² ’í«~7“æÄ!]µÑYí– 3T€ÚD.¾£ÚóB*ì9“:GÁª™%‹WއTUÏ1Ý…¿¿ÅÚ‹z!V˜á‡¸°béÖ\ÛÏÙW°ŒIôIœØÔ9Ú,ÈÇ£TFéÉ ·žx‰!«zˆt.ûA÷ÙX¢3X}fŸ!­ä|>a ¶NÑûÑ(¸Ça è–Õ¯Ìx^íF¡»/#¼4w ôà\”÷ÕZ&ïSå›´2ôVÀn\âøpO8˜¹:07¬°üÖ2˜`©wY-g`?·ºîصðÐ-ìÖ’¶lp™Ò”R¶áÅy8¦ámõMFÅ‚üLõ¹Ù®°%gȈðÕl.º‡ÈË ð}ào2c©S‡ù"¢æh¶¨:=&JÂøšIdë‰ãí)j=.»Î³Ë‚Ì?Sµ”“Ý=£"†ßs™ üR…-ÑЋ%ª—v\¯©¼mQ&N°KZÈî­–^CsXŽCBe'[è°Üu÷^Û,TÛËçÌ)•*ôj™Òð{Þ_iÔ'ü†}›Äƒ«0öXç„E<.ô™ÇÿèiöÔ°Èé Ú5²&iJ•Ë¡eÌ\ˆn$6;wŸ•ˆl*co$:M/–]$-¨¬Ï½“O¦ÆÒfûs`’Å(´/D%.];l™y}qÑZ¸l¾0tzæžúo$!ŠõcùìÜ„hŠòK>¡ÖÒ´"oäÖµ˜EŒLÿ “Aó"Y¼[ <½Ÿ¹²ž>[F80r>ÊÀÖµow]ŠuIMêJØQ̵÷¢éiæ#·Å 
„O7´#$tòÂËPbqúåe`ØÏ0ÄUYŒeq#k’(á©Û+©c¨Ëï>ßnHzÕòTft DLœ{CÎÈ%%0“;–<*sªD.. ;–Þ7ôYxW&Ù»k¨Y(O4ÌQÂÝklN3YJ‰…Yºzf.Ë'œqçâèv䟢Žq±ú)’Ñ^"¼JÑbµêi‰ ©ú|î_„÷ZNÉ==ï„ T1_…¶JÓëñ6Tñ.æçðü§¸ót?Ìiܹ°xÆÔHñ²øSiIFÉþ l·í{– \C™ ü1‡¿3o@ÉûxF?ÌäGA¦®§Ž†@ÀwË#Ç_ÌÒîQ¼òŒ>³*wø¥$ö;ÐÕ³,¼DÏo˜ÈËsð %Æ{3~Sæ´ÌÝ\\ÈÓkö‰òpwd‰[h?¬ê™”ºî(¼sóÉ”x‹¥må$ƒ-ÿd´´‹&cöNϼ`BYÏ­1™9Sñ¹}gy>ø‹Ç¸Âh‡°¼0ÞšEg0¥aØ‹‡Íä;³ˆ×„Z©×ÄxB±ÕS¡aJБæ<ÅŸç$tÃsÞŽ¬G®Š`Çu\ ,£Çù´”ÕyŽ_Õ ýsüª*Sƒ¤ÔV¡j)Ü]Oþ,Œ<‘ncK"‘©ºÚÌ¡ÒrÄqfð³ß;mÜûúùÊ4õz_ÊÌŸq+ƒÙ£ÙÞaNd ’]ÏM»±kÇ·» ØïV[{ÉìùJ1Q…Í_¡DÎFÏ¥j ÈèÃI’•O3·oE÷Þ% ¨f´Æìiœ,¼08YºGØ«EÒh¾ì-×ûÙw¥XCÎìižEâQ<à(Î adô; ío@'“È8M> a›ÅÌw€Ó±ìC裖&hTwD ¬C[d>w瘕Y Ìîš;SÆ÷rCÍ‚¥Od-ná‡%OÐ#D=IÄíì§4ûl‹Ö1iåN Åìòãg¼¸Ä$ßË8{rDzÅÏž£¹K-‰ÿ¥^"l÷ D„2Còü…¡¦’+l¾ü¡œûæØñLÖ Œ«_R¢¯©©ŠOu–Á#¨ô"¿{(¤ ™£×¾Èø™„ZÕMúö뇰Î¼ŽŠ«Ž-jþÕ4*y¨Ìa¢’Å%˜¥<*L5/£–Gš]eîZe¾¥™ôgÇ^žºdÜÆ’üƒxRR>”©J7ˆBßwOêtåä‚„{g„òÜýÿþV•‚àFV0±|Æ_& @d¤åçøÅÕá+Ï𪟲msh.÷“±ŒËúLt³ÈÜ)‡àssÙ°8ŒÕ,;ª2¸áxî&·aêàŽ™,¹Ë tA‚ãšé88¢×bié)H?6ŽÂ;»ãîaº‘hìN?‹)Õhæ¨müDI𹕙[™:Ä@—æ;è¤^”Ê+ïÁ7<ýö‚üų©•váNšà\7’³·ßýc3àRDf¾ò°¥®hö9o VyçjÞäÄ-aT5õX”XGM懜#þ*Úo£pdV_7’-­Äzeîsâ8ñ-ÌNÌ2#íVŽáÑòJ¹QcþÙã0xCC¤J›_$“Ø1–åIx¢—ñ,‚´ÝôãÅö@¬€‘÷)õYí…\JáPÍ–#Õ£AtI¥ÑÌŽ.¥ªZjÇÔáÈUŸPu›¸.H~/é÷¯¦{¼Lq3]nà§Î¹‹ù?ØÓô{'ìN‚vf#,”(8AÂLÑr‹(2Y Dñ¦IÀ,ïŒÂpPX¸/sg çówj¹š¢Ç}ÝLtl˜°Úò¹®¦‰‹ äNáRÝxNå™'Ãû~?P î,~&KÎæŽYžpCÒSÜÄʈr²éuc™SKݑ΢xy] y–1È« »ìuþF™Å2žñl™ ¯²Ùuïòéj²·^þ¨0cWldSzð؈Äuvéü¸oÉ5twUF®T¿ºàÏ0ùÌ–‘Ãg;n«6;Fûª°мËòÇ7¢,We?¬7 ì‡ñc`s·/tK×̇=¸Áø¶%}óñéÛ(”¦vgEž`&”€Å.lLNœ:ŽâùÒÒj³ãäâÕÒäù¢”C}Šâ¯Vàóˆa»ËÛ·Ùl^ã¥>ã”ÓŠª=×ÎÔ{x¦ø¡Ë€ Ê"m2È °Ï^ïô˜òØmýÿÛ{׿¶•+k8Ÿù+º2©:vÕ‘CR¢dÏÔóVÑ”|ŽbÝF”Ž“ù2‘ „´¬üúwïÝw\H\Z²4%:•#6@²htïËÚk­àYßyÏ“±( ¬ËÆú%ØêK*˜Z×/õXçŠÖéÉp¬îò:fF{…[Œ©gð|øÕà ªÉ’ÂÛ" ¥‰¨²^óH@­†¡6Á.æB¨aÂS°ž“±h"ð,&S³’Œ‹$ž¯g”ƒ³®»Ñþ¢æJ%ëúA!øHŒ†&8ŠìUä1ÅÃl–ò%´ðsEö®Õ4‡• L¦p§Úð.b-W2OHn¢Ju¯W¹*LçÜlúýO %ùäxpj÷Ɇ>ñ*ɪÃkãÉbôA²4xà À™=~`ì í=-ïâpÎQ3ù…lu‡R2©Ï0õpYÜìá{ÿ¹Žx^ ›93¤Ù«K?]ב›éUaöM›ß6{,ïÈóÙ·ÆX˜FœA¸$ùsäÄä^¡¬±+Ýâš—Þ–kµæ¬åÌa˜ð³{ØYw¨ÆÔηs¶O̸¯â4 °Ñ²ÓãÓ°ßßíÃ铈\BgøÉØbŒ¸Ü^Ý)<ÎmB‚&w¨ØÈNb0‚¦w~Úõmó ƒP’„‘}id&FeÓˀɒË]ÓÎÁ’KK.g½ê±ŠZK« #P^„Z°œ?䣔óËobXvÜÔZŠ;‚þ`Í;‚çž÷ƒ7ÔÀøÕYvÃÚn¦œ]M?‡Þì•Î×ÙmŒ\dª–ôă«s˜"=aF˜RpÄZ×XœÎN§;ãIµ-.Ö#W¢N%a³& 
e…àñÈ"Ò¡Ü=*Ũ•ŸZØbAx°XÝèzÔ`’Ù,eÔ€bàæÁÀ!=ÏL—YUà¨()«@þíd©b¼:«Ç0—L%Ž¢–QW9œ¨o¤ŠGŒGJnc*ñÁ‚ÇÐÿ 9!ˆéÔ`…èXôÅÑ-…ºÇÖ†”z>(õ;±¦™ë@T¢{Ýc.êÓ†si—‰MîL Ÿ/XÓÅ/°¦GšHWsqážó -77ò15³éuó1õ ›Sε6á´Ö~k(}Ü`üO†n«|܆†csê¥÷š‹-á-^ñž°!þõMäg›lηÅ2F{]k¬K¦h}ø{|ïÏ‘3 µ|`ÝìÀv¹öJÙ¦è x¡¨Ôí1«©k­îF¡Ôæ¡î«ì"J ¼FëóWb”#ø+`,?i˜¸Z¡fîý t¼P»%\”ò}ò<20Ü|:ŽÓtí'SS'yF¨•SŒâfœÖUK(ñN+=Vd”ÄÈÁD퀄Ëáò¥WçàŽðRÀðVËäL’‹ðR¦Ï}2Ðu;ŒÛp„Cì±Þ¥@»|³NžÍ$Œ5kâZâ›Miö²š9Xö¶ÓÅêNb]ÐöR[i¹ÃÈ.¼æiVÍÑ‚5°P…fTÛÖà8¼ñoƒ(ÕBLö>—” SX¬HßoUá¢?À¡•ÍX݆ۨǜhR`l•—Ã[”¥½[Z‚WŃÖ%/èuºäxÇê‹5ì›Û /KÃcN°þô )Â2Õ†é 2€~§¢]ÉÓ¬i7ægS*Ø5YDaîMa­ž˜ˆÈ`?ºLòÛ•| RjŒVLÀТqÂG l-Y‘¶ga4OËUȱ|¹ºøïuœy_¸Ø µ°wˆà7Ø×I‚«Tx W0NÈP"—ßö T4+ˆ£Kº«Ã Há0I|rq¼ðÄÿî Óif¦+)Q!i ) NßD]h)}zÌHˆ_u>§›iSH$ðÔŸ­Q«»$,03=§óYÀºj-öš’â4!jAï1Þ@yX.é÷Hƒ·˜Ð+˜`4¤¼&âÆª‘hri’WeM4éóÖ{„ö/¼ï1Þ$ÿ7¼·rÏJOŠÎG)Ïÿpo«áF]Ř¢îl¥¼ÁžhŠU¡é:9+Ó3Èöj—tn½’qÖÐ3ЭÍÒnQ8 ïó\(h‹Úq¿ô±2 K*˜·ÀÔaÈ?REÛ^ÎÓËYM¼¹mœšó‚OUãY–è=vÑ}ÅÑzÍKR|³Ì-Ç ÿ|ù3*:™F+BŽóå(úÈ‹*í¬MD§]¨Œ°½øÍíÙ}Ûê;޲âåîÀ¼ œ{æíâïõ1SÑÿ xÄW¤¦•pœ–šËA­£ÌñU‡ªÌyP‹ó5‹óôXrÍÊð£¢ýêhBÈ»Šß1ñ`™4ˆGÇ™·!ä® gE°Æ‚ë‘%›Øî+à<–,W÷M‹”'Z§žÈÞóéqDaã!Í)¡¯,Ó†¨Ü9ÚóKôó"Æœ% ~‹»¡x\!|¢ØÂç±OŒOé]¼BCe‰þðæ ­ * Ê¯BÊä —ûê©äh)nµÃ‰À­R š´K5¸­4±øÇR!Ô Ä±è«á¹—õüÇ^!b•½¬uÚ§å#èo5†m²íkDü6wsQg,³FÚüÒòkÜç å×9’ýÕ×ÿ fygÕ×\ê«„1ØÄª„MÕ_R’NkBL{ìëÑ?ØåÑç_Ø"ônÙ;t¿¿#è;#ê cßÿ'ÉÀúƒÁæÇ|3¬¦±<.°E·£Z¤K“>Àù"Ý`âu¶ZgAŠÖF`»ìÕÐp§­çF»ôæÁ:EG®¥s>‰“„ˆMÈäágÑ^\Â4¾˜Qº m«Øëæ}>ŒÔ–¥Ë#Q”‡c„Ç`!#m½-%à ËÌM™ùg¢ü•Á^,¤?J’8I¯â’܃«zQ”²Cëx¤¤Û`R¸qW‘µ ‚ûŒ6Þá'Ž´ßÉÄQ6žœ SŠáÆh¢ hh?Õ}B½R—Wßzv°°V¥WÜ“Xøk‹Òc‹‘ë±_)F5í~lDS^Õc]#˜Ö è5Ë%©RéYcfwþU¤êj(l!1ò·Ÿ†Ò^ÝŽñµxå´ä‡ì©’ü8BÊ+9ÛEi¦tccëO(}(p„ŸNéÃØ„ÒÇ…—dFa?˜ú¨‰a(bDBŠ $¹ÙB0¾ Ä:óiV’Ü¢ÈyWî=é¶ÉÒ”ƒÖ¤1÷åêâVÓáp=F¶†åáJo¢Ï4“åVªÇ!_ö 6i¸¯U*öÕcOWÛbÑÒ;b™Üù³{Ê<«¥®Ç°-]/ˆ«»m¶ý*¦ºâxó#“Ûz ZÙ‘€c5£ /á DzÖæ´KŽ0=>² tÍf·J£åá`ì-—ºlY¡n á(ÉEš¾~8OT¨—@7ä>çÁâ±ÀÍ›ŸŸ¤|×ßéÑd÷b|9>µ7sÊ0aPrä`ÿ®’o‰BÚí†Ïì`Ofá’8ÎæÑÎ,ŒS' ̀ɋ×Ĩ®p7ž¯ùrZ &1yŒyü`¥ÙÊ ÷vJ³[j#êÖºW%Î÷ticÆÖäh>’hGòH~,©ò¸ÃšVy»í°¦ Ò¥ä{\z ˆw&ÎTm/%î¯ ë¤Á×_æçYœµþ¡·UÄVžQ†'ÏâÈex´GgÛ¹YpN®{ìéDx´8ÍÈ9Æz6Õ….coI pÔHÁƒlµYÓº&CêSW‰Ž>XPóqjV‡þ Œ|´-¡OáâÖ6Xì¶B‰$Îl~íùÚ¡ùÕ¦\¢¤a@µD^Ë”t&ýÇ”àˆ¼àb\|„dc‘Ô­åBÈK1ï2=íúÒˆ”€å[>-k«€?_¬gC€x”‹ÿ;P¬ºx¾B²Òå$³BиNZ<J½¾~VEë« 
LOÆJÚ!A×G^=¯Ì3¹-½{Ç‹æ;Ëxî‡Ú/½÷± 1š3jÏ“¶µ‚EÂ2ïø,1,¼°×âÞú–‡Å#:9ß冶°¨ñå¾ßñ 2ä4\H±…|- ÿ  ý¶Ù¹~1]Ï6%Á¥—¢×ŠØ¯èê)õ›ipkûxS9€_Ùeñp­#w¯¡Ì©;~K8/ïÿJåM§ìlsæª&[%Ì¢îŽÏFnüN$_{É×g˜¹Jk>cQºXeÝz2—ì’-tG”¹Š‚g—þ¢V'Z+3¯,é`OÛ!¬kH´[×Ý×qÝÃø! coþ–ç‡`®­æÏ´2SV&³¯Ÿÿš¾G”],ƒŒÍÅç°ôÖŸs€ææHïÆ¨KóHï0åØÎWºhúé0­3Ìùc˜Õ PŒýõbüÐ9oŒÍ0‘z¬îÓ¾4ذË.%<ǪH¾+$’2èâ•ì}õÖ;t½œzJôˆÚJìç,ÓØYøœKskA‰€uÈóí ÷ÍFbHZ§ŠŠø äìÛ°ú?²kâ‰ËÑJ_¯æÛd•ŸˆP¡ÙšÐ+E8fö‹üÓQœ²hVÞ9¬iΉ¦Ô¯iÎõ´¶E¥J%“Ëéño¹¸96Qòºµ{à.|¾¹˜Ã±=e€â`ÐyTÜÆun‘$Œ².=æ„¢_î?9Ñq„&ƒQߎúƒ‹ú¸¬ÜZ £ ‹¡mÐ-â˜ûËóI¼\Å–É™öOáh‰¼­£á £¤†crà®"JÜÓ¦@—2˜ËóG¦ËÁ”#Þ©÷ãâa>Ö¬U§¢(­§“Þ-øPºÍ *ذÿ÷ÿÁò±dÕü<ˆRšnÚqU³NÏ‘Cµw`—Áí]¦m ´0`=—6F‚G75î`m™i» ú:û#oA¦Q†E=H>)²©+ùäFÓ»…ÎÖÜ´ÂâëL Ußœ)ü­YÑÓ‘¢’2®6‡F‰rŽì÷Ì Ï‘”ˆ$ÌR#GÍåszl [°ÃùPL±Ë1Ñ¡ˆÙ_›©˜õꥳë^‹žKÝ‹¬Nˆ%ú³;–äeª]—éÊ›ù;©¿òHZ°¨-ØRkRßø\àP¨ Ù{ï?þ²qooe¹õX3¥Žú±¯€;s<ºñù d¥ŽZVXbzñô ÒØ®£à_k_×-ñ÷,˜Sõè¶Øÿö°šc5BÞÕÝêühž§¦”M/#LA—[†)8åÁï¨Ò“§ï€i <Êé;»‰¿OÐÇöz¾A4déÊѯHßP˜ìÜGCFRS < Ø<Å)’±¨=}H=&ÿM£·˜Xj æŠ3åtŸçÑD§žÅT¥Ú€Á—¤JZ+õXùxÈ^¼ êz’•D2 jm,ŒÉ S¹Çd& ˜¸0vòƒk䱸2d¨Ïk˜0U ûàËŽalRFÚ7-YÒÛ™eHd©‰-17 ¦¨¯Ÿ  Å¨>y™Sc ”¦©W ä?‰gv˜h£ÆH!+èYv)ì±'ͼâ98ŸrJO™‘y8Ô¤(˜E'7Œ´O8ÖÕOL2_1|"Ø­`@TåPÉOjÅfkíYD•²RöwèakMb”茸_Øéሥj*—6z8pv¥Ã;Ü¥Bá;’›ŸjSÞäœÚô³âÔË#$Ÿ>å9~ƒÏ?ÁÄ#ü–À-( 'š»I¶LÖ¦AŒt]$Q+!fQ EUð `ðôŠÒÃgŽŒô™Ð¯NÎEÈKꈀ5°ÅnS™qJpÛÈÒÏA4¿(ñÛœrÛT ;á‚>*·*_x‰‘YÊêaÈŠšÁèMý~uuSíÖO³¿âãoS’mªPª¶Î•ëKøÍ¿Éï¸MJÊôV¤¾ºº´·"Q#Ges=Y¢ÄË7ýnx úX¾Z»Í¥z®¸óæñé–/0HTe"ÓMV9X±=V›ydCÞm¿”‚ëÅE˜p5£«²Ÿ%ùxŽ%•ø™6ž©‰w ¶)Õ…~ ›¥ "ÕÖ`Qr[” ãÉÉÔ"¸ÓÍ?·®Kk9 M[†êeùîhDe!†‰ M±ÍëEõóð ˜|Tnú¨Ã(Ï{­•Z`(µÄÞ­£û(~ÓZ—üJÿû}“YURðïpVåùKòܨôߪkÎ- ‹ )”ÂÌt"/€Î â„•Ñ‘¤©©2I´e#½~shK¹"'É3‡Zšî×<ðÁ Éò‚»žò¡ÑÙ5)õ!͵_Dl øÓæÍ“u ¯‹ÚN(WâpëÚ$ëT…TIÆaÌ|‰“£È*OG¹§I«Ǻ9»5µ0KÌŠ6|­ºSøÑ]$y=áÀôÁ§½x˜Ÿ _'{Ì!9!FàÝ~´ OôhòH'ôq-¤Î@Š]q =¢i‡ez Ô솺‡J² 鱘°SÌÊ?RÓeµ÷È&˜|ò?ºÕÅ+'ˆ÷xŸd<‰3 ŽGö/•lEuBg nfötl—¶|G¼†ÏÆÉ­ÿö8Úâ: ²2í®'GKµÂ¹Ë¾#Çv®÷õᓊZÐ ÂC? 
`:=Ú…dÆdvD0X&Úù”L ÃBþYþbîƒÿMZ§]q¡[õMq¡úÉnjüÑîÂ/¼¢s½4£Ï•ÁŠsâ"ÄvÆérj<™{öPl*¼« ÃóL»àñ‚ø2Éý”*ýTˆüèn× @k è)˜ ºÈ¦»ˆÜå_þ¸J×VDK6°Ï8£¬ÙciLÎfð`jÚÑ1½Glçßã†>,¥5Áw$Ôå°¨ÌlÒZ/BpqnñaKï⤃DhlJñ,àë,2ؽ í0¶¡gªˆ¥é1DzYé±6¬£\`ܤ·¿"†ÌßJÄ0{Lp Tó6§¸è1GUý¶ÿ댽{¸Ÿ ÿ~~u‘>F³¼´¶³)(HkáÙwI,[–Þ«ïÎ÷æ%Vyƒ…vË|hBÎR‰ô3÷TŽô»Xß„ðÈêÍ<ˆéÑ~ìÝ*ô"ø™÷l…g ¤47f6¯Ç šq‹þy¼«{™I”+]u@Lʽf`l¹8šÎ’`•å2't„ñCÉ0(ïŠð7p#;³Vk€$‰¢€æÊKn}uu®`ÙÃ÷š#Y*ƒ<˜5³{Ÿôk$™0-éHÝuÀꦕ„Û”’ÔeÒlòHI¸~£¾Ÿ–ÛÈÛóM™!¥ð¤¶@°õXqŸ<;ß¹ž]9žŒ¯ŽÏÏjh/«gJ7RÔiŸGÖ*Ú~.gAJ¢nÄÔÆwr NâjС‘èÎÝýQWqp·œ¡;ÉêØA&E@|l‰V’5Á“낺¾ µšF¡¯-Ü0d†SÀí1²1sZÇK?»‹ç-(pIØh5¡•3jy–I|îqÃ"g¦FšJ}X³4bZÒCí9e B W ¡Ú+Q{I$HŠâK`V»_ʹ.r#Qzôßå¹öDÛ–G ìw8–­Æ›X,æÏ¯þão~¤Hp0€,«X©R‡œ;ôî©Cs lìÈdi_¿ZXóËÅD7„œ-¬øéWì·]Ðcôùš&R«pgŸÌðD)íp…âêØV×Ðbb’^Li~QTõÄ ´ê±‡|c—lc_¹A¶ÞÝÁ:»/ ×ûµ”ǨæÛcª,eÓŽP“¤Õâté²eˆIÔŠµ3eUÑ£î~S8^ÚR64ÜëŠÇ¶fp½|% EaÔÅÔ»J¼Ù=§ ”Ùb¾´:HAVRã­P<œRØ`ÀúšÜ åöo“b€&&B^ÆàZøI"b-€À-ååùXKWë­ˆæ±ΦLJ&ß–¡î±CÆt†3ºeö3ö¾ÑÀÜû¨ŽœßÕg1»¨¥×.A,½ò»ûæ•GOà[œÜ§ûjË;é1QÜA?ô`œÂã,vîœè)˜oÑë{åŽTÿ;wÀÛi‚+ UåG<YUßÐ\Y{cig8¯/ ^± ?ìU)ãYàýùñáÿ \nt,àæ}‡6L¿Ò)x“¸Ÿã%àÝ­CSôá e—sì‚ w*54ef\’ðF² ¥¥—ÞÛJ3öÒÛRágƒEI¥Ysûs·o„Xà?‡§^òÊ9’e•mkÀÀ©(sÃV4ȱG!| *sÀÌÑË'­ŽðláAvxF˜†TžÔP®Á(’ÞîøÖe]-JïÊŸv-;.}G;WÈNÁ| qP„A`—þ\–, éýƒa“¥ŽxìÚòv¯GA6¶¶ÑºJP4ȇµ-Rcͳ”.|°¾4º"²ÛµrE”?÷®s`µ)ÒàÚF©¬»´•…bÌ—PqWêìY´CÇübæ¤Y"Æó„p¤¨ÍÒêúoØt€ôu¡)w8‰£Ep;N|O 1¸Qטb¿SL Ö}Ó".Òúÿruq½B)–KtÌ'1¾aïf°-ÁЗœºp2AÙ—÷”^Óg±H?ýtÛ}}>—ªrJ…ౕ¥:¥$•QÁÅј¢²2­Èim+±KZbVL§iSå¦Ô9 ¶7ñ E—íÓF©ï",T+Ö^ýã/>8<\Ý£.<±œØµ¹Wí‹?«”®QvÙfSy;ÅD€ld[ó¹ÇŒh’¥XH o,sùê°<`铹;µEa‡ÇÇÛÚ¢DÛkÉ¥ôÍÉz!íâ•¿\…rÎRð!Å;2ÑŽ7afø8GÈî#èáaW0;¿&î ÜQ˜£¥Š/ÜW¹šÙ/¨M‚Ëe5Õš@Ý3ÕüÞL6U«PöììfUÃÁ¨¨š@ú»À¢8|ƒÆ·®ªâÊ5³7ЭlÞæë¤*K ´}‹K‰—[›* AT_„ ]…·\‚ƒƒ‰úf¡o}·Gá¹ ¾yAvØÖ¹c¯bVP¸GÅc–#Óc.9LJXRG5*/ª,H°kÚEhÉ*¢ŒÜH§0~®ôuƳ{Z×sÁ½à “QékY0Ùß㥯.Óa"`3l‡("†'0:ÀNül³£Ue³;eÞí²(Ý”í-JªÏž"¢7‰k®‚“õÿ>Cnvã³~Ë_®°[{Ú°‰5†ÙDecIÀÜ d£¤ ŠŸÁlp‡±ÙÓcî065hð+õß?GXá ´ªzÌ"<ä 6KÀ_I¯1¢Çr³¢ÿaÎgc¹ æ›âk^ ͽºeøëÕŠ‚9ëV©›V«ZêcÝdç ::š„e˜=+%©ÊŸ@%ï50Z«J×Vyc@X ‚ r~ë›ep/¸žDmÀ†tPp¡IW¼†^.à޸؉ñǰ''"¿º¸×޼ƒßºÂì«•Ôh<ÞÓVÜ)­J÷òT: 4Aïð’*÷`fcáó*NE³²­Z̹Þÿ'q•lÅ豿³˜8¦ØB~ÆJ½Nõ´ä5 Œ˜G™ÅþtÁñÄ*é 0‚ݾPœ–÷rd¸ñ<ǽ® V{ªy·fQÈá{“d‘ˆ•oüäÆb?Jx±5 ¬ÖXû>ÁóvðiØïïöaÐ}‚Æ«8„ çä²x˜½9¾6¶`ø°Áð 
[ÁO|4gßJ(Ú¾b3Ä´ÀžÁn|p|ó¼ºs±ss7¾/2ä™ -%Ð ¨<­¥Ü¸Œc[üY7Wâ ZÖîTá ¸bo{ÂݾELáý¸ô#Øáæ’Ê$ÜuxŒ.îAš®s…i²i+4h{xÆU]Ú®YkytDëfx¡.o§ØnwÞX2\ðy–[FR’³2ƒ•²„Ê23ö¿çój«÷=-±Œoí+þ¸òCuG>Ø"bûÞHJeê0´<¥Ü±Üݨµ‘ú‹do4,Ûz9 i¾÷²ïÕ†&ö(º%¤Ošmíäÿ©ÒÔ¾@ÌÅ¥¿ ¯`'3;/xË]å÷ÉÓ>é‹Q©‡{ÙbÍ´ïàʽ­äøuäÞEs"ß¼:=9ŽæþâÌŸáÚ…ÿûN• Ë“+0SD*¡Å$ˆ‚ìQh´ËX½TÖÔ ,ägt]Ê»hv”ódìš Ê~'W½IXˆ ÌÍop‡ p…8¯ @i‹u—ÞÒÏë œË„ŠMghyñ\·Ü5‚ƒÓóñÒr`ÎÇTÏ‹hàî4ÛÓwÎtMʗȃ¾ôy¾{Ü•ö£ùÜ0: ˜«3ŒNG÷{¬Ç«UWš÷Æ<,9úÄ O›î\³ñöš¬0͆‚b_`úÓ‡G‹X—7ñ›Óà7ÃŒYW#|¬öû*Ì›e°ºý‚†ÆU|B VÅ6¸ë¼pÍ7ëÃÑy˜!Ðb.\†Z208†3FAùAl wt,°ì0ñYgvÕ½¡™À9äõìGÑlê…”#ÕîŽ{¬5q{¬ù¶*ºehEåŠÌò‡_ ¥’’a?«M½4ÜŸdçf½X ŠMðo×VYŒPðÛ°¯vØVs r |÷ërúFR¶È5p'±ÎÊËËÔ ].ápEWUí¯óDÂ~Y%[𖸀©ÁIÑÖ© 6·µZ³%Wª Œv"ÿ5¥KKŠxÆÎY±B¯;û¤f-ò‚ð"‰oo)…е¹nåHºÉ Iê ñ¯q@,•£avG#þÃ{å]“|â…ãyr}y¬¨™¾L†»Ÿþ“]#•7m_þwÔÀâ9Ø~!6u0й@¬Wm sû9v°vQ˜µiv¥Þwß šÍ?)‡»É ¢Ý&ñÜ¿ñRµÈ__ž¼Kßc .¯=rŠ%ß âqìýtPr‡Ê Å0ÔYƒ·µÃ€Éç,%uÊßcMLD÷„gƒ~!s6ÅqžhJÿ¤d?]_0Î+†Ÿ¬§.¶E¤ ú«Z¢Õ ?Ox‚½ðZcVcz!»æ61<ÜIŽW[d‡YÜ逾 ¾Áó0T†ÐKàë—)DpBÄçä0(QA!ÒN7Ï ¾âsè$_lÕ UàE‰F¾ôa›õRcN‹”Id·Ü¥ÜëÎÜ÷xZ±ðÚ™5J•?Ïoî·ëìjz|ø9ôf÷a€©¢Y¼ÄT³¢¿$0$ôñ0µ£èá­3îÏâtv:ݹOª7J‘*j–­^«;`¯xmâøöúÒªEuä—"©4¥Þ¡\±ª@z¨Âr¸øúÑãd©–SN*ÁLä€hUëlî ¢-¥Ò NÒ(µm>yÛd5×ËövØjls[p¶–醬í†l“›âldôXW™ƒJœ5]ÏcãVNø]Bs‡ê´o|æÿ€Y(Šñìj¯ÏÚÍõÔ;RûOÖˆ29¹óÚ‹‡9Væè”]9ã äÉVáÃb‚½oD)«nâ©ño|ìz5ÜŒ¥sš¦ZÂ( ø Nžç—2u7ÿ?f–µ k‘s“<ÄŸ6lPʼœÅÙg"Ò2j€©¨ù»DI\‡Ž¸–;»…h¸~µk”é*ô HS£½Âüè1gh* ÷Á×¹÷ñCµ¡y³1— w/)r[Û’–äe$Is0ñ½y…&œMBˆº#HËÉ5žÈÄ”æÈ¢Àú–gB¦xdŽ þ&䇻 A­òj¸­”‡Põ€JÔcjÐó¡ZÐ*»)ÎÈß$è…Ì}tZâKÌWÌh¨&,qaÚ5½ù"n7ù÷žlω‰ç¤¹zÕ¦°1Ú…Ièá>üØ:ô˜½Õ«VœüŠ,ÆQÆ9øoÒǺó¡9\½aœËwdÎ}â©ï%³»é,Ö–Ã!Bañ¬$ÐÌ3Ê0¶ˆÍ#æ#¸²øÉ:‰Ö­q®í±€š ¼ ¥R¦™÷u>›ÊH‡iæµõ-J< ì($k«Š6ý–á³"&7Ú#‰ !ÃY3XDNŒü‹ö J•²»¶y,Øç +ï3E[ëU‹ÖƒnnÀ«r zM½>‹ˆƒ~¹Œ£Ÿ@A_´j²Îý-äuă¹ˆ‚Ù\Nà=$øÓ±í[ìG—þ¾ç”ì>8_¢k×õ"câuÇÙ…Íê^`‹2x(ÜŽsm(õ¥ÓéÉв›±¡V¼ŽW8P§à=DžàFÉÓO…äéI<»×Ù¡"â/]=›['RkªÖ O"ƶœ&‰Ä|Ç8¬wè' ñK«´·.óB3Áce5{ÌO.ŽA‹?e]¿0%O×i&W "´ÃšjÕòŠÙû«Øål‹ús“ìòU¥L] é*“àÀŠ)›3U”gÙåÁ/›³±q4Ù¬‘Æ‘«ÕŽÐÎ(pÇaúýøâ¹Ø³x‡Rvƒb¶[7!}’C.­Äöõ˜#>ÚüBàŒ2nØÏÒ°²ôÔ[)ö|¨„—¬'<2—¬$ÆQhzû(ÔCÈŠËñáñõTÌê\©IEM»ªüúá·Ê µÆÿý5T˜¨Ð®ª0™úŠ%ïðlúõeä§Glz·ìjÕÁ2ƒž2ÜÇ› 
3ùý’\é^wTÂ^YÈØÂ^NTËÎi°]Àƒ4ƒÉ«’DT´EMäe¤TÁi“(ev¾PŸJyËÆ¤|¹z€u®ÇNí$f5ÎÐLì~¡”õìÞNÅËZ0yTÓÀ­OýŒž^IƳ‰²Š×¨³á¢a=†x÷ë‹ßÒ–¬EµàðeÉÄ–ËLG}²$bå ÚSÞmX`[3Ÿ–Ìv]Ç5Ú©îèÇj<ÿ~ÃQ½>î1w²[v°µk÷©šf× =«t(R,ƒÌ)nne¥]ïÍ)÷räf y %Ÿ–ˆ ´å¡Ñ7¸‰02|Ãkaßruè½Ünb5 °a‹éY^7b1˜ëÎôøP¯Çpiù-·À¶«$AiÁ'a8¨ª:R¥Š‘Ÿqú ®URëϺÃÁwÿÏ¿²? ‚ üVÍ?S!§?ÿ3Å×)ß„$Oatc-O¨¤2Ȫ ª«$hÖÑ rfó?ã›ûzKÍf¬Ÿ‘åÅiðo*¹Âøæ‰ß ~í÷øÃœ2¸DäK¼×"ú?{Û–öC†ð©™¿Ê>TåbB"/…ä¤~Y[Üõ¢S³$ä! “ÇX¶m!檪é®ÙwðѤþœÄÞD¸àSnÏ)0|D`¡MÙhçÁý¾R[ÄÑe¬…ùŒBz„YBÛ§Ò±¼û‹Cá™›é©Ú €“øv ¦Í­à¦Ñþ €Ú Ñò8´EQ1?"AÚ©ÏkÁéë9u$"èu,b'¼JwŽË?Ïf²›+ºßì£ÙÜ%He_«²±©ØAI=‹´0g^x޹ïÉøâúòX=û²¤mLÏÙá±C8»˜²å3ôóç^è¼ÔÌì±NPÝ|MKaUKG&w“DƒRVÓÇhvqu)/Å c¿Ë c)´ß%qü[ÕŽÀiXƒŽ‹Fƒêü‚Pæ¶ÐFw°äÈKž ø•rýù‹¯Ñ×ë1'0ÃΈúrH±ôÜÿÝv‡ÐÏ;úÁsŠŽà¤ò‚†ÃÝ~kž­Fô<[‚p 9= ¦v¹×å™ÚÏ“Ûë(ÈPñTjoÈ` òä4ÅJ™¨&• ŒºØg¤h?0ƒM¥¤SªŠ #锨Ñ1ç×5cÜÝÞvÓëw/™ãE¼½ ;mÙ_ùƒÏÃrRó¶¥9©j{~wÃü¦ýOT†¢ž“/Wk]06%¤ƒö.¦4‹¥ØÀ½%PUêGYþ &rwÃ"~±Nüè×¼{¼k`±ÇLâÕ£ ç÷À‹à1¹Aͺ•¨ÐèHr{•cbñJ¼TIŽ8ˆ>ÞàÚÅÏ¢=Fo]‰tö˜«ìpí@HU$ÖYqdâÆMéË©g¨,w“ÛsÇÙ<Ú™{éŽ"ÐÞ˜C’Ü£{ò"73ºo¡¥£yôc$J+¥5ü¿ ü_Ô˜vW°±k¸Bˆß»ÔñÔ/Xâ;{=†æ>Åá(K8*£µ¸¥’x+v+—žÙ$­d«Ë-Íú¯ÉÜ «‹;Þq¤œwÈ;ŽEXè|%íx$'_’À©­ÿà„’àœƒ‡œX÷mi„hwÏŒ¿GS°«jlbXs­°qdåˆèFØê„žÝÑ+>Nü˜.w&·p‚ pläpŸî$Ãu ¸$ÃÛ¼ ÂYRuU|yÅIð60{ÌQp²¦M_]glçë°E¾j?ÞûÅ=å(¡õrÇfñF!PÕê3èù°w¸.Œ†ûÉú Ù 7¦8)sÚ,¾–W‡<…ñ(ØÄù*ošœQj/OÆ6`,hD\§¢® ‹^Ì+¡ò°!nÿñ:Ó h‘tPím+z¬n'¬’¶È‘Dþwš_øßjç%Ï¥\G½i\{Û„ilƒï­¾7ß5w‡FFë4…'ïǬ¼±g\ç[äâÝYɆ½~.Ù°N•Ñ¢°!oÛcÛn•]ÔÒ ªŸ,Ø"$Wj:HK‘›çTófÆ9x lg[Ê™!3«°Jµkf6즻“`uv]~C•í/6ᧃ;#ܹ^…±7ÿìEó‡`ž)Ûç3QdR{Dìëç¿"¬6æJlMŸbéÊ÷‘É"®³òV1µ õ4ØS‰P7O!ÃÜa|›®‹à‡ûº0Ñ)ôÛc[‹Qšf¡Ìuê­µÞE›¶êRöªLô†8C#È`”^GÁ¿Ö¾B8[aîØóÈ>9PÛ1Á<ŸrûEäeëÄ wB˜¯kðSwf"wÊ‹™4#®ngâ3L~>mœ8ÌO «}ئ 7™N“Ä'–´l¢<íeù9na(WS‡Ó§9ðo_#dïƒqøà=¢4È„ü3#úÎÃçO`7¾ºJê´úÀsQ~_á­ð(Þx<¶1ÞÇß÷™G\ŸàD‹òÖ÷Ÿ ÛPת©ˆð§<©ŸÐ­ ´(Sù¶d»´ ›®çñ¹Ìð´Wú.ÅQ.bŽíÛÄ,]ל‘ZÇŽ¾EÉéœz?6ùAty©é Á쬦s0*ƒÁ·Êxp ,X§²Á¶’r£¶®ÊL©?#‡oc£Æðù:»ÉW‘`Ò±-2œ¯ ª!ñ!†ÉjaèyVmœ°=ÖTDbã’°,_ß—§J¤ž/„e}°+Ë‚¾`I›¤²L°öwqä£U–=Äï)g‚×ÞÐÎÍó“X¢…¬óù 2&O­+¸ m“':¿º˜‹'BQžš³tŸ©ë%Ïy˜3ÔMÂÕ¡.ýŒ,c_°Ú+vIX÷„QþO bƒûzê­«ô±zAmêna…™ÙQžŒ üLÌäÿÀóqiƒ&ÿM(&& ”¼Øõ;2þ´'eFJ$'}ÅpŠãòÛRJyàŸô­`kÓep‡­%Î6æÌZÚ.—+ƒ“cVepçÉ-¯}ãºPTñ&ƒØx %8/}ë1—Åo<~Hˆ_+òæçWU«®È3–س+ÎLòÛµ®8»ÂrK^cÇÛ_и¾„zuÖ룙ôº 
f÷¾”,~9•8e1È=3‰rx‡ñÌ´þMlŽ©žõRê+ciÆ`[Øñ9ÊÇÚw˜;îËAyë,>¥ÑýÀÜAä ±ÇžÇ?!½ VœuŒXÏ›Äåq’Q°ªôf9§Øæu}Õ‰w£ÁÓœ{¿ÇVUê„mà)ÕõEõaMpÖñá2»mL“q 4¸BÑuwØB› ¥E‹'õúš=\ˆÜÓTþ…L |Ñ<²ñ‘Ý䑪…Ò(Hhâçùv£ëÕÜð~p>$ÚŸ?FÞ2˜±5N’†ctêßTˆôt¹ŒîÝ= 8wÌÈ žƒæ¨T¼œª±7a²šïå‘KðØvcÑãu@è;,‰­ÚŠ¿z71¡;•’î6ÜÂ×Á®HJ“D}àÔSöÈØX …ÚoJ:L¿R¼Ö¸ù¯\Cʈ¸z‘!.Å-½üeÒÄY-ùˆìŒêöõ … ooRLªÆ›Ævœ**‹u¾d>Ã5‘B³x¹‚E"âd!²ö²,¤ìÄúá ÉÏފì¸òô˜é-÷ê<­ßÙ=؇ëch#-ãaè/ö{¶ Oa߇¯ý!ZiM¿ƒö%µ¿¯#ß´aÑ<…+BßË„™Ç>Çñ}ǸíÐÜ(8÷üÉoM 70#§Ó£ A.Þ­B/‚ßyÏ<~iLV^éj«@<°qÚÇ·Pn[•©ú´µƒMl¢§¢ÖÅx­öçÏ"ðŠü³öçñû[<)£Ýáæ–~V¨Š¦D éåÜ+c´iÚE#rïÂÓå5¼Š#ìb3ÉE0[¹«¬+)íBƯYí öQUx„ Ý¡`ÆÆK®Y{¦âSÍA èÎmÆ’Ó<™í ¸ø,($ª¶á87&j=ǹÐE>Õœq6ññlFnè܈߲5ÕCk2­r5X‹‰œÄ…n`ÏÁc\޳6¦¡Ëy=Zî 5{VN–›é S±Ì×tsÀl$`Nípù¦81ó =Aç‘=J¤8›Ý­—`ÜÊÏ0Ϊ›7! -(©õ+~ ž%geõÕ8QGŒÃeäº2Ò7󉂧”§Õ%ï)î± S KV§n›lZ9 ÔÕ„ºmUìöûÊ{ôpèrž?õBFßþ]_Ü:¶xýTþæžï\O.wNÏ¿OÆWÇçgìz:þí8t:Wªd†IÞ%}ºg2ùÉò8š…ë¹?Wùª‰4ŒJ+ÿr5YøˆÒt¾Rì¯nNê¤$kh2¦Œ9jÐxb€ÜwŽÒ‹DŠÜÙ…*ª¹PÜsgûb†™ÙŸ èsw7·W‘LT˜õåÎŒb®‹çÊ(6™˜•ðEœ?΋ÔÏ´­qq>=þ;–r¢TU)2ðçVC¡ž‡-ÏŸxHó t{HÎíÍâ ;"°qcŒ– ˆ›ÓÇrÝ祥FjY÷ê ˹m1GOÁå£Å%yó­íñ½5è'›à{Á‘t$(>0Š‘y±<§±“÷ë7³ÐI0Sü |Û8±ªkSÂÀµp&“‰×活ꢾß26É¿N ÷ÿ1p“‘›×â<Œ{5„l²•:–uD?Ï 3»óXjM2åC`õ¦A#N­ÉSd€á¦ks˜ŽÌ ¼Ù$Rü=†q^ÆqFÀ£ã`H´*}† ˜j•teæý`pèÏ’G]H‚J=¥n ¿BG[Ò“K2DwÕIQª2ј,ÇZï0²,S¯ô Ídå Œ WS¤ØM!Ù@ iðbÆÍG›º§i  t*OmN £hFßt7{¬ùÖtz2å%‚s3Ï‚g}–NB«âÍsCI]ãPMÝãø†‡;?¢}CÙ ´júH†QyëPå;µw+°îÃ}9àÓÜýgNÀ8‰ï=,qaxa07`öövƒ)=æLÇ_· ªª(o`˜X•w¹ŽÆ)™¸Øñnc,ÉJ¼ãù<²/„èÅä»úVˆëbäçË%¨ÝԸצõt\¹=R‰zA–ˆÊåZ)« YöSR$*ù:ɲ+‹?–¹'ñ…œU¡lxÅE„ÕÜFÕþEBicÓ¬Æ*þ,!á>ŠÈ$ñçús÷§ÑÇrîk"'´uá¬ÝÜT'—+“ãl“ºˆã NØx»¡ÊÝ’ ßÀa:ª’Ë•£é¢˜:03ïíl³ðhnÛº-Rµx<:s’GìÜ3Îï/]Ð1*wAQ“pè<(ºAxÎŒ%^_E(W¤6ðÏ`çÃçm”+nÀ@Ðy³…ß²v ‚ðÚ,Ãê€U^¢Nf‚O‚&dqÕ‡›È£p\ÀMäâmýž#t²óôK²ñEU˜ŠŠAbß6ÇûŒ70ø¸Ø¢&òãÁÞka(uöLGàt Àü0ðTÝzuû{ AM|_£ßZy‰·ôQö:Q·fñ£‰KG~ò fÔWð4ÌY´1 {Ì¥(<Ô ÀsólGxÀ :°—ËpÈ—«‹ÿ^Ç™wúeËåh¨‰½CƒŒ-ý[4Íß Š6ŸÉSa×ËK+¾Kmº*>!_ái(N(ÄK×V/@{Ú”C‚ýTð b.<)5@²4[€ÊâüÍ‚Ù:ôpõÎx©n íY9™·S÷X³ÓŒ¤¨²¶Q^èÚÆ+/¹õÍîiùÒÆŒNâ]ÃbgÄ>=KQãÁÀŒrÎ"ƒ·Ã×làX2H¼Eˆë1Π§À÷àAWØCÝñh®ø–áB4øjíœõÂ…õ@?<ˆ$'i…ÔBþ¸k}-› °a˜i5j%;£çVûÏc¢°6½}‹F€6¯îº!5Xü:Bw0eÞ7¼o.8~áÇüå*KKˆ}KëëêŠ Õ ˆ5eÈÿ4Üó"‰³x‡¼øaç9ú†ÞO”KEÉ7l=…£/*öZ±^ïlËäJ×ië'Y†mZt;'mdÈÿ:²JøïõqCèk­›W~/4ýçŠÔäèqŠÜžÇ8óE 
˜^#Míí¡#´kOdz\ZÞ×äþk^Ñ’ƒêØ@X4;@uT"]NEAùœê•àRB‘`‡!¤ìJÊDÀöéexû°\+¥Ò£õêÝ:]ÃÓøH+æûÛ*yî:ßMŠŸ ¡¤£³pÂÊs 7ù‡ÞMóÑ$_žÇ<‘Ýë,EdtH*ê+´hö÷+;¬Ÿ¡º¿ð¿ÂSN¬Ðö>Ð2¦_¯ÐU-—×c¼…¦åÉLcm2YÝü'íÌ·æÔ`$‹iõFð)-HôŠ˜$‰ùx>kw!©ƒ}s„Šø)šâC/xƒ…þ)g ƒ}ì…°ŠÙÝR‡õ5ÝîX|A “m9OoñZöõª„q¹Ç8§¼j‘ŸI}xß[Â6‹`d˜ÞÁÎõÙz…QF–Â͆O2 ï5Ê“²ºæXÂZ ¶ÈÒŸë出¨J±%5ÀœÀ£²Õë±V(“ƒÜ áÚç\+CïÔU ›«”ì·…´’· hêOŽ …Pû¹B¨ 2/u—«pÈ6­Ù;‹´¤¢ ½Xß„Èfª|»­´*¾ãL5%ª0lä$D×/}ÔM@qŸ@Wºë8Çž z°uR`Åúïµ!2öùøìðFô‰þð_xˆ'' e1íYïÎåU]Ÿ„KÞÂÝrõOä,ƇÒc­\²mQäo~~â‡èo^‚´v®¯pôU(€Vj #ozGq´ãÿp#ƒQ <Íȉí]£l­‰í]^¦'X]¹áïqš¥EÊWã •%àÙg¸ƒ ÓÝ=4xÓ¥ùªxÓsŠÑòÉSÍŒ·wñ.pfºKúôê‚_C´Füæ%CD¨À•dH¯¹ÍB‹£t0~Êâü–‹+r~+ëÄf\íQ¤ÀNõÒ4žT-G ýíìð˜†Í6Ll `2]9À%G¶®]º èFkC}BÆõÏo–@ ­OÁk[\&wÅe•åÁû*•·B–Ÿ[3R,ÐÖŒà áþÝx³ûULÏSMèO1DÓcÑ>»6+’f N†é¸"Ey .U‹]íò^ªQé("h\,UÅÿ"‚&$÷%ðÃyj3uRµæ$Y°j9?™V(ƒ%Ãxø´îÜ ¹~¢ðŸB*QÖë‹|(+*—G²ž$fíG²†àï×%¾¾¹J|?5+q{.9›Øw^e×­bn$ƒ‰W'Óq¡^nQÁs6Fhs3„};Å/Yá"³Ã®/Jq#d'Þ#÷Ü× ²«ý—rÓ<õs¥ÑÚš]+¯{AÈI4Xð\E'è»çº¯¢pÚ/uÞ¯WÆã›»ËÇ£¨Oò$¢ »C3ºÿ5˜Ý—RÅÃÝË•RÓ/Jò‰0¾ Ñ£^ŽÑ§˜!–ªêé©U4R?³Ñcj‹Mqc™i=âi‰@9ÃÓuˆ¼ºˆ=’ÚÆ-FT™À­¤poð(/6Æ«ÆA¸üL´²(ÎØ’›µh§ãxˆ60^.‘ä ¸¬É:W¥)ïYGK0GöQrÙ»¢a¬Ï¾œ“Ó ‹’q ‚kÄ›Ýlª›I˜ÿªÅÛ`"짉öµÚŸLyÆÀønéœ÷£¼sZ.öi;×û“Ûׇ¿ŽúQºCÜÐÂy°püýøêÃko¯ÿõûû{Æñ5ì÷÷ÿ4îöú£ÑþŸúƒÝý½ÑŸXÿ úRx{á%Œý)»{×ûwåyÛŽ¿Ò×ôþƒ}>Âi69¿øL¸ß¯Øç“óÉWh¿B4(˜ì·‰·dð眖Ƌ £rÿÅã5-¸‰®Œ˜àA†lKE}‡fÓ#6¬#pµáûHRÖGô° £úíìšýÆ‹YÏ'±ðå£ÔÇÊú¶ sÄ©˜|ö*~¾ïK ßL^È!)(1aåw‹/úç• E÷xŽdQø;0ÝùV]–4,^‹5¸+p&ûv|õûùõ|ÛøììÛøòr|võÿ"O+Füïb‹–«0@ŸÕKÀæä¶§G—“ßáüñçcž»Lؗ㫳£ér~ÉÆìb|yu<¹>_²‹ëË‹óéÑ0TümWŠ´4b¢‚ÍÀLňÿw'…~…svç‹£ß.NؾáÏBV96æý:Ò–ÅË› ªÊÿ!(\%wuöÿ*•àå¥ÌÏçÓyʼăN¯î>0ówáûˆ(6ËŒžÃéꪱ%,<üÂ/F\y=h€qv‹ÅtˆVxðá1‘v;…p_SAë¼ôÓE<òü}rt\ºðu œ%òœ4^'3ñË”cï°ùÏãÕ*‰qê«_øóûìêNž/à«ÌAƒø(ð†«BC.Dà!8a–Äô.ˆ`ØðMëhÆ/è"‰—bµ*ùeõ¸Ï¼u*ïQBšËø©%dì«éró(¾­úYûÀÎñúÛÓǰôî}DøG·Ôuø"9S=R¢!-éëšüüþÞøùNàshOY¸`‹Ý6Ì6$‡±XÈÇ‚îÎæZþËniî"ß?ÅÅ”…—ú£|ÚQ“uÃôÅ¢ߤ&èv¼ ÍFì(4#ñÆÿÊ[á2ù?2Ÿ4Ë­‰ NÀ‹&w1þa\Óñ pµÂÇ;†KqK¹ |Hc˜Žê—ážñÛÄG .5bsr¿(çõTDÈD/ñ&Ì}„ßç>߆܄”§¹GV]ÆËŠÛ£F} },¹Ñ+\Hñëöµã«'üo«>_öÞMÞ³a¿?€å7b§> IÅÄ»NùÐÓÊ>?²ç+î‹8wð¬Ç¦²³96­ üǦ³;éÑÜÁܖЄÏÌ£ÿd³èÿ¥ôÁêíä6øæÙE0/ægõ±šE\­ öêÄì†Ü2:Ð%YåëTWÿ¾¬·J†¿¬Î« gÕÔ 
/r­ôg_BÇ9ŒçpúÛ·bÏõ±Üõ£hŽñªÞ‹Ã/rÖГ`ác¶¢d•4¿ÈH[dµxð¥uþ¬JeG^îÒ玽´®‹©=ÇÍè÷«Ó“c°?~”M~ûŒ—6 Ι6–Gl rûØ‹ëzÚV;ý'wúòðl"µ 7¾´ÎS2£²÷…£/­ûíQ§/¢ûí©žq='&˜sŠÐLpéÎ-éùÃ/míui~ÖÒUè=žvOÝþÒºüGà?LLU «ã…£?³û±ž©Ö¾Ï“æÅÝ^&Ó+zÌ¿nÆ'>ÖÓfñŠ^O¯à›gû ýŽ»{ÏNÇÿ€vÉì†]{ï¨ûüú–ºÔ^¿ûQÊ?!ºZ*Ј´Ë®ß®p¾Q#lÿRðét“öˆòmÂÏÐÍ–õîì^ú^«á5u¯0V´b‰9Y·¾jzÍ#F*„ùHñÍ9¾yÍc,Ú6°Ãñ‚pè_½Í#=(ø#`Ÿn«¼x™KkÞ„áö÷kÞ‹fŒxÖô’ÏœÕøzgä§OjF>Ìy'Óõ ‰¯>åþ$'JšÝÆþ-ñf<çŽ#^‡\ÓÿŸH‰ÅN£²¯ê*œÄ·`¹óÆ×;ˇýƒâ6 .Éap 9ˆðPäÛW=Ð’½4ˆÆáêÎÓ•o_õ@KBAt½ZQ±¨|ûªZ\³Ð›ŽÌÊ·¯z ¥Q„)‡Úê¡ê†×<Øý²Á~¼‘«¯zóªY ,€!é¯|O-»ÆûW=Ôb ákaIQ$ðõô5›^õ€‹á©iæVÛ+ò`T2%SÏD4I¾yÊAŸx{Å…é ,ÁûëÕfî³[Ä®}<x_’ôõåÉ“cºõ÷“Õ߉ŸdÞíj¢‚=„¡ z|¿£”‡K¨Àð 6ÝÛé,^¹ì®…½vÔÝOº»_‚0sŠ›z‚É °¹xyãäé1vÝz»«{kë~¾Ðé ¤1Ô…ŽÉv˜ÎëÍŒÆ#pí–'ó îаÄð_§™™•2߿ޭv8Ì£®¦ðMÞ­OÌ»O9Ÿ|`¥¶¾F'ˆ¿_ó­+Zº”©9 ÂÁš-Ùÿî…¯y裑ܠfq:–zÑ»Ôh4Ð]¦ÊäEà>ý´—|¨ûϫȲÄw|ÍŸv»ÆD¹ïÜ)‘ÚÓz}»v±áI¬8ɰÖÑR‹¤BjYÌJy"öp̰T=ʼkƒ±œÚ§º`_h¿Îâå* DÉr2ŽxNEuqˆ?§4£œhS:)z*¸º,§Þ`¹^Òòþ‚W„aPÑóiðï—ÞõÂ\]ŸÄÑl$0§¾!õÃÓSYÖ†%h:ÂÑìk /Ë`3ü‰ð®â“à»a9‘@|§Ú÷Ô‡hÎËÞgÈ 'i‹ÒÆ ÂtŤAVÆoå =é6ªJð¢ô$Žï¯8߇È>óQ|v`ç†SR¤f±º¨!|JÑ„Q {7ó¢y07nýû6C}.gv¤êù¢×êƒOát9è×1Ô¡*†& •O1T±p¾ð¡îª¡ÏCŠÄ‚¯d°0šÈç &œJcE^W1СÂòáHO`}ÂØàÖrV™齞ûÞýø©áhuÁö Ãnyd‘ËøvóóûbF÷Ñ]½Eyã ‹+ô‹ì'{°µfïÆ±§òËëHÕüFé%X~£¨öÏ_üýþÀú®×<¢¡9¢3ÿá×ʘ>F³¦ þ1žÝâxH®àÕh¯ä:Œ—Þ~÷9ŽQîÑIá†LÀû œ>0O^%½¢‡ Â)¥S¼îÆðÄaÅ.ÛŽ#ÿ›÷ˆãxMsi`,Ãè *M{6ô³mKÈ›ë”O_²+d*7eNãïþxÖ({ðó/úp°?´‡¡¾­¡/ö"ƲkE,«¯q${öH„ rá5P~¦„ |±Ä¥¥'œÜK%›Š¿Þÿ=€·-®ó¦ Ët\fñ^~™°ÝþÞhÛFÖAßkFÚ¡?ÔVJÖ8®û·A$¿Ó_$~zWˆ^Ó‰ýdýþ†ÿŠóO¼4;%µY.O“—@­ˆÖDŒ“G¦0ÖiÍx m½Ê…_Ë;{Í+Ñg7ÍÖiî0?t‘°Ÿæã§a"—ÿ*©úMqÌøÅ¹³¾Mäl,»Ÿ×éã7/ÐqD}hê“ðí…·N}ãp9žP¬çÉÃ¥H7û`6–ûYŽÁ2~Nš 1Â,ÂÚèà ’›/ú'ËÑú€]ÐÍÀýŠ9ü¨WLég´r[ÄÞ´¥™OسÛÍ:9¾þûñÉñøòjåÓÉñÜ¢à¼ãm!«EL§¹Û;º¥ÉòúÝoÀ&}‘ Õë±kÍÙ}ýÓÛëíõöz{½½Þ^o¯·×Ûëíõöz{½½Þ^o¯·×Ûëíõöz{½½Þ^o¯·×Ûëíõöz{½½Þ^o¯·×Ûëíõöz{=×ëÿry$° 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket48212/000077500000000000000000000000001421664411400236315ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket48212/__init__.py000066400000000000000000000000001421664411400257300ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif000066400000000000000000012600561421664411400276140ustar00rootroot00000000000000dn: dc=example,dc=com objectClass: top objectClass: domain dc: example aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl2"; allow(read, search, compare) userdn = "ldap:///anyone";) dn: ou=People,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: People dn: ou=Groups,dc=example,dc=com objectClass: top objectClass: organizationalunit ou: Groups dn: cn=user0,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user0 sn: user0 uid: uid0 givenname: givenname0 description: description0 userPassword: password0 mail: uid0 uidnumber: 0 gidnumber: 0 homeDirectory: /home/uid0 dn: cn=user1,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user1 sn: user1 uid: uid1 givenname: givenname1 description: description1 userPassword: password1 mail: uid1 uidnumber: 1 gidnumber: 1 homeDirectory: /home/uid1 dn: cn=user2,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user2 sn: user2 uid: uid2 givenname: givenname2 description: description2 userPassword: password2 mail: uid2 uidnumber: 2 gidnumber: 2 homeDirectory: /home/uid2 dn: 
cn=user3,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user3 sn: user3 uid: uid3 givenname: givenname3 description: description3 userPassword: password3 mail: uid3 uidnumber: 3 gidnumber: 3 homeDirectory: /home/uid3 dn: cn=user4,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user4 sn: user4 uid: uid4 givenname: givenname4 description: description4 userPassword: password4 mail: uid4 uidnumber: 4 gidnumber: 4 homeDirectory: /home/uid4 dn: cn=user5,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user5 sn: user5 uid: uid5 givenname: givenname5 description: description5 userPassword: password5 mail: uid5 uidnumber: 5 gidnumber: 5 homeDirectory: /home/uid5 dn: cn=user6,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user6 sn: user6 uid: uid6 givenname: givenname6 description: description6 userPassword: password6 mail: uid6 uidnumber: 6 gidnumber: 6 homeDirectory: /home/uid6 dn: cn=user7,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user7 sn: user7 uid: uid7 givenname: givenname7 description: description7 userPassword: password7 mail: uid7 uidnumber: 7 gidnumber: 7 homeDirectory: /home/uid7 dn: cn=user8,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user8 sn: user8 uid: uid8 givenname: givenname8 description: description8 userPassword: password8 mail: uid8 uidnumber: 8 gidnumber: 8 homeDirectory: /home/uid8 dn: 
cn=user9,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user9 sn: user9 uid: uid9 givenname: givenname9 description: description9 userPassword: password9 mail: uid9 uidnumber: 9 gidnumber: 9 homeDirectory: /home/uid9 dn: cn=user10,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user10 sn: user10 uid: uid10 givenname: givenname10 description: description10 userPassword: password10 mail: uid10 uidnumber: 10 gidnumber: 10 homeDirectory: /home/uid10 dn: cn=user11,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user11 sn: user11 uid: uid11 givenname: givenname11 description: description11 userPassword: password11 mail: uid11 uidnumber: 11 gidnumber: 11 homeDirectory: /home/uid11 dn: cn=user12,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user12 sn: user12 uid: uid12 givenname: givenname12 description: description12 userPassword: password12 mail: uid12 uidnumber: 12 gidnumber: 12 homeDirectory: /home/uid12 dn: cn=user13,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user13 sn: user13 uid: uid13 givenname: givenname13 description: description13 userPassword: password13 mail: uid13 uidnumber: 13 gidnumber: 13 homeDirectory: /home/uid13 dn: cn=user14,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user14 sn: user14 uid: uid14 givenname: givenname14 description: description14 userPassword: password14 mail: uid14 uidnumber: 14 
gidnumber: 14 homeDirectory: /home/uid14 dn: cn=user15,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user15 sn: user15 uid: uid15 givenname: givenname15 description: description15 userPassword: password15 mail: uid15 uidnumber: 15 gidnumber: 15 homeDirectory: /home/uid15 dn: cn=user16,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user16 sn: user16 uid: uid16 givenname: givenname16 description: description16 userPassword: password16 mail: uid16 uidnumber: 16 gidnumber: 16 homeDirectory: /home/uid16 dn: cn=user17,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user17 sn: user17 uid: uid17 givenname: givenname17 description: description17 userPassword: password17 mail: uid17 uidnumber: 17 gidnumber: 17 homeDirectory: /home/uid17 dn: cn=user18,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user18 sn: user18 uid: uid18 givenname: givenname18 description: description18 userPassword: password18 mail: uid18 uidnumber: 18 gidnumber: 18 homeDirectory: /home/uid18 dn: cn=user19,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user19 sn: user19 uid: uid19 givenname: givenname19 description: description19 userPassword: password19 mail: uid19 uidnumber: 19 gidnumber: 19 homeDirectory: /home/uid19 dn: cn=user20,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user20 sn: user20 uid: uid20 givenname: givenname20 description: description20 
userPassword: password20 mail: uid20 uidnumber: 20 gidnumber: 20 homeDirectory: /home/uid20 dn: cn=user21,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user21 sn: user21 uid: uid21 givenname: givenname21 description: description21 userPassword: password21 mail: uid21 uidnumber: 21 gidnumber: 21 homeDirectory: /home/uid21 dn: cn=user22,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user22 sn: user22 uid: uid22 givenname: givenname22 description: description22 userPassword: password22 mail: uid22 uidnumber: 22 gidnumber: 22 homeDirectory: /home/uid22 dn: cn=user23,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user23 sn: user23 uid: uid23 givenname: givenname23 description: description23 userPassword: password23 mail: uid23 uidnumber: 23 gidnumber: 23 homeDirectory: /home/uid23 dn: cn=user24,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user24 sn: user24 uid: uid24 givenname: givenname24 description: description24 userPassword: password24 mail: uid24 uidnumber: 24 gidnumber: 24 homeDirectory: /home/uid24 dn: cn=user25,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user25 sn: user25 uid: uid25 givenname: givenname25 description: description25 userPassword: password25 mail: uid25 uidnumber: 25 gidnumber: 25 homeDirectory: /home/uid25 dn: cn=user26,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user26 sn: user26 uid: uid26 
givenname: givenname26 description: description26 userPassword: password26 mail: uid26 uidnumber: 26 gidnumber: 26 homeDirectory: /home/uid26 dn: cn=user27,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user27 sn: user27 uid: uid27 givenname: givenname27 description: description27 userPassword: password27 mail: uid27 uidnumber: 27 gidnumber: 27 homeDirectory: /home/uid27 dn: cn=user28,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user28 sn: user28 uid: uid28 givenname: givenname28 description: description28 userPassword: password28 mail: uid28 uidnumber: 28 gidnumber: 28 homeDirectory: /home/uid28 dn: cn=user29,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user29 sn: user29 uid: uid29 givenname: givenname29 description: description29 userPassword: password29 mail: uid29 uidnumber: 29 gidnumber: 29 homeDirectory: /home/uid29 dn: cn=user30,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user30 sn: user30 uid: uid30 givenname: givenname30 description: description30 userPassword: password30 mail: uid30 uidnumber: 30 gidnumber: 30 homeDirectory: /home/uid30 dn: cn=user31,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user31 sn: user31 uid: uid31 givenname: givenname31 description: description31 userPassword: password31 mail: uid31 uidnumber: 31 gidnumber: 31 homeDirectory: /home/uid31 dn: cn=user32,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: 
posixAccount cn: user32 sn: user32 uid: uid32 givenname: givenname32 description: description32 userPassword: password32 mail: uid32 uidnumber: 32 gidnumber: 32 homeDirectory: /home/uid32 dn: cn=user33,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user33 sn: user33 uid: uid33 givenname: givenname33 description: description33 userPassword: password33 mail: uid33 uidnumber: 33 gidnumber: 33 homeDirectory: /home/uid33 dn: cn=user34,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user34 sn: user34 uid: uid34 givenname: givenname34 description: description34 userPassword: password34 mail: uid34 uidnumber: 34 gidnumber: 34 homeDirectory: /home/uid34 dn: cn=user35,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user35 sn: user35 uid: uid35 givenname: givenname35 description: description35 userPassword: password35 mail: uid35 uidnumber: 35 gidnumber: 35 homeDirectory: /home/uid35 dn: cn=user36,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user36 sn: user36 uid: uid36 givenname: givenname36 description: description36 userPassword: password36 mail: uid36 uidnumber: 36 gidnumber: 36 homeDirectory: /home/uid36 dn: cn=user37,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user37 sn: user37 uid: uid37 givenname: givenname37 description: description37 userPassword: password37 mail: uid37 uidnumber: 37 gidnumber: 37 homeDirectory: /home/uid37 dn: cn=user38,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: 
organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user38 sn: user38 uid: uid38 givenname: givenname38 description: description38 userPassword: password38 mail: uid38 uidnumber: 38 gidnumber: 38 homeDirectory: /home/uid38 dn: cn=user39,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user39 sn: user39 uid: uid39 givenname: givenname39 description: description39 userPassword: password39 mail: uid39 uidnumber: 39 gidnumber: 39 homeDirectory: /home/uid39 dn: cn=user40,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user40 sn: user40 uid: uid40 givenname: givenname40 description: description40 userPassword: password40 mail: uid40 uidnumber: 40 gidnumber: 40 homeDirectory: /home/uid40 dn: cn=user41,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user41 sn: user41 uid: uid41 givenname: givenname41 description: description41 userPassword: password41 mail: uid41 uidnumber: 41 gidnumber: 41 homeDirectory: /home/uid41 dn: cn=user42,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user42 sn: user42 uid: uid42 givenname: givenname42 description: description42 userPassword: password42 mail: uid42 uidnumber: 42 gidnumber: 42 homeDirectory: /home/uid42 dn: cn=user43,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user43 sn: user43 uid: uid43 givenname: givenname43 description: description43 userPassword: password43 mail: uid43 uidnumber: 43 gidnumber: 43 homeDirectory: /home/uid43 dn: cn=user44,ou=People,dc=example,dc=com 
objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user44 sn: user44 uid: uid44 givenname: givenname44 description: description44 userPassword: password44 mail: uid44 uidnumber: 44 gidnumber: 44 homeDirectory: /home/uid44 dn: cn=user45,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user45 sn: user45 uid: uid45 givenname: givenname45 description: description45 userPassword: password45 mail: uid45 uidnumber: 45 gidnumber: 45 homeDirectory: /home/uid45 dn: cn=user46,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user46 sn: user46 uid: uid46 givenname: givenname46 description: description46 userPassword: password46 mail: uid46 uidnumber: 46 gidnumber: 46 homeDirectory: /home/uid46 dn: cn=user47,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user47 sn: user47 uid: uid47 givenname: givenname47 description: description47 userPassword: password47 mail: uid47 uidnumber: 47 gidnumber: 47 homeDirectory: /home/uid47 dn: cn=user48,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user48 sn: user48 uid: uid48 givenname: givenname48 description: description48 userPassword: password48 mail: uid48 uidnumber: 48 gidnumber: 48 homeDirectory: /home/uid48 dn: cn=user49,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user49 sn: user49 uid: uid49 givenname: givenname49 description: description49 userPassword: password49 mail: uid49 uidnumber: 49 gidnumber: 49 homeDirectory: 
/home/uid49 dn: cn=user50,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user50 sn: user50 uid: uid50 givenname: givenname50 description: description50 userPassword: password50 mail: uid50 uidnumber: 50 gidnumber: 50 homeDirectory: /home/uid50 dn: cn=user51,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user51 sn: user51 uid: uid51 givenname: givenname51 description: description51 userPassword: password51 mail: uid51 uidnumber: 51 gidnumber: 51 homeDirectory: /home/uid51 dn: cn=user52,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user52 sn: user52 uid: uid52 givenname: givenname52 description: description52 userPassword: password52 mail: uid52 uidnumber: 52 gidnumber: 52 homeDirectory: /home/uid52 dn: cn=user53,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user53 sn: user53 uid: uid53 givenname: givenname53 description: description53 userPassword: password53 mail: uid53 uidnumber: 53 gidnumber: 53 homeDirectory: /home/uid53 dn: cn=user54,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user54 sn: user54 uid: uid54 givenname: givenname54 description: description54 userPassword: password54 mail: uid54 uidnumber: 54 gidnumber: 54 homeDirectory: /home/uid54 dn: cn=user55,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user55 sn: user55 uid: uid55 givenname: givenname55 description: description55 userPassword: password55 mail: 
uid55 uidnumber: 55 gidnumber: 55 homeDirectory: /home/uid55 dn: cn=user56,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user56 sn: user56 uid: uid56 givenname: givenname56 description: description56 userPassword: password56 mail: uid56 uidnumber: 56 gidnumber: 56 homeDirectory: /home/uid56 dn: cn=user57,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user57 sn: user57 uid: uid57 givenname: givenname57 description: description57 userPassword: password57 mail: uid57 uidnumber: 57 gidnumber: 57 homeDirectory: /home/uid57 dn: cn=user58,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user58 sn: user58 uid: uid58 givenname: givenname58 description: description58 userPassword: password58 mail: uid58 uidnumber: 58 gidnumber: 58 homeDirectory: /home/uid58 dn: cn=user59,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user59 sn: user59 uid: uid59 givenname: givenname59 description: description59 userPassword: password59 mail: uid59 uidnumber: 59 gidnumber: 59 homeDirectory: /home/uid59 dn: cn=user60,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user60 sn: user60 uid: uid60 givenname: givenname60 description: description60 userPassword: password60 mail: uid60 uidnumber: 60 gidnumber: 60 homeDirectory: /home/uid60 dn: cn=user61,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user61 sn: user61 uid: uid61 givenname: givenname61 
description: description61 userPassword: password61 mail: uid61 uidnumber: 61 gidnumber: 61 homeDirectory: /home/uid61 dn: cn=user62,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user62 sn: user62 uid: uid62 givenname: givenname62 description: description62 userPassword: password62 mail: uid62 uidnumber: 62 gidnumber: 62 homeDirectory: /home/uid62 dn: cn=user63,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user63 sn: user63 uid: uid63 givenname: givenname63 description: description63 userPassword: password63 mail: uid63 uidnumber: 63 gidnumber: 63 homeDirectory: /home/uid63 dn: cn=user64,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user64 sn: user64 uid: uid64 givenname: givenname64 description: description64 userPassword: password64 mail: uid64 uidnumber: 64 gidnumber: 64 homeDirectory: /home/uid64 dn: cn=user65,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user65 sn: user65 uid: uid65 givenname: givenname65 description: description65 userPassword: password65 mail: uid65 uidnumber: 65 gidnumber: 65 homeDirectory: /home/uid65 dn: cn=user66,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user66 sn: user66 uid: uid66 givenname: givenname66 description: description66 userPassword: password66 mail: uid66 uidnumber: 66 gidnumber: 66 homeDirectory: /home/uid66 dn: cn=user67,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: 
user67 sn: user67 uid: uid67 givenname: givenname67 description: description67 userPassword: password67 mail: uid67 uidnumber: 67 gidnumber: 67 homeDirectory: /home/uid67 dn: cn=user68,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user68 sn: user68 uid: uid68 givenname: givenname68 description: description68 userPassword: password68 mail: uid68 uidnumber: 68 gidnumber: 68 homeDirectory: /home/uid68 dn: cn=user69,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user69 sn: user69 uid: uid69 givenname: givenname69 description: description69 userPassword: password69 mail: uid69 uidnumber: 69 gidnumber: 69 homeDirectory: /home/uid69 dn: cn=user70,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user70 sn: user70 uid: uid70 givenname: givenname70 description: description70 userPassword: password70 mail: uid70 uidnumber: 70 gidnumber: 70 homeDirectory: /home/uid70 dn: cn=user71,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user71 sn: user71 uid: uid71 givenname: givenname71 description: description71 userPassword: password71 mail: uid71 uidnumber: 71 gidnumber: 71 homeDirectory: /home/uid71 dn: cn=user72,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user72 sn: user72 uid: uid72 givenname: givenname72 description: description72 userPassword: password72 mail: uid72 uidnumber: 72 gidnumber: 72 homeDirectory: /home/uid72 dn: cn=user73,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson 
objectClass: inetOrgPerson objectClass: posixAccount cn: user73 sn: user73 uid: uid73 givenname: givenname73 description: description73 userPassword: password73 mail: uid73 uidnumber: 73 gidnumber: 73 homeDirectory: /home/uid73 dn: cn=user74,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user74 sn: user74 uid: uid74 givenname: givenname74 description: description74 userPassword: password74 mail: uid74 uidnumber: 74 gidnumber: 74 homeDirectory: /home/uid74 dn: cn=user75,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user75 sn: user75 uid: uid75 givenname: givenname75 description: description75 userPassword: password75 mail: uid75 uidnumber: 75 gidnumber: 75 homeDirectory: /home/uid75 dn: cn=user76,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user76 sn: user76 uid: uid76 givenname: givenname76 description: description76 userPassword: password76 mail: uid76 uidnumber: 76 gidnumber: 76 homeDirectory: /home/uid76 dn: cn=user77,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user77 sn: user77 uid: uid77 givenname: givenname77 description: description77 userPassword: password77 mail: uid77 uidnumber: 77 gidnumber: 77 homeDirectory: /home/uid77 dn: cn=user78,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user78 sn: user78 uid: uid78 givenname: givenname78 description: description78 userPassword: password78 mail: uid78 uidnumber: 78 gidnumber: 78 homeDirectory: /home/uid78 dn: cn=user79,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user79 sn: user79 uid: uid79 givenname: givenname79 description: description79 userPassword: password79 mail: uid79 uidnumber: 79 gidnumber: 79 homeDirectory: /home/uid79 dn: cn=user80,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user80 sn: user80 uid: uid80 givenname: givenname80 description: description80 userPassword: password80 mail: uid80 uidnumber: 80 gidnumber: 80 homeDirectory: /home/uid80 dn: cn=user81,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user81 sn: user81 uid: uid81 givenname: givenname81 description: description81 userPassword: password81 mail: uid81 uidnumber: 81 gidnumber: 81 homeDirectory: /home/uid81 dn: cn=user82,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user82 sn: user82 uid: uid82 givenname: givenname82 description: description82 userPassword: password82 mail: uid82 uidnumber: 82 gidnumber: 82 homeDirectory: /home/uid82 dn: cn=user83,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user83 sn: user83 uid: uid83 givenname: givenname83 description: description83 userPassword: password83 mail: uid83 uidnumber: 83 gidnumber: 83 homeDirectory: /home/uid83 dn: cn=user84,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user84 sn: user84 uid: uid84 givenname: givenname84 description: description84 userPassword: password84 mail: uid84 uidnumber: 84 gidnumber: 84 homeDirectory: /home/uid84 dn: 
cn=user85,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user85 sn: user85 uid: uid85 givenname: givenname85 description: description85 userPassword: password85 mail: uid85 uidnumber: 85 gidnumber: 85 homeDirectory: /home/uid85 dn: cn=user86,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user86 sn: user86 uid: uid86 givenname: givenname86 description: description86 userPassword: password86 mail: uid86 uidnumber: 86 gidnumber: 86 homeDirectory: /home/uid86 dn: cn=user87,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user87 sn: user87 uid: uid87 givenname: givenname87 description: description87 userPassword: password87 mail: uid87 uidnumber: 87 gidnumber: 87 homeDirectory: /home/uid87 dn: cn=user88,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user88 sn: user88 uid: uid88 givenname: givenname88 description: description88 userPassword: password88 mail: uid88 uidnumber: 88 gidnumber: 88 homeDirectory: /home/uid88 dn: cn=user89,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user89 sn: user89 uid: uid89 givenname: givenname89 description: description89 userPassword: password89 mail: uid89 uidnumber: 89 gidnumber: 89 homeDirectory: /home/uid89 dn: cn=user90,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user90 sn: user90 uid: uid90 givenname: givenname90 description: description90 userPassword: password90 mail: uid90 uidnumber: 
90 gidnumber: 90 homeDirectory: /home/uid90 dn: cn=user91,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user91 sn: user91 uid: uid91 givenname: givenname91 description: description91 userPassword: password91 mail: uid91 uidnumber: 91 gidnumber: 91 homeDirectory: /home/uid91 dn: cn=user92,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user92 sn: user92 uid: uid92 givenname: givenname92 description: description92 userPassword: password92 mail: uid92 uidnumber: 92 gidnumber: 92 homeDirectory: /home/uid92 dn: cn=user93,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user93 sn: user93 uid: uid93 givenname: givenname93 description: description93 userPassword: password93 mail: uid93 uidnumber: 93 gidnumber: 93 homeDirectory: /home/uid93 dn: cn=user94,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user94 sn: user94 uid: uid94 givenname: givenname94 description: description94 userPassword: password94 mail: uid94 uidnumber: 94 gidnumber: 94 homeDirectory: /home/uid94 dn: cn=user95,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user95 sn: user95 uid: uid95 givenname: givenname95 description: description95 userPassword: password95 mail: uid95 uidnumber: 95 gidnumber: 95 homeDirectory: /home/uid95 dn: cn=user96,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user96 sn: user96 uid: uid96 givenname: givenname96 description: description96 
userPassword: password96 mail: uid96 uidnumber: 96 gidnumber: 96 homeDirectory: /home/uid96 dn: cn=user97,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user97 sn: user97 uid: uid97 givenname: givenname97 description: description97 userPassword: password97 mail: uid97 uidnumber: 97 gidnumber: 97 homeDirectory: /home/uid97 dn: cn=user98,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user98 sn: user98 uid: uid98 givenname: givenname98 description: description98 userPassword: password98 mail: uid98 uidnumber: 98 gidnumber: 98 homeDirectory: /home/uid98 dn: cn=user99,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user99 sn: user99 uid: uid99 givenname: givenname99 description: description99 userPassword: password99 mail: uid99 uidnumber: 99 gidnumber: 99 homeDirectory: /home/uid99 dn: cn=user100,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user100 sn: user100 uid: uid100 givenname: givenname100 description: description100 userPassword: password100 mail: uid100 uidnumber: 100 gidnumber: 100 homeDirectory: /home/uid100 dn: cn=user101,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user101 sn: user101 uid: uid101 givenname: givenname101 description: description101 userPassword: password101 mail: uid101 uidnumber: 101 gidnumber: 101 homeDirectory: /home/uid101 dn: cn=user102,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user102 
sn: user102 uid: uid102 givenname: givenname102 description: description102 userPassword: password102 mail: uid102 uidnumber: 102 gidnumber: 102 homeDirectory: /home/uid102 dn: cn=user103,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user103 sn: user103 uid: uid103 givenname: givenname103 description: description103 userPassword: password103 mail: uid103 uidnumber: 103 gidnumber: 103 homeDirectory: /home/uid103 dn: cn=user104,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user104 sn: user104 uid: uid104 givenname: givenname104 description: description104 userPassword: password104 mail: uid104 uidnumber: 104 gidnumber: 104 homeDirectory: /home/uid104 dn: cn=user105,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user105 sn: user105 uid: uid105 givenname: givenname105 description: description105 userPassword: password105 mail: uid105 uidnumber: 105 gidnumber: 105 homeDirectory: /home/uid105 dn: cn=user106,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user106 sn: user106 uid: uid106 givenname: givenname106 description: description106 userPassword: password106 mail: uid106 uidnumber: 106 gidnumber: 106 homeDirectory: /home/uid106 dn: cn=user107,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user107 sn: user107 uid: uid107 givenname: givenname107 description: description107 userPassword: password107 mail: uid107 uidnumber: 107 gidnumber: 107 homeDirectory: /home/uid107 dn: cn=user108,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user108 sn: user108 uid: uid108 givenname: givenname108 description: description108 userPassword: password108 mail: uid108 uidnumber: 108 gidnumber: 108 homeDirectory: /home/uid108 dn: cn=user109,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user109 sn: user109 uid: uid109 givenname: givenname109 description: description109 userPassword: password109 mail: uid109 uidnumber: 109 gidnumber: 109 homeDirectory: /home/uid109 dn: cn=user110,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user110 sn: user110 uid: uid110 givenname: givenname110 description: description110 userPassword: password110 mail: uid110 uidnumber: 110 gidnumber: 110 homeDirectory: /home/uid110 dn: cn=user111,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user111 sn: user111 uid: uid111 givenname: givenname111 description: description111 userPassword: password111 mail: uid111 uidnumber: 111 gidnumber: 111 homeDirectory: /home/uid111 dn: cn=user112,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user112 sn: user112 uid: uid112 givenname: givenname112 description: description112 userPassword: password112 mail: uid112 uidnumber: 112 gidnumber: 112 homeDirectory: /home/uid112 dn: cn=user113,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user113 sn: user113 uid: uid113 givenname: givenname113 description: description113 userPassword: password113 mail: uid113 
uidnumber: 113 gidnumber: 113 homeDirectory: /home/uid113 dn: cn=user114,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user114 sn: user114 uid: uid114 givenname: givenname114 description: description114 userPassword: password114 mail: uid114 uidnumber: 114 gidnumber: 114 homeDirectory: /home/uid114 dn: cn=user115,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user115 sn: user115 uid: uid115 givenname: givenname115 description: description115 userPassword: password115 mail: uid115 uidnumber: 115 gidnumber: 115 homeDirectory: /home/uid115 dn: cn=user116,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user116 sn: user116 uid: uid116 givenname: givenname116 description: description116 userPassword: password116 mail: uid116 uidnumber: 116 gidnumber: 116 homeDirectory: /home/uid116 dn: cn=user117,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user117 sn: user117 uid: uid117 givenname: givenname117 description: description117 userPassword: password117 mail: uid117 uidnumber: 117 gidnumber: 117 homeDirectory: /home/uid117 dn: cn=user118,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user118 sn: user118 uid: uid118 givenname: givenname118 description: description118 userPassword: password118 mail: uid118 uidnumber: 118 gidnumber: 118 homeDirectory: /home/uid118 dn: cn=user119,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user119 
sn: user119 uid: uid119 givenname: givenname119 description: description119 userPassword: password119 mail: uid119 uidnumber: 119 gidnumber: 119 homeDirectory: /home/uid119 dn: cn=user120,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user120 sn: user120 uid: uid120 givenname: givenname120 description: description120 userPassword: password120 mail: uid120 uidnumber: 120 gidnumber: 120 homeDirectory: /home/uid120 dn: cn=user121,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user121 sn: user121 uid: uid121 givenname: givenname121 description: description121 userPassword: password121 mail: uid121 uidnumber: 121 gidnumber: 121 homeDirectory: /home/uid121 dn: cn=user122,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user122 sn: user122 uid: uid122 givenname: givenname122 description: description122 userPassword: password122 mail: uid122 uidnumber: 122 gidnumber: 122 homeDirectory: /home/uid122 dn: cn=user123,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user123 sn: user123 uid: uid123 givenname: givenname123 description: description123 userPassword: password123 mail: uid123 uidnumber: 123 gidnumber: 123 homeDirectory: /home/uid123 dn: cn=user124,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user124 sn: user124 uid: uid124 givenname: givenname124 description: description124 userPassword: password124 mail: uid124 uidnumber: 124 gidnumber: 124 homeDirectory: /home/uid124 dn: cn=user125,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user125 sn: user125 uid: uid125 givenname: givenname125 description: description125 userPassword: password125 mail: uid125 uidnumber: 125 gidnumber: 125 homeDirectory: /home/uid125 dn: cn=user126,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user126 sn: user126 uid: uid126 givenname: givenname126 description: description126 userPassword: password126 mail: uid126 uidnumber: 126 gidnumber: 126 homeDirectory: /home/uid126 dn: cn=user127,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user127 sn: user127 uid: uid127 givenname: givenname127 description: description127 userPassword: password127 mail: uid127 uidnumber: 127 gidnumber: 127 homeDirectory: /home/uid127 dn: cn=user128,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user128 sn: user128 uid: uid128 givenname: givenname128 description: description128 userPassword: password128 mail: uid128 uidnumber: 128 gidnumber: 128 homeDirectory: /home/uid128 dn: cn=user129,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user129 sn: user129 uid: uid129 givenname: givenname129 description: description129 userPassword: password129 mail: uid129 uidnumber: 129 gidnumber: 129 homeDirectory: /home/uid129 dn: cn=user130,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user130 sn: user130 uid: uid130 givenname: givenname130 description: description130 userPassword: password130 mail: uid130 
uidnumber: 130 gidnumber: 130 homeDirectory: /home/uid130 dn: cn=user131,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user131 sn: user131 uid: uid131 givenname: givenname131 description: description131 userPassword: password131 mail: uid131 uidnumber: 131 gidnumber: 131 homeDirectory: /home/uid131 dn: cn=user132,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user132 sn: user132 uid: uid132 givenname: givenname132 description: description132 userPassword: password132 mail: uid132 uidnumber: 132 gidnumber: 132 homeDirectory: /home/uid132 dn: cn=user133,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user133 sn: user133 uid: uid133 givenname: givenname133 description: description133 userPassword: password133 mail: uid133 uidnumber: 133 gidnumber: 133 homeDirectory: /home/uid133 dn: cn=user134,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user134 sn: user134 uid: uid134 givenname: givenname134 description: description134 userPassword: password134 mail: uid134 uidnumber: 134 gidnumber: 134 homeDirectory: /home/uid134 dn: cn=user135,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user135 sn: user135 uid: uid135 givenname: givenname135 description: description135 userPassword: password135 mail: uid135 uidnumber: 135 gidnumber: 135 homeDirectory: /home/uid135 dn: cn=user136,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user136 
sn: user136 uid: uid136 givenname: givenname136 description: description136 userPassword: password136 mail: uid136 uidnumber: 136 gidnumber: 136 homeDirectory: /home/uid136 dn: cn=user137,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user137 sn: user137 uid: uid137 givenname: givenname137 description: description137 userPassword: password137 mail: uid137 uidnumber: 137 gidnumber: 137 homeDirectory: /home/uid137 dn: cn=user138,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user138 sn: user138 uid: uid138 givenname: givenname138 description: description138 userPassword: password138 mail: uid138 uidnumber: 138 gidnumber: 138 homeDirectory: /home/uid138 dn: cn=user139,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user139 sn: user139 uid: uid139 givenname: givenname139 description: description139 userPassword: password139 mail: uid139 uidnumber: 139 gidnumber: 139 homeDirectory: /home/uid139 dn: cn=user140,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user140 sn: user140 uid: uid140 givenname: givenname140 description: description140 userPassword: password140 mail: uid140 uidnumber: 140 gidnumber: 140 homeDirectory: /home/uid140 dn: cn=user141,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user141 sn: user141 uid: uid141 givenname: givenname141 description: description141 userPassword: password141 mail: uid141 uidnumber: 141 gidnumber: 141 homeDirectory: /home/uid141 dn: cn=user142,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user142 sn: user142 uid: uid142 givenname: givenname142 description: description142 userPassword: password142 mail: uid142 uidnumber: 142 gidnumber: 142 homeDirectory: /home/uid142 dn: cn=user143,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user143 sn: user143 uid: uid143 givenname: givenname143 description: description143 userPassword: password143 mail: uid143 uidnumber: 143 gidnumber: 143 homeDirectory: /home/uid143 dn: cn=user144,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user144 sn: user144 uid: uid144 givenname: givenname144 description: description144 userPassword: password144 mail: uid144 uidnumber: 144 gidnumber: 144 homeDirectory: /home/uid144 dn: cn=user145,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user145 sn: user145 uid: uid145 givenname: givenname145 description: description145 userPassword: password145 mail: uid145 uidnumber: 145 gidnumber: 145 homeDirectory: /home/uid145 dn: cn=user146,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user146 sn: user146 uid: uid146 givenname: givenname146 description: description146 userPassword: password146 mail: uid146 uidnumber: 146 gidnumber: 146 homeDirectory: /home/uid146 dn: cn=user147,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user147 sn: user147 uid: uid147 givenname: givenname147 description: description147 userPassword: password147 mail: uid147 
uidnumber: 147 gidnumber: 147 homeDirectory: /home/uid147 dn: cn=user148,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user148 sn: user148 uid: uid148 givenname: givenname148 description: description148 userPassword: password148 mail: uid148 uidnumber: 148 gidnumber: 148 homeDirectory: /home/uid148 dn: cn=user149,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user149 sn: user149 uid: uid149 givenname: givenname149 description: description149 userPassword: password149 mail: uid149 uidnumber: 149 gidnumber: 149 homeDirectory: /home/uid149 dn: cn=user150,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user150 sn: user150 uid: uid150 givenname: givenname150 description: description150 userPassword: password150 mail: uid150 uidnumber: 150 gidnumber: 150 homeDirectory: /home/uid150 dn: cn=user151,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user151 sn: user151 uid: uid151 givenname: givenname151 description: description151 userPassword: password151 mail: uid151 uidnumber: 151 gidnumber: 151 homeDirectory: /home/uid151 dn: cn=user152,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user152 sn: user152 uid: uid152 givenname: givenname152 description: description152 userPassword: password152 mail: uid152 uidnumber: 152 gidnumber: 152 homeDirectory: /home/uid152 dn: cn=user153,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user153 
sn: user153 uid: uid153 givenname: givenname153 description: description153 userPassword: password153 mail: uid153 uidnumber: 153 gidnumber: 153 homeDirectory: /home/uid153 dn: cn=user154,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user154 sn: user154 uid: uid154 givenname: givenname154 description: description154 userPassword: password154 mail: uid154 uidnumber: 154 gidnumber: 154 homeDirectory: /home/uid154 dn: cn=user155,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user155 sn: user155 uid: uid155 givenname: givenname155 description: description155 userPassword: password155 mail: uid155 uidnumber: 155 gidnumber: 155 homeDirectory: /home/uid155 dn: cn=user156,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user156 sn: user156 uid: uid156 givenname: givenname156 description: description156 userPassword: password156 mail: uid156 uidnumber: 156 gidnumber: 156 homeDirectory: /home/uid156 dn: cn=user157,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user157 sn: user157 uid: uid157 givenname: givenname157 description: description157 userPassword: password157 mail: uid157 uidnumber: 157 gidnumber: 157 homeDirectory: /home/uid157 dn: cn=user158,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user158 sn: user158 uid: uid158 givenname: givenname158 description: description158 userPassword: password158 mail: uid158 uidnumber: 158 gidnumber: 158 homeDirectory: /home/uid158 dn: cn=user159,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user159 sn: user159 uid: uid159 givenname: givenname159 description: description159 userPassword: password159 mail: uid159 uidnumber: 159 gidnumber: 159 homeDirectory: /home/uid159 dn: cn=user160,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user160 sn: user160 uid: uid160 givenname: givenname160 description: description160 userPassword: password160 mail: uid160 uidnumber: 160 gidnumber: 160 homeDirectory: /home/uid160 dn: cn=user161,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user161 sn: user161 uid: uid161 givenname: givenname161 description: description161 userPassword: password161 mail: uid161 uidnumber: 161 gidnumber: 161 homeDirectory: /home/uid161 dn: cn=user162,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user162 sn: user162 uid: uid162 givenname: givenname162 description: description162 userPassword: password162 mail: uid162 uidnumber: 162 gidnumber: 162 homeDirectory: /home/uid162 dn: cn=user163,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user163 sn: user163 uid: uid163 givenname: givenname163 description: description163 userPassword: password163 mail: uid163 uidnumber: 163 gidnumber: 163 homeDirectory: /home/uid163 dn: cn=user164,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user164 sn: user164 uid: uid164 givenname: givenname164 description: description164 userPassword: password164 mail: uid164 
uidnumber: 164 gidnumber: 164 homeDirectory: /home/uid164 dn: cn=user165,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user165 sn: user165 uid: uid165 givenname: givenname165 description: description165 userPassword: password165 mail: uid165 uidnumber: 165 gidnumber: 165 homeDirectory: /home/uid165 dn: cn=user166,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user166 sn: user166 uid: uid166 givenname: givenname166 description: description166 userPassword: password166 mail: uid166 uidnumber: 166 gidnumber: 166 homeDirectory: /home/uid166 dn: cn=user167,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user167 sn: user167 uid: uid167 givenname: givenname167 description: description167 userPassword: password167 mail: uid167 uidnumber: 167 gidnumber: 167 homeDirectory: /home/uid167 dn: cn=user168,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user168 sn: user168 uid: uid168 givenname: givenname168 description: description168 userPassword: password168 mail: uid168 uidnumber: 168 gidnumber: 168 homeDirectory: /home/uid168 dn: cn=user169,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user169 sn: user169 uid: uid169 givenname: givenname169 description: description169 userPassword: password169 mail: uid169 uidnumber: 169 gidnumber: 169 homeDirectory: /home/uid169 dn: cn=user170,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user170 
sn: user170 uid: uid170 givenname: givenname170 description: description170 userPassword: password170 mail: uid170 uidnumber: 170 gidnumber: 170 homeDirectory: /home/uid170 dn: cn=user171,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user171 sn: user171 uid: uid171 givenname: givenname171 description: description171 userPassword: password171 mail: uid171 uidnumber: 171 gidnumber: 171 homeDirectory: /home/uid171 dn: cn=user172,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user172 sn: user172 uid: uid172 givenname: givenname172 description: description172 userPassword: password172 mail: uid172 uidnumber: 172 gidnumber: 172 homeDirectory: /home/uid172 dn: cn=user173,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user173 sn: user173 uid: uid173 givenname: givenname173 description: description173 userPassword: password173 mail: uid173 uidnumber: 173 gidnumber: 173 homeDirectory: /home/uid173 dn: cn=user174,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user174 sn: user174 uid: uid174 givenname: givenname174 description: description174 userPassword: password174 mail: uid174 uidnumber: 174 gidnumber: 174 homeDirectory: /home/uid174 dn: cn=user175,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user175 sn: user175 uid: uid175 givenname: givenname175 description: description175 userPassword: password175 mail: uid175 uidnumber: 175 gidnumber: 175 homeDirectory: /home/uid175 dn: cn=user176,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user176 sn: user176 uid: uid176 givenname: givenname176 description: description176 userPassword: password176 mail: uid176 uidnumber: 176 gidnumber: 176 homeDirectory: /home/uid176 dn: cn=user177,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user177 sn: user177 uid: uid177 givenname: givenname177 description: description177 userPassword: password177 mail: uid177 uidnumber: 177 gidnumber: 177 homeDirectory: /home/uid177 dn: cn=user178,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user178 sn: user178 uid: uid178 givenname: givenname178 description: description178 userPassword: password178 mail: uid178 uidnumber: 178 gidnumber: 178 homeDirectory: /home/uid178 dn: cn=user179,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user179 sn: user179 uid: uid179 givenname: givenname179 description: description179 userPassword: password179 mail: uid179 uidnumber: 179 gidnumber: 179 homeDirectory: /home/uid179 dn: cn=user180,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user180 sn: user180 uid: uid180 givenname: givenname180 description: description180 userPassword: password180 mail: uid180 uidnumber: 180 gidnumber: 180 homeDirectory: /home/uid180 dn: cn=user181,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user181 sn: user181 uid: uid181 givenname: givenname181 description: description181 userPassword: password181 mail: uid181 
uidnumber: 181 gidnumber: 181 homeDirectory: /home/uid181 dn: cn=user182,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user182 sn: user182 uid: uid182 givenname: givenname182 description: description182 userPassword: password182 mail: uid182 uidnumber: 182 gidnumber: 182 homeDirectory: /home/uid182 dn: cn=user183,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user183 sn: user183 uid: uid183 givenname: givenname183 description: description183 userPassword: password183 mail: uid183 uidnumber: 183 gidnumber: 183 homeDirectory: /home/uid183 dn: cn=user184,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user184 sn: user184 uid: uid184 givenname: givenname184 description: description184 userPassword: password184 mail: uid184 uidnumber: 184 gidnumber: 184 homeDirectory: /home/uid184 dn: cn=user185,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user185 sn: user185 uid: uid185 givenname: givenname185 description: description185 userPassword: password185 mail: uid185 uidnumber: 185 gidnumber: 185 homeDirectory: /home/uid185 dn: cn=user186,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user186 sn: user186 uid: uid186 givenname: givenname186 description: description186 userPassword: password186 mail: uid186 uidnumber: 186 gidnumber: 186 homeDirectory: /home/uid186 dn: cn=user187,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user187 
sn: user187 uid: uid187 givenname: givenname187 description: description187 userPassword: password187 mail: uid187 uidnumber: 187 gidnumber: 187 homeDirectory: /home/uid187 dn: cn=user188,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user188 sn: user188 uid: uid188 givenname: givenname188 description: description188 userPassword: password188 mail: uid188 uidnumber: 188 gidnumber: 188 homeDirectory: /home/uid188 dn: cn=user189,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user189 sn: user189 uid: uid189 givenname: givenname189 description: description189 userPassword: password189 mail: uid189 uidnumber: 189 gidnumber: 189 homeDirectory: /home/uid189 dn: cn=user190,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user190 sn: user190 uid: uid190 givenname: givenname190 description: description190 userPassword: password190 mail: uid190 uidnumber: 190 gidnumber: 190 homeDirectory: /home/uid190 dn: cn=user191,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user191 sn: user191 uid: uid191 givenname: givenname191 description: description191 userPassword: password191 mail: uid191 uidnumber: 191 gidnumber: 191 homeDirectory: /home/uid191 dn: cn=user192,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user192 sn: user192 uid: uid192 givenname: givenname192 description: description192 userPassword: password192 mail: uid192 uidnumber: 192 gidnumber: 192 homeDirectory: /home/uid192 dn: cn=user193,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user193 sn: user193 uid: uid193 givenname: givenname193 description: description193 userPassword: password193 mail: uid193 uidnumber: 193 gidnumber: 193 homeDirectory: /home/uid193 dn: cn=user194,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user194 sn: user194 uid: uid194 givenname: givenname194 description: description194 userPassword: password194 mail: uid194 uidnumber: 194 gidnumber: 194 homeDirectory: /home/uid194 dn: cn=user195,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user195 sn: user195 uid: uid195 givenname: givenname195 description: description195 userPassword: password195 mail: uid195 uidnumber: 195 gidnumber: 195 homeDirectory: /home/uid195 dn: cn=user196,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user196 sn: user196 uid: uid196 givenname: givenname196 description: description196 userPassword: password196 mail: uid196 uidnumber: 196 gidnumber: 196 homeDirectory: /home/uid196 dn: cn=user197,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user197 sn: user197 uid: uid197 givenname: givenname197 description: description197 userPassword: password197 mail: uid197 uidnumber: 197 gidnumber: 197 homeDirectory: /home/uid197 dn: cn=user198,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user198 sn: user198 uid: uid198 givenname: givenname198 description: description198 userPassword: password198 mail: uid198 
uidnumber: 198 gidnumber: 198 homeDirectory: /home/uid198 dn: cn=user199,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user199 sn: user199 uid: uid199 givenname: givenname199 description: description199 userPassword: password199 mail: uid199 uidnumber: 199 gidnumber: 199 homeDirectory: /home/uid199 dn: cn=user200,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user200 sn: user200 uid: uid200 givenname: givenname200 description: description200 userPassword: password200 mail: uid200 uidnumber: 200 gidnumber: 200 homeDirectory: /home/uid200 dn: cn=user201,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user201 sn: user201 uid: uid201 givenname: givenname201 description: description201 userPassword: password201 mail: uid201 uidnumber: 201 gidnumber: 201 homeDirectory: /home/uid201 dn: cn=user202,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user202 sn: user202 uid: uid202 givenname: givenname202 description: description202 userPassword: password202 mail: uid202 uidnumber: 202 gidnumber: 202 homeDirectory: /home/uid202 dn: cn=user203,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user203 sn: user203 uid: uid203 givenname: givenname203 description: description203 userPassword: password203 mail: uid203 uidnumber: 203 gidnumber: 203 homeDirectory: /home/uid203 dn: cn=user204,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user204 
sn: user204 uid: uid204 givenname: givenname204 description: description204 userPassword: password204 mail: uid204 uidnumber: 204 gidnumber: 204 homeDirectory: /home/uid204 dn: cn=user205,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user205 sn: user205 uid: uid205 givenname: givenname205 description: description205 userPassword: password205 mail: uid205 uidnumber: 205 gidnumber: 205 homeDirectory: /home/uid205 dn: cn=user206,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user206 sn: user206 uid: uid206 givenname: givenname206 description: description206 userPassword: password206 mail: uid206 uidnumber: 206 gidnumber: 206 homeDirectory: /home/uid206 dn: cn=user207,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user207 sn: user207 uid: uid207 givenname: givenname207 description: description207 userPassword: password207 mail: uid207 uidnumber: 207 gidnumber: 207 homeDirectory: /home/uid207 dn: cn=user208,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user208 sn: user208 uid: uid208 givenname: givenname208 description: description208 userPassword: password208 mail: uid208 uidnumber: 208 gidnumber: 208 homeDirectory: /home/uid208 dn: cn=user209,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user209 sn: user209 uid: uid209 givenname: givenname209 description: description209 userPassword: password209 mail: uid209 uidnumber: 209 gidnumber: 209 homeDirectory: /home/uid209 dn: cn=user210,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user210 sn: user210 uid: uid210 givenname: givenname210 description: description210 userPassword: password210 mail: uid210 uidnumber: 210 gidnumber: 210 homeDirectory: /home/uid210 dn: cn=user211,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user211 sn: user211 uid: uid211 givenname: givenname211 description: description211 userPassword: password211 mail: uid211 uidnumber: 211 gidnumber: 211 homeDirectory: /home/uid211 dn: cn=user212,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user212 sn: user212 uid: uid212 givenname: givenname212 description: description212 userPassword: password212 mail: uid212 uidnumber: 212 gidnumber: 212 homeDirectory: /home/uid212 dn: cn=user213,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user213 sn: user213 uid: uid213 givenname: givenname213 description: description213 userPassword: password213 mail: uid213 uidnumber: 213 gidnumber: 213 homeDirectory: /home/uid213 dn: cn=user214,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user214 sn: user214 uid: uid214 givenname: givenname214 description: description214 userPassword: password214 mail: uid214 uidnumber: 214 gidnumber: 214 homeDirectory: /home/uid214 dn: cn=user215,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user215 sn: user215 uid: uid215 givenname: givenname215 description: description215 userPassword: password215 mail: uid215 
uidnumber: 215 gidnumber: 215 homeDirectory: /home/uid215 dn: cn=user216,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user216 sn: user216 uid: uid216 givenname: givenname216 description: description216 userPassword: password216 mail: uid216 uidnumber: 216 gidnumber: 216 homeDirectory: /home/uid216 dn: cn=user217,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user217 sn: user217 uid: uid217 givenname: givenname217 description: description217 userPassword: password217 mail: uid217 uidnumber: 217 gidnumber: 217 homeDirectory: /home/uid217 dn: cn=user218,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user218 sn: user218 uid: uid218 givenname: givenname218 description: description218 userPassword: password218 mail: uid218 uidnumber: 218 gidnumber: 218 homeDirectory: /home/uid218 dn: cn=user219,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user219 sn: user219 uid: uid219 givenname: givenname219 description: description219 userPassword: password219 mail: uid219 uidnumber: 219 gidnumber: 219 homeDirectory: /home/uid219 dn: cn=user220,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user220 sn: user220 uid: uid220 givenname: givenname220 description: description220 userPassword: password220 mail: uid220 uidnumber: 220 gidnumber: 220 homeDirectory: /home/uid220 dn: cn=user221,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user221 
sn: user221 uid: uid221 givenname: givenname221 description: description221 userPassword: password221 mail: uid221 uidnumber: 221 gidnumber: 221 homeDirectory: /home/uid221 dn: cn=user222,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user222 sn: user222 uid: uid222 givenname: givenname222 description: description222 userPassword: password222 mail: uid222 uidnumber: 222 gidnumber: 222 homeDirectory: /home/uid222 dn: cn=user223,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user223 sn: user223 uid: uid223 givenname: givenname223 description: description223 userPassword: password223 mail: uid223 uidnumber: 223 gidnumber: 223 homeDirectory: /home/uid223 dn: cn=user224,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user224 sn: user224 uid: uid224 givenname: givenname224 description: description224 userPassword: password224 mail: uid224 uidnumber: 224 gidnumber: 224 homeDirectory: /home/uid224 dn: cn=user225,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user225 sn: user225 uid: uid225 givenname: givenname225 description: description225 userPassword: password225 mail: uid225 uidnumber: 225 gidnumber: 225 homeDirectory: /home/uid225 dn: cn=user226,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user226 sn: user226 uid: uid226 givenname: givenname226 description: description226 userPassword: password226 mail: uid226 uidnumber: 226 gidnumber: 226 homeDirectory: /home/uid226 dn: cn=user227,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user227 sn: user227 uid: uid227 givenname: givenname227 description: description227 userPassword: password227 mail: uid227 uidnumber: 227 gidnumber: 227 homeDirectory: /home/uid227 dn: cn=user228,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user228 sn: user228 uid: uid228 givenname: givenname228 description: description228 userPassword: password228 mail: uid228 uidnumber: 228 gidnumber: 228 homeDirectory: /home/uid228 dn: cn=user229,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user229 sn: user229 uid: uid229 givenname: givenname229 description: description229 userPassword: password229 mail: uid229 uidnumber: 229 gidnumber: 229 homeDirectory: /home/uid229 dn: cn=user230,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user230 sn: user230 uid: uid230 givenname: givenname230 description: description230 userPassword: password230 mail: uid230 uidnumber: 230 gidnumber: 230 homeDirectory: /home/uid230 dn: cn=user231,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user231 sn: user231 uid: uid231 givenname: givenname231 description: description231 userPassword: password231 mail: uid231 uidnumber: 231 gidnumber: 231 homeDirectory: /home/uid231 dn: cn=user232,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user232 sn: user232 uid: uid232 givenname: givenname232 description: description232 userPassword: password232 mail: uid232 
uidnumber: 232 gidnumber: 232 homeDirectory: /home/uid232 dn: cn=user233,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user233 sn: user233 uid: uid233 givenname: givenname233 description: description233 userPassword: password233 mail: uid233 uidnumber: 233 gidnumber: 233 homeDirectory: /home/uid233 dn: cn=user234,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user234 sn: user234 uid: uid234 givenname: givenname234 description: description234 userPassword: password234 mail: uid234 uidnumber: 234 gidnumber: 234 homeDirectory: /home/uid234 dn: cn=user235,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user235 sn: user235 uid: uid235 givenname: givenname235 description: description235 userPassword: password235 mail: uid235 uidnumber: 235 gidnumber: 235 homeDirectory: /home/uid235 dn: cn=user236,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user236 sn: user236 uid: uid236 givenname: givenname236 description: description236 userPassword: password236 mail: uid236 uidnumber: 236 gidnumber: 236 homeDirectory: /home/uid236 dn: cn=user237,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user237 sn: user237 uid: uid237 givenname: givenname237 description: description237 userPassword: password237 mail: uid237 uidnumber: 237 gidnumber: 237 homeDirectory: /home/uid237 dn: cn=user238,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user238 
sn: user238 uid: uid238 givenname: givenname238 description: description238 userPassword: password238 mail: uid238 uidnumber: 238 gidnumber: 238 homeDirectory: /home/uid238 dn: cn=user239,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user239 sn: user239 uid: uid239 givenname: givenname239 description: description239 userPassword: password239 mail: uid239 uidnumber: 239 gidnumber: 239 homeDirectory: /home/uid239 dn: cn=user240,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user240 sn: user240 uid: uid240 givenname: givenname240 description: description240 userPassword: password240 mail: uid240 uidnumber: 240 gidnumber: 240 homeDirectory: /home/uid240 dn: cn=user241,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user241 sn: user241 uid: uid241 givenname: givenname241 description: description241 userPassword: password241 mail: uid241 uidnumber: 241 gidnumber: 241 homeDirectory: /home/uid241 dn: cn=user242,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user242 sn: user242 uid: uid242 givenname: givenname242 description: description242 userPassword: password242 mail: uid242 uidnumber: 242 gidnumber: 242 homeDirectory: /home/uid242 dn: cn=user243,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user243 sn: user243 uid: uid243 givenname: givenname243 description: description243 userPassword: password243 mail: uid243 uidnumber: 243 gidnumber: 243 homeDirectory: /home/uid243 dn: cn=user244,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user244 sn: user244 uid: uid244 givenname: givenname244 description: description244 userPassword: password244 mail: uid244 uidnumber: 244 gidnumber: 244 homeDirectory: /home/uid244 dn: cn=user245,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user245 sn: user245 uid: uid245 givenname: givenname245 description: description245 userPassword: password245 mail: uid245 uidnumber: 245 gidnumber: 245 homeDirectory: /home/uid245 dn: cn=user246,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user246 sn: user246 uid: uid246 givenname: givenname246 description: description246 userPassword: password246 mail: uid246 uidnumber: 246 gidnumber: 246 homeDirectory: /home/uid246 dn: cn=user247,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user247 sn: user247 uid: uid247 givenname: givenname247 description: description247 userPassword: password247 mail: uid247 uidnumber: 247 gidnumber: 247 homeDirectory: /home/uid247 dn: cn=user248,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user248 sn: user248 uid: uid248 givenname: givenname248 description: description248 userPassword: password248 mail: uid248 uidnumber: 248 gidnumber: 248 homeDirectory: /home/uid248 dn: cn=user249,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user249 sn: user249 uid: uid249 givenname: givenname249 description: description249 userPassword: password249 mail: uid249 
uidnumber: 249 gidnumber: 249 homeDirectory: /home/uid249 dn: cn=user250,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user250 sn: user250 uid: uid250 givenname: givenname250 description: description250 userPassword: password250 mail: uid250 uidnumber: 250 gidnumber: 250 homeDirectory: /home/uid250 dn: cn=user251,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user251 sn: user251 uid: uid251 givenname: givenname251 description: description251 userPassword: password251 mail: uid251 uidnumber: 251 gidnumber: 251 homeDirectory: /home/uid251 dn: cn=user252,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user252 sn: user252 uid: uid252 givenname: givenname252 description: description252 userPassword: password252 mail: uid252 uidnumber: 252 gidnumber: 252 homeDirectory: /home/uid252 dn: cn=user253,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user253 sn: user253 uid: uid253 givenname: givenname253 description: description253 userPassword: password253 mail: uid253 uidnumber: 253 gidnumber: 253 homeDirectory: /home/uid253 dn: cn=user254,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user254 sn: user254 uid: uid254 givenname: givenname254 description: description254 userPassword: password254 mail: uid254 uidnumber: 254 gidnumber: 254 homeDirectory: /home/uid254 dn: cn=user255,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user255 
sn: user255 uid: uid255 givenname: givenname255 description: description255 userPassword: password255 mail: uid255 uidnumber: 255 gidnumber: 255 homeDirectory: /home/uid255 dn: cn=user256,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user256 sn: user256 uid: uid256 givenname: givenname256 description: description256 userPassword: password256 mail: uid256 uidnumber: 256 gidnumber: 256 homeDirectory: /home/uid256 dn: cn=user257,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user257 sn: user257 uid: uid257 givenname: givenname257 description: description257 userPassword: password257 mail: uid257 uidnumber: 257 gidnumber: 257 homeDirectory: /home/uid257 dn: cn=user258,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user258 sn: user258 uid: uid258 givenname: givenname258 description: description258 userPassword: password258 mail: uid258 uidnumber: 258 gidnumber: 258 homeDirectory: /home/uid258 dn: cn=user259,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user259 sn: user259 uid: uid259 givenname: givenname259 description: description259 userPassword: password259 mail: uid259 uidnumber: 259 gidnumber: 259 homeDirectory: /home/uid259 dn: cn=user260,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user260 sn: user260 uid: uid260 givenname: givenname260 description: description260 userPassword: password260 mail: uid260 uidnumber: 260 gidnumber: 260 homeDirectory: /home/uid260 dn: cn=user261,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user261 sn: user261 uid: uid261 givenname: givenname261 description: description261 userPassword: password261 mail: uid261 uidnumber: 261 gidnumber: 261 homeDirectory: /home/uid261 dn: cn=user262,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user262 sn: user262 uid: uid262 givenname: givenname262 description: description262 userPassword: password262 mail: uid262 uidnumber: 262 gidnumber: 262 homeDirectory: /home/uid262 dn: cn=user263,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user263 sn: user263 uid: uid263 givenname: givenname263 description: description263 userPassword: password263 mail: uid263 uidnumber: 263 gidnumber: 263 homeDirectory: /home/uid263 dn: cn=user264,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user264 sn: user264 uid: uid264 givenname: givenname264 description: description264 userPassword: password264 mail: uid264 uidnumber: 264 gidnumber: 264 homeDirectory: /home/uid264 dn: cn=user265,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user265 sn: user265 uid: uid265 givenname: givenname265 description: description265 userPassword: password265 mail: uid265 uidnumber: 265 gidnumber: 265 homeDirectory: /home/uid265 dn: cn=user266,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user266 sn: user266 uid: uid266 givenname: givenname266 description: description266 userPassword: password266 mail: uid266 
uidnumber: 266 gidnumber: 266 homeDirectory: /home/uid266 dn: cn=user267,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user267 sn: user267 uid: uid267 givenname: givenname267 description: description267 userPassword: password267 mail: uid267 uidnumber: 267 gidnumber: 267 homeDirectory: /home/uid267 dn: cn=user268,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user268 sn: user268 uid: uid268 givenname: givenname268 description: description268 userPassword: password268 mail: uid268 uidnumber: 268 gidnumber: 268 homeDirectory: /home/uid268 dn: cn=user269,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user269 sn: user269 uid: uid269 givenname: givenname269 description: description269 userPassword: password269 mail: uid269 uidnumber: 269 gidnumber: 269 homeDirectory: /home/uid269 dn: cn=user270,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user270 sn: user270 uid: uid270 givenname: givenname270 description: description270 userPassword: password270 mail: uid270 uidnumber: 270 gidnumber: 270 homeDirectory: /home/uid270 dn: cn=user271,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user271 sn: user271 uid: uid271 givenname: givenname271 description: description271 userPassword: password271 mail: uid271 uidnumber: 271 gidnumber: 271 homeDirectory: /home/uid271 dn: cn=user272,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user272 
sn: user272 uid: uid272 givenname: givenname272 description: description272 userPassword: password272 mail: uid272 uidnumber: 272 gidnumber: 272 homeDirectory: /home/uid272 dn: cn=user273,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user273 sn: user273 uid: uid273 givenname: givenname273 description: description273 userPassword: password273 mail: uid273 uidnumber: 273 gidnumber: 273 homeDirectory: /home/uid273 dn: cn=user274,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user274 sn: user274 uid: uid274 givenname: givenname274 description: description274 userPassword: password274 mail: uid274 uidnumber: 274 gidnumber: 274 homeDirectory: /home/uid274 dn: cn=user275,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user275 sn: user275 uid: uid275 givenname: givenname275 description: description275 userPassword: password275 mail: uid275 uidnumber: 275 gidnumber: 275 homeDirectory: /home/uid275 dn: cn=user276,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user276 sn: user276 uid: uid276 givenname: givenname276 description: description276 userPassword: password276 mail: uid276 uidnumber: 276 gidnumber: 276 homeDirectory: /home/uid276 dn: cn=user277,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user277 sn: user277 uid: uid277 givenname: givenname277 description: description277 userPassword: password277 mail: uid277 uidnumber: 277 gidnumber: 277 homeDirectory: /home/uid277 dn: cn=user278,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user278 sn: user278 uid: uid278 givenname: givenname278 description: description278 userPassword: password278 mail: uid278 uidnumber: 278 gidnumber: 278 homeDirectory: /home/uid278 dn: cn=user279,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user279 sn: user279 uid: uid279 givenname: givenname279 description: description279 userPassword: password279 mail: uid279 uidnumber: 279 gidnumber: 279 homeDirectory: /home/uid279 dn: cn=user280,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user280 sn: user280 uid: uid280 givenname: givenname280 description: description280 userPassword: password280 mail: uid280 uidnumber: 280 gidnumber: 280 homeDirectory: /home/uid280 dn: cn=user281,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user281 sn: user281 uid: uid281 givenname: givenname281 description: description281 userPassword: password281 mail: uid281 uidnumber: 281 gidnumber: 281 homeDirectory: /home/uid281 dn: cn=user282,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user282 sn: user282 uid: uid282 givenname: givenname282 description: description282 userPassword: password282 mail: uid282 uidnumber: 282 gidnumber: 282 homeDirectory: /home/uid282 dn: cn=user283,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user283 sn: user283 uid: uid283 givenname: givenname283 description: description283 userPassword: password283 mail: uid283 
uidnumber: 283 gidnumber: 283 homeDirectory: /home/uid283 dn: cn=user284,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user284 sn: user284 uid: uid284 givenname: givenname284 description: description284 userPassword: password284 mail: uid284 uidnumber: 284 gidnumber: 284 homeDirectory: /home/uid284 dn: cn=user285,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user285 sn: user285 uid: uid285 givenname: givenname285 description: description285 userPassword: password285 mail: uid285 uidnumber: 285 gidnumber: 285 homeDirectory: /home/uid285 dn: cn=user286,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user286 sn: user286 uid: uid286 givenname: givenname286 description: description286 userPassword: password286 mail: uid286 uidnumber: 286 gidnumber: 286 homeDirectory: /home/uid286 dn: cn=user287,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user287 sn: user287 uid: uid287 givenname: givenname287 description: description287 userPassword: password287 mail: uid287 uidnumber: 287 gidnumber: 287 homeDirectory: /home/uid287 dn: cn=user288,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user288 sn: user288 uid: uid288 givenname: givenname288 description: description288 userPassword: password288 mail: uid288 uidnumber: 288 gidnumber: 288 homeDirectory: /home/uid288 dn: cn=user289,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user289 
sn: user289 uid: uid289 givenname: givenname289 description: description289 userPassword: password289 mail: uid289 uidnumber: 289 gidnumber: 289 homeDirectory: /home/uid289 dn: cn=user290,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user290 sn: user290 uid: uid290 givenname: givenname290 description: description290 userPassword: password290 mail: uid290 uidnumber: 290 gidnumber: 290 homeDirectory: /home/uid290 dn: cn=user291,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user291 sn: user291 uid: uid291 givenname: givenname291 description: description291 userPassword: password291 mail: uid291 uidnumber: 291 gidnumber: 291 homeDirectory: /home/uid291 dn: cn=user292,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user292 sn: user292 uid: uid292 givenname: givenname292 description: description292 userPassword: password292 mail: uid292 uidnumber: 292 gidnumber: 292 homeDirectory: /home/uid292 dn: cn=user293,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user293 sn: user293 uid: uid293 givenname: givenname293 description: description293 userPassword: password293 mail: uid293 uidnumber: 293 gidnumber: 293 homeDirectory: /home/uid293 dn: cn=user294,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user294 sn: user294 uid: uid294 givenname: givenname294 description: description294 userPassword: password294 mail: uid294 uidnumber: 294 gidnumber: 294 homeDirectory: /home/uid294 dn: cn=user295,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user295 sn: user295 uid: uid295 givenname: givenname295 description: description295 userPassword: password295 mail: uid295 uidnumber: 295 gidnumber: 295 homeDirectory: /home/uid295 dn: cn=user296,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user296 sn: user296 uid: uid296 givenname: givenname296 description: description296 userPassword: password296 mail: uid296 uidnumber: 296 gidnumber: 296 homeDirectory: /home/uid296 dn: cn=user297,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user297 sn: user297 uid: uid297 givenname: givenname297 description: description297 userPassword: password297 mail: uid297 uidnumber: 297 gidnumber: 297 homeDirectory: /home/uid297 dn: cn=user298,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user298 sn: user298 uid: uid298 givenname: givenname298 description: description298 userPassword: password298 mail: uid298 uidnumber: 298 gidnumber: 298 homeDirectory: /home/uid298 dn: cn=user299,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user299 sn: user299 uid: uid299 givenname: givenname299 description: description299 userPassword: password299 mail: uid299 uidnumber: 299 gidnumber: 299 homeDirectory: /home/uid299 dn: cn=user300,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user300 sn: user300 uid: uid300 givenname: givenname300 description: description300 userPassword: password300 mail: uid300 
uidnumber: 300 gidnumber: 300 homeDirectory: /home/uid300 dn: cn=user301,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user301 sn: user301 uid: uid301 givenname: givenname301 description: description301 userPassword: password301 mail: uid301 uidnumber: 301 gidnumber: 301 homeDirectory: /home/uid301 dn: cn=user302,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user302 sn: user302 uid: uid302 givenname: givenname302 description: description302 userPassword: password302 mail: uid302 uidnumber: 302 gidnumber: 302 homeDirectory: /home/uid302 dn: cn=user303,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user303 sn: user303 uid: uid303 givenname: givenname303 description: description303 userPassword: password303 mail: uid303 uidnumber: 303 gidnumber: 303 homeDirectory: /home/uid303 dn: cn=user304,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user304 sn: user304 uid: uid304 givenname: givenname304 description: description304 userPassword: password304 mail: uid304 uidnumber: 304 gidnumber: 304 homeDirectory: /home/uid304 dn: cn=user305,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user305 sn: user305 uid: uid305 givenname: givenname305 description: description305 userPassword: password305 mail: uid305 uidnumber: 305 gidnumber: 305 homeDirectory: /home/uid305 dn: cn=user306,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user306 
sn: user306 uid: uid306 givenname: givenname306 description: description306 userPassword: password306 mail: uid306 uidnumber: 306 gidnumber: 306 homeDirectory: /home/uid306 dn: cn=user307,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user307 sn: user307 uid: uid307 givenname: givenname307 description: description307 userPassword: password307 mail: uid307 uidnumber: 307 gidnumber: 307 homeDirectory: /home/uid307 dn: cn=user308,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user308 sn: user308 uid: uid308 givenname: givenname308 description: description308 userPassword: password308 mail: uid308 uidnumber: 308 gidnumber: 308 homeDirectory: /home/uid308 dn: cn=user309,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user309 sn: user309 uid: uid309 givenname: givenname309 description: description309 userPassword: password309 mail: uid309 uidnumber: 309 gidnumber: 309 homeDirectory: /home/uid309 dn: cn=user310,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user310 sn: user310 uid: uid310 givenname: givenname310 description: description310 userPassword: password310 mail: uid310 uidnumber: 310 gidnumber: 310 homeDirectory: /home/uid310 dn: cn=user311,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user311 sn: user311 uid: uid311 givenname: givenname311 description: description311 userPassword: password311 mail: uid311 uidnumber: 311 gidnumber: 311 homeDirectory: /home/uid311 dn: cn=user312,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user312 sn: user312 uid: uid312 givenname: givenname312 description: description312 userPassword: password312 mail: uid312 uidnumber: 312 gidnumber: 312 homeDirectory: /home/uid312 dn: cn=user313,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user313 sn: user313 uid: uid313 givenname: givenname313 description: description313 userPassword: password313 mail: uid313 uidnumber: 313 gidnumber: 313 homeDirectory: /home/uid313 dn: cn=user314,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user314 sn: user314 uid: uid314 givenname: givenname314 description: description314 userPassword: password314 mail: uid314 uidnumber: 314 gidnumber: 314 homeDirectory: /home/uid314 dn: cn=user315,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user315 sn: user315 uid: uid315 givenname: givenname315 description: description315 userPassword: password315 mail: uid315 uidnumber: 315 gidnumber: 315 homeDirectory: /home/uid315 dn: cn=user316,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user316 sn: user316 uid: uid316 givenname: givenname316 description: description316 userPassword: password316 mail: uid316 uidnumber: 316 gidnumber: 316 homeDirectory: /home/uid316 dn: cn=user317,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user317 sn: user317 uid: uid317 givenname: givenname317 description: description317 userPassword: password317 mail: uid317 
uidnumber: 317 gidnumber: 317 homeDirectory: /home/uid317 dn: cn=user318,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user318 sn: user318 uid: uid318 givenname: givenname318 description: description318 userPassword: password318 mail: uid318 uidnumber: 318 gidnumber: 318 homeDirectory: /home/uid318 dn: cn=user319,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user319 sn: user319 uid: uid319 givenname: givenname319 description: description319 userPassword: password319 mail: uid319 uidnumber: 319 gidnumber: 319 homeDirectory: /home/uid319 dn: cn=user320,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user320 sn: user320 uid: uid320 givenname: givenname320 description: description320 userPassword: password320 mail: uid320 uidnumber: 320 gidnumber: 320 homeDirectory: /home/uid320 dn: cn=user321,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user321 sn: user321 uid: uid321 givenname: givenname321 description: description321 userPassword: password321 mail: uid321 uidnumber: 321 gidnumber: 321 homeDirectory: /home/uid321 dn: cn=user322,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user322 sn: user322 uid: uid322 givenname: givenname322 description: description322 userPassword: password322 mail: uid322 uidnumber: 322 gidnumber: 322 homeDirectory: /home/uid322 dn: cn=user323,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user323 
sn: user323 uid: uid323 givenname: givenname323 description: description323 userPassword: password323 mail: uid323 uidnumber: 323 gidnumber: 323 homeDirectory: /home/uid323 dn: cn=user324,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user324 sn: user324 uid: uid324 givenname: givenname324 description: description324 userPassword: password324 mail: uid324 uidnumber: 324 gidnumber: 324 homeDirectory: /home/uid324 dn: cn=user325,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user325 sn: user325 uid: uid325 givenname: givenname325 description: description325 userPassword: password325 mail: uid325 uidnumber: 325 gidnumber: 325 homeDirectory: /home/uid325 dn: cn=user326,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user326 sn: user326 uid: uid326 givenname: givenname326 description: description326 userPassword: password326 mail: uid326 uidnumber: 326 gidnumber: 326 homeDirectory: /home/uid326 dn: cn=user327,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user327 sn: user327 uid: uid327 givenname: givenname327 description: description327 userPassword: password327 mail: uid327 uidnumber: 327 gidnumber: 327 homeDirectory: /home/uid327 dn: cn=user328,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user328 sn: user328 uid: uid328 givenname: givenname328 description: description328 userPassword: password328 mail: uid328 uidnumber: 328 gidnumber: 328 homeDirectory: /home/uid328 dn: cn=user329,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user329 sn: user329 uid: uid329 givenname: givenname329 description: description329 userPassword: password329 mail: uid329 uidnumber: 329 gidnumber: 329 homeDirectory: /home/uid329 dn: cn=user330,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user330 sn: user330 uid: uid330 givenname: givenname330 description: description330 userPassword: password330 mail: uid330 uidnumber: 330 gidnumber: 330 homeDirectory: /home/uid330 dn: cn=user331,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user331 sn: user331 uid: uid331 givenname: givenname331 description: description331 userPassword: password331 mail: uid331 uidnumber: 331 gidnumber: 331 homeDirectory: /home/uid331 dn: cn=user332,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user332 sn: user332 uid: uid332 givenname: givenname332 description: description332 userPassword: password332 mail: uid332 uidnumber: 332 gidnumber: 332 homeDirectory: /home/uid332 dn: cn=user333,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user333 sn: user333 uid: uid333 givenname: givenname333 description: description333 userPassword: password333 mail: uid333 uidnumber: 333 gidnumber: 333 homeDirectory: /home/uid333 dn: cn=user334,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user334 sn: user334 uid: uid334 givenname: givenname334 description: description334 userPassword: password334 mail: uid334 
uidnumber: 334 gidnumber: 334 homeDirectory: /home/uid334 dn: cn=user335,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user335 sn: user335 uid: uid335 givenname: givenname335 description: description335 userPassword: password335 mail: uid335 uidnumber: 335 gidnumber: 335 homeDirectory: /home/uid335 dn: cn=user336,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user336 sn: user336 uid: uid336 givenname: givenname336 description: description336 userPassword: password336 mail: uid336 uidnumber: 336 gidnumber: 336 homeDirectory: /home/uid336 dn: cn=user337,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user337 sn: user337 uid: uid337 givenname: givenname337 description: description337 userPassword: password337 mail: uid337 uidnumber: 337 gidnumber: 337 homeDirectory: /home/uid337 dn: cn=user338,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user338 sn: user338 uid: uid338 givenname: givenname338 description: description338 userPassword: password338 mail: uid338 uidnumber: 338 gidnumber: 338 homeDirectory: /home/uid338 dn: cn=user339,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user339 sn: user339 uid: uid339 givenname: givenname339 description: description339 userPassword: password339 mail: uid339 uidnumber: 339 gidnumber: 339 homeDirectory: /home/uid339 dn: cn=user340,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user340 
sn: user340 uid: uid340 givenname: givenname340 description: description340 userPassword: password340 mail: uid340 uidnumber: 340 gidnumber: 340 homeDirectory: /home/uid340 dn: cn=user341,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user341 sn: user341 uid: uid341 givenname: givenname341 description: description341 userPassword: password341 mail: uid341 uidnumber: 341 gidnumber: 341 homeDirectory: /home/uid341 dn: cn=user342,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user342 sn: user342 uid: uid342 givenname: givenname342 description: description342 userPassword: password342 mail: uid342 uidnumber: 342 gidnumber: 342 homeDirectory: /home/uid342 dn: cn=user343,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user343 sn: user343 uid: uid343 givenname: givenname343 description: description343 userPassword: password343 mail: uid343 uidnumber: 343 gidnumber: 343 homeDirectory: /home/uid343 dn: cn=user344,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user344 sn: user344 uid: uid344 givenname: givenname344 description: description344 userPassword: password344 mail: uid344 uidnumber: 344 gidnumber: 344 homeDirectory: /home/uid344 dn: cn=user345,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user345 sn: user345 uid: uid345 givenname: givenname345 description: description345 userPassword: password345 mail: uid345 uidnumber: 345 gidnumber: 345 homeDirectory: /home/uid345 dn: cn=user346,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user346 sn: user346 uid: uid346 givenname: givenname346 description: description346 userPassword: password346 mail: uid346 uidnumber: 346 gidnumber: 346 homeDirectory: /home/uid346 dn: cn=user347,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user347 sn: user347 uid: uid347 givenname: givenname347 description: description347 userPassword: password347 mail: uid347 uidnumber: 347 gidnumber: 347 homeDirectory: /home/uid347 dn: cn=user348,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user348 sn: user348 uid: uid348 givenname: givenname348 description: description348 userPassword: password348 mail: uid348 uidnumber: 348 gidnumber: 348 homeDirectory: /home/uid348 dn: cn=user349,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user349 sn: user349 uid: uid349 givenname: givenname349 description: description349 userPassword: password349 mail: uid349 uidnumber: 349 gidnumber: 349 homeDirectory: /home/uid349 dn: cn=user350,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user350 sn: user350 uid: uid350 givenname: givenname350 description: description350 userPassword: password350 mail: uid350 uidnumber: 350 gidnumber: 350 homeDirectory: /home/uid350 dn: cn=user351,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user351 sn: user351 uid: uid351 givenname: givenname351 description: description351 userPassword: password351 mail: uid351 
uidnumber: 351 gidnumber: 351 homeDirectory: /home/uid351 dn: cn=user352,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user352 sn: user352 uid: uid352 givenname: givenname352 description: description352 userPassword: password352 mail: uid352 uidnumber: 352 gidnumber: 352 homeDirectory: /home/uid352 dn: cn=user353,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user353 sn: user353 uid: uid353 givenname: givenname353 description: description353 userPassword: password353 mail: uid353 uidnumber: 353 gidnumber: 353 homeDirectory: /home/uid353 dn: cn=user354,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user354 sn: user354 uid: uid354 givenname: givenname354 description: description354 userPassword: password354 mail: uid354 uidnumber: 354 gidnumber: 354 homeDirectory: /home/uid354 dn: cn=user355,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user355 sn: user355 uid: uid355 givenname: givenname355 description: description355 userPassword: password355 mail: uid355 uidnumber: 355 gidnumber: 355 homeDirectory: /home/uid355 dn: cn=user356,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user356 sn: user356 uid: uid356 givenname: givenname356 description: description356 userPassword: password356 mail: uid356 uidnumber: 356 gidnumber: 356 homeDirectory: /home/uid356 dn: cn=user357,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user357 
sn: user357 uid: uid357 givenname: givenname357 description: description357 userPassword: password357 mail: uid357 uidnumber: 357 gidnumber: 357 homeDirectory: /home/uid357 dn: cn=user358,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user358 sn: user358 uid: uid358 givenname: givenname358 description: description358 userPassword: password358 mail: uid358 uidnumber: 358 gidnumber: 358 homeDirectory: /home/uid358 dn: cn=user359,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user359 sn: user359 uid: uid359 givenname: givenname359 description: description359 userPassword: password359 mail: uid359 uidnumber: 359 gidnumber: 359 homeDirectory: /home/uid359 dn: cn=user360,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user360 sn: user360 uid: uid360 givenname: givenname360 description: description360 userPassword: password360 mail: uid360 uidnumber: 360 gidnumber: 360 homeDirectory: /home/uid360 dn: cn=user361,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user361 sn: user361 uid: uid361 givenname: givenname361 description: description361 userPassword: password361 mail: uid361 uidnumber: 361 gidnumber: 361 homeDirectory: /home/uid361 dn: cn=user362,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user362 sn: user362 uid: uid362 givenname: givenname362 description: description362 userPassword: password362 mail: uid362 uidnumber: 362 gidnumber: 362 homeDirectory: /home/uid362 dn: cn=user363,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user363 sn: user363 uid: uid363 givenname: givenname363 description: description363 userPassword: password363 mail: uid363 uidnumber: 363 gidnumber: 363 homeDirectory: /home/uid363 dn: cn=user364,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user364 sn: user364 uid: uid364 givenname: givenname364 description: description364 userPassword: password364 mail: uid364 uidnumber: 364 gidnumber: 364 homeDirectory: /home/uid364 dn: cn=user365,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user365 sn: user365 uid: uid365 givenname: givenname365 description: description365 userPassword: password365 mail: uid365 uidnumber: 365 gidnumber: 365 homeDirectory: /home/uid365 dn: cn=user366,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user366 sn: user366 uid: uid366 givenname: givenname366 description: description366 userPassword: password366 mail: uid366 uidnumber: 366 gidnumber: 366 homeDirectory: /home/uid366 dn: cn=user367,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user367 sn: user367 uid: uid367 givenname: givenname367 description: description367 userPassword: password367 mail: uid367 uidnumber: 367 gidnumber: 367 homeDirectory: /home/uid367 dn: cn=user368,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user368 sn: user368 uid: uid368 givenname: givenname368 description: description368 userPassword: password368 mail: uid368 
uidnumber: 368 gidnumber: 368 homeDirectory: /home/uid368 dn: cn=user369,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user369 sn: user369 uid: uid369 givenname: givenname369 description: description369 userPassword: password369 mail: uid369 uidnumber: 369 gidnumber: 369 homeDirectory: /home/uid369 dn: cn=user370,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user370 sn: user370 uid: uid370 givenname: givenname370 description: description370 userPassword: password370 mail: uid370 uidnumber: 370 gidnumber: 370 homeDirectory: /home/uid370 dn: cn=user371,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user371 sn: user371 uid: uid371 givenname: givenname371 description: description371 userPassword: password371 mail: uid371 uidnumber: 371 gidnumber: 371 homeDirectory: /home/uid371 dn: cn=user372,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user372 sn: user372 uid: uid372 givenname: givenname372 description: description372 userPassword: password372 mail: uid372 uidnumber: 372 gidnumber: 372 homeDirectory: /home/uid372 dn: cn=user373,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user373 sn: user373 uid: uid373 givenname: givenname373 description: description373 userPassword: password373 mail: uid373 uidnumber: 373 gidnumber: 373 homeDirectory: /home/uid373 dn: cn=user374,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user374 
sn: user374 uid: uid374 givenname: givenname374 description: description374 userPassword: password374 mail: uid374 uidnumber: 374 gidnumber: 374 homeDirectory: /home/uid374 dn: cn=user375,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user375 sn: user375 uid: uid375 givenname: givenname375 description: description375 userPassword: password375 mail: uid375 uidnumber: 375 gidnumber: 375 homeDirectory: /home/uid375 dn: cn=user376,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user376 sn: user376 uid: uid376 givenname: givenname376 description: description376 userPassword: password376 mail: uid376 uidnumber: 376 gidnumber: 376 homeDirectory: /home/uid376 dn: cn=user377,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user377 sn: user377 uid: uid377 givenname: givenname377 description: description377 userPassword: password377 mail: uid377 uidnumber: 377 gidnumber: 377 homeDirectory: /home/uid377 dn: cn=user378,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user378 sn: user378 uid: uid378 givenname: givenname378 description: description378 userPassword: password378 mail: uid378 uidnumber: 378 gidnumber: 378 homeDirectory: /home/uid378 dn: cn=user379,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user379 sn: user379 uid: uid379 givenname: givenname379 description: description379 userPassword: password379 mail: uid379 uidnumber: 379 gidnumber: 379 homeDirectory: /home/uid379 dn: cn=user380,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user380 sn: user380 uid: uid380 givenname: givenname380 description: description380 userPassword: password380 mail: uid380 uidnumber: 380 gidnumber: 380 homeDirectory: /home/uid380 dn: cn=user381,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user381 sn: user381 uid: uid381 givenname: givenname381 description: description381 userPassword: password381 mail: uid381 uidnumber: 381 gidnumber: 381 homeDirectory: /home/uid381 dn: cn=user382,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user382 sn: user382 uid: uid382 givenname: givenname382 description: description382 userPassword: password382 mail: uid382 uidnumber: 382 gidnumber: 382 homeDirectory: /home/uid382 dn: cn=user383,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user383 sn: user383 uid: uid383 givenname: givenname383 description: description383 userPassword: password383 mail: uid383 uidnumber: 383 gidnumber: 383 homeDirectory: /home/uid383 dn: cn=user384,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user384 sn: user384 uid: uid384 givenname: givenname384 description: description384 userPassword: password384 mail: uid384 uidnumber: 384 gidnumber: 384 homeDirectory: /home/uid384 dn: cn=user385,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user385 sn: user385 uid: uid385 givenname: givenname385 description: description385 userPassword: password385 mail: uid385 
uidnumber: 385 gidnumber: 385 homeDirectory: /home/uid385 dn: cn=user386,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user386 sn: user386 uid: uid386 givenname: givenname386 description: description386 userPassword: password386 mail: uid386 uidnumber: 386 gidnumber: 386 homeDirectory: /home/uid386 dn: cn=user387,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user387 sn: user387 uid: uid387 givenname: givenname387 description: description387 userPassword: password387 mail: uid387 uidnumber: 387 gidnumber: 387 homeDirectory: /home/uid387 dn: cn=user388,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user388 sn: user388 uid: uid388 givenname: givenname388 description: description388 userPassword: password388 mail: uid388 uidnumber: 388 gidnumber: 388 homeDirectory: /home/uid388 dn: cn=user389,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user389 sn: user389 uid: uid389 givenname: givenname389 description: description389 userPassword: password389 mail: uid389 uidnumber: 389 gidnumber: 389 homeDirectory: /home/uid389 dn: cn=user390,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user390 sn: user390 uid: uid390 givenname: givenname390 description: description390 userPassword: password390 mail: uid390 uidnumber: 390 gidnumber: 390 homeDirectory: /home/uid390 dn: cn=user391,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user391 
sn: user391 uid: uid391 givenname: givenname391 description: description391 userPassword: password391 mail: uid391 uidnumber: 391 gidnumber: 391 homeDirectory: /home/uid391 dn: cn=user392,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user392 sn: user392 uid: uid392 givenname: givenname392 description: description392 userPassword: password392 mail: uid392 uidnumber: 392 gidnumber: 392 homeDirectory: /home/uid392 dn: cn=user393,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user393 sn: user393 uid: uid393 givenname: givenname393 description: description393 userPassword: password393 mail: uid393 uidnumber: 393 gidnumber: 393 homeDirectory: /home/uid393 dn: cn=user394,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user394 sn: user394 uid: uid394 givenname: givenname394 description: description394 userPassword: password394 mail: uid394 uidnumber: 394 gidnumber: 394 homeDirectory: /home/uid394 dn: cn=user395,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user395 sn: user395 uid: uid395 givenname: givenname395 description: description395 userPassword: password395 mail: uid395 uidnumber: 395 gidnumber: 395 homeDirectory: /home/uid395 dn: cn=user396,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user396 sn: user396 uid: uid396 givenname: givenname396 description: description396 userPassword: password396 mail: uid396 uidnumber: 396 gidnumber: 396 homeDirectory: /home/uid396 dn: cn=user397,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user397 sn: user397 uid: uid397 givenname: givenname397 description: description397 userPassword: password397 mail: uid397 uidnumber: 397 gidnumber: 397 homeDirectory: /home/uid397 dn: cn=user398,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user398 sn: user398 uid: uid398 givenname: givenname398 description: description398 userPassword: password398 mail: uid398 uidnumber: 398 gidnumber: 398 homeDirectory: /home/uid398 dn: cn=user399,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user399 sn: user399 uid: uid399 givenname: givenname399 description: description399 userPassword: password399 mail: uid399 uidnumber: 399 gidnumber: 399 homeDirectory: /home/uid399 dn: cn=user400,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user400 sn: user400 uid: uid400 givenname: givenname400 description: description400 userPassword: password400 mail: uid400 uidnumber: 400 gidnumber: 400 homeDirectory: /home/uid400 dn: cn=user401,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user401 sn: user401 uid: uid401 givenname: givenname401 description: description401 userPassword: password401 mail: uid401 uidnumber: 401 gidnumber: 401 homeDirectory: /home/uid401 dn: cn=user402,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user402 sn: user402 uid: uid402 givenname: givenname402 description: description402 userPassword: password402 mail: uid402 
uidnumber: 402 gidnumber: 402 homeDirectory: /home/uid402 dn: cn=user403,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user403 sn: user403 uid: uid403 givenname: givenname403 description: description403 userPassword: password403 mail: uid403 uidnumber: 403 gidnumber: 403 homeDirectory: /home/uid403 dn: cn=user404,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user404 sn: user404 uid: uid404 givenname: givenname404 description: description404 userPassword: password404 mail: uid404 uidnumber: 404 gidnumber: 404 homeDirectory: /home/uid404 dn: cn=user405,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user405 sn: user405 uid: uid405 givenname: givenname405 description: description405 userPassword: password405 mail: uid405 uidnumber: 405 gidnumber: 405 homeDirectory: /home/uid405 dn: cn=user406,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user406 sn: user406 uid: uid406 givenname: givenname406 description: description406 userPassword: password406 mail: uid406 uidnumber: 406 gidnumber: 406 homeDirectory: /home/uid406 dn: cn=user407,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user407 sn: user407 uid: uid407 givenname: givenname407 description: description407 userPassword: password407 mail: uid407 uidnumber: 407 gidnumber: 407 homeDirectory: /home/uid407 dn: cn=user408,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user408 
sn: user408 uid: uid408 givenname: givenname408 description: description408 userPassword: password408 mail: uid408 uidnumber: 408 gidnumber: 408 homeDirectory: /home/uid408 dn: cn=user409,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user409 sn: user409 uid: uid409 givenname: givenname409 description: description409 userPassword: password409 mail: uid409 uidnumber: 409 gidnumber: 409 homeDirectory: /home/uid409 dn: cn=user410,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user410 sn: user410 uid: uid410 givenname: givenname410 description: description410 userPassword: password410 mail: uid410 uidnumber: 410 gidnumber: 410 homeDirectory: /home/uid410 dn: cn=user411,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user411 sn: user411 uid: uid411 givenname: givenname411 description: description411 userPassword: password411 mail: uid411 uidnumber: 411 gidnumber: 411 homeDirectory: /home/uid411 dn: cn=user412,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user412 sn: user412 uid: uid412 givenname: givenname412 description: description412 userPassword: password412 mail: uid412 uidnumber: 412 gidnumber: 412 homeDirectory: /home/uid412 dn: cn=user413,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user413 sn: user413 uid: uid413 givenname: givenname413 description: description413 userPassword: password413 mail: uid413 uidnumber: 413 gidnumber: 413 homeDirectory: /home/uid413 dn: cn=user414,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user414 sn: user414 uid: uid414 givenname: givenname414 description: description414 userPassword: password414 mail: uid414 uidnumber: 414 gidnumber: 414 homeDirectory: /home/uid414 dn: cn=user415,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user415 sn: user415 uid: uid415 givenname: givenname415 description: description415 userPassword: password415 mail: uid415 uidnumber: 415 gidnumber: 415 homeDirectory: /home/uid415 dn: cn=user416,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user416 sn: user416 uid: uid416 givenname: givenname416 description: description416 userPassword: password416 mail: uid416 uidnumber: 416 gidnumber: 416 homeDirectory: /home/uid416 dn: cn=user417,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user417 sn: user417 uid: uid417 givenname: givenname417 description: description417 userPassword: password417 mail: uid417 uidnumber: 417 gidnumber: 417 homeDirectory: /home/uid417 dn: cn=user418,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user418 sn: user418 uid: uid418 givenname: givenname418 description: description418 userPassword: password418 mail: uid418 uidnumber: 418 gidnumber: 418 homeDirectory: /home/uid418 dn: cn=user419,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user419 sn: user419 uid: uid419 givenname: givenname419 description: description419 userPassword: password419 mail: uid419 
uidnumber: 419 gidnumber: 419 homeDirectory: /home/uid419 dn: cn=user420,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user420 sn: user420 uid: uid420 givenname: givenname420 description: description420 userPassword: password420 mail: uid420 uidnumber: 420 gidnumber: 420 homeDirectory: /home/uid420 dn: cn=user421,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user421 sn: user421 uid: uid421 givenname: givenname421 description: description421 userPassword: password421 mail: uid421 uidnumber: 421 gidnumber: 421 homeDirectory: /home/uid421 dn: cn=user422,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user422 sn: user422 uid: uid422 givenname: givenname422 description: description422 userPassword: password422 mail: uid422 uidnumber: 422 gidnumber: 422 homeDirectory: /home/uid422 dn: cn=user423,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user423 sn: user423 uid: uid423 givenname: givenname423 description: description423 userPassword: password423 mail: uid423 uidnumber: 423 gidnumber: 423 homeDirectory: /home/uid423 dn: cn=user424,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user424 sn: user424 uid: uid424 givenname: givenname424 description: description424 userPassword: password424 mail: uid424 uidnumber: 424 gidnumber: 424 homeDirectory: /home/uid424 dn: cn=user425,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user425 
sn: user425 uid: uid425 givenname: givenname425 description: description425 userPassword: password425 mail: uid425 uidnumber: 425 gidnumber: 425 homeDirectory: /home/uid425 dn: cn=user426,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user426 sn: user426 uid: uid426 givenname: givenname426 description: description426 userPassword: password426 mail: uid426 uidnumber: 426 gidnumber: 426 homeDirectory: /home/uid426 dn: cn=user427,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user427 sn: user427 uid: uid427 givenname: givenname427 description: description427 userPassword: password427 mail: uid427 uidnumber: 427 gidnumber: 427 homeDirectory: /home/uid427 dn: cn=user428,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user428 sn: user428 uid: uid428 givenname: givenname428 description: description428 userPassword: password428 mail: uid428 uidnumber: 428 gidnumber: 428 homeDirectory: /home/uid428 dn: cn=user429,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user429 sn: user429 uid: uid429 givenname: givenname429 description: description429 userPassword: password429 mail: uid429 uidnumber: 429 gidnumber: 429 homeDirectory: /home/uid429 dn: cn=user430,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user430 sn: user430 uid: uid430 givenname: givenname430 description: description430 userPassword: password430 mail: uid430 uidnumber: 430 gidnumber: 430 homeDirectory: /home/uid430 dn: cn=user431,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user431 sn: user431 uid: uid431 givenname: givenname431 description: description431 userPassword: password431 mail: uid431 uidnumber: 431 gidnumber: 431 homeDirectory: /home/uid431 dn: cn=user432,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user432 sn: user432 uid: uid432 givenname: givenname432 description: description432 userPassword: password432 mail: uid432 uidnumber: 432 gidnumber: 432 homeDirectory: /home/uid432 dn: cn=user433,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user433 sn: user433 uid: uid433 givenname: givenname433 description: description433 userPassword: password433 mail: uid433 uidnumber: 433 gidnumber: 433 homeDirectory: /home/uid433 dn: cn=user434,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user434 sn: user434 uid: uid434 givenname: givenname434 description: description434 userPassword: password434 mail: uid434 uidnumber: 434 gidnumber: 434 homeDirectory: /home/uid434 dn: cn=user435,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user435 sn: user435 uid: uid435 givenname: givenname435 description: description435 userPassword: password435 mail: uid435 uidnumber: 435 gidnumber: 435 homeDirectory: /home/uid435 dn: cn=user436,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user436 sn: user436 uid: uid436 givenname: givenname436 description: description436 userPassword: password436 mail: uid436 
uidnumber: 436 gidnumber: 436 homeDirectory: /home/uid436 dn: cn=user437,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user437 sn: user437 uid: uid437 givenname: givenname437 description: description437 userPassword: password437 mail: uid437 uidnumber: 437 gidnumber: 437 homeDirectory: /home/uid437 dn: cn=user438,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user438 sn: user438 uid: uid438 givenname: givenname438 description: description438 userPassword: password438 mail: uid438 uidnumber: 438 gidnumber: 438 homeDirectory: /home/uid438 dn: cn=user439,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user439 sn: user439 uid: uid439 givenname: givenname439 description: description439 userPassword: password439 mail: uid439 uidnumber: 439 gidnumber: 439 homeDirectory: /home/uid439 dn: cn=user440,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user440 sn: user440 uid: uid440 givenname: givenname440 description: description440 userPassword: password440 mail: uid440 uidnumber: 440 gidnumber: 440 homeDirectory: /home/uid440 dn: cn=user441,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user441 sn: user441 uid: uid441 givenname: givenname441 description: description441 userPassword: password441 mail: uid441 uidnumber: 441 gidnumber: 441 homeDirectory: /home/uid441 dn: cn=user442,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user442 
sn: user442 uid: uid442 givenname: givenname442 description: description442 userPassword: password442 mail: uid442 uidnumber: 442 gidnumber: 442 homeDirectory: /home/uid442 dn: cn=user443,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user443 sn: user443 uid: uid443 givenname: givenname443 description: description443 userPassword: password443 mail: uid443 uidnumber: 443 gidnumber: 443 homeDirectory: /home/uid443 dn: cn=user444,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user444 sn: user444 uid: uid444 givenname: givenname444 description: description444 userPassword: password444 mail: uid444 uidnumber: 444 gidnumber: 444 homeDirectory: /home/uid444 dn: cn=user445,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user445 sn: user445 uid: uid445 givenname: givenname445 description: description445 userPassword: password445 mail: uid445 uidnumber: 445 gidnumber: 445 homeDirectory: /home/uid445 dn: cn=user446,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user446 sn: user446 uid: uid446 givenname: givenname446 description: description446 userPassword: password446 mail: uid446 uidnumber: 446 gidnumber: 446 homeDirectory: /home/uid446 dn: cn=user447,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user447 sn: user447 uid: uid447 givenname: givenname447 description: description447 userPassword: password447 mail: uid447 uidnumber: 447 gidnumber: 447 homeDirectory: /home/uid447 dn: cn=user448,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user448 sn: user448 uid: uid448 givenname: givenname448 description: description448 userPassword: password448 mail: uid448 uidnumber: 448 gidnumber: 448 homeDirectory: /home/uid448 dn: cn=user449,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user449 sn: user449 uid: uid449 givenname: givenname449 description: description449 userPassword: password449 mail: uid449 uidnumber: 449 gidnumber: 449 homeDirectory: /home/uid449 dn: cn=user450,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user450 sn: user450 uid: uid450 givenname: givenname450 description: description450 userPassword: password450 mail: uid450 uidnumber: 450 gidnumber: 450 homeDirectory: /home/uid450 dn: cn=user451,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user451 sn: user451 uid: uid451 givenname: givenname451 description: description451 userPassword: password451 mail: uid451 uidnumber: 451 gidnumber: 451 homeDirectory: /home/uid451 dn: cn=user452,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user452 sn: user452 uid: uid452 givenname: givenname452 description: description452 userPassword: password452 mail: uid452 uidnumber: 452 gidnumber: 452 homeDirectory: /home/uid452 dn: cn=user453,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user453 sn: user453 uid: uid453 givenname: givenname453 description: description453 userPassword: password453 mail: uid453 
uidnumber: 453 gidnumber: 453 homeDirectory: /home/uid453 dn: cn=user454,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user454 sn: user454 uid: uid454 givenname: givenname454 description: description454 userPassword: password454 mail: uid454 uidnumber: 454 gidnumber: 454 homeDirectory: /home/uid454 dn: cn=user455,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user455 sn: user455 uid: uid455 givenname: givenname455 description: description455 userPassword: password455 mail: uid455 uidnumber: 455 gidnumber: 455 homeDirectory: /home/uid455 dn: cn=user456,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user456 sn: user456 uid: uid456 givenname: givenname456 description: description456 userPassword: password456 mail: uid456 uidnumber: 456 gidnumber: 456 homeDirectory: /home/uid456 dn: cn=user457,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user457 sn: user457 uid: uid457 givenname: givenname457 description: description457 userPassword: password457 mail: uid457 uidnumber: 457 gidnumber: 457 homeDirectory: /home/uid457 dn: cn=user458,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user458 sn: user458 uid: uid458 givenname: givenname458 description: description458 userPassword: password458 mail: uid458 uidnumber: 458 gidnumber: 458 homeDirectory: /home/uid458 dn: cn=user459,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user459 
sn: user459 uid: uid459 givenname: givenname459 description: description459 userPassword: password459 mail: uid459 uidnumber: 459 gidnumber: 459 homeDirectory: /home/uid459 dn: cn=user460,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user460 sn: user460 uid: uid460 givenname: givenname460 description: description460 userPassword: password460 mail: uid460 uidnumber: 460 gidnumber: 460 homeDirectory: /home/uid460 dn: cn=user461,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user461 sn: user461 uid: uid461 givenname: givenname461 description: description461 userPassword: password461 mail: uid461 uidnumber: 461 gidnumber: 461 homeDirectory: /home/uid461 dn: cn=user462,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user462 sn: user462 uid: uid462 givenname: givenname462 description: description462 userPassword: password462 mail: uid462 uidnumber: 462 gidnumber: 462 homeDirectory: /home/uid462 dn: cn=user463,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user463 sn: user463 uid: uid463 givenname: givenname463 description: description463 userPassword: password463 mail: uid463 uidnumber: 463 gidnumber: 463 homeDirectory: /home/uid463 dn: cn=user464,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user464 sn: user464 uid: uid464 givenname: givenname464 description: description464 userPassword: password464 mail: uid464 uidnumber: 464 gidnumber: 464 homeDirectory: /home/uid464 dn: cn=user465,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user465 sn: user465 uid: uid465 givenname: givenname465 description: description465 userPassword: password465 mail: uid465 uidnumber: 465 gidnumber: 465 homeDirectory: /home/uid465 dn: cn=user466,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user466 sn: user466 uid: uid466 givenname: givenname466 description: description466 userPassword: password466 mail: uid466 uidnumber: 466 gidnumber: 466 homeDirectory: /home/uid466 dn: cn=user467,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user467 sn: user467 uid: uid467 givenname: givenname467 description: description467 userPassword: password467 mail: uid467 uidnumber: 467 gidnumber: 467 homeDirectory: /home/uid467 dn: cn=user468,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user468 sn: user468 uid: uid468 givenname: givenname468 description: description468 userPassword: password468 mail: uid468 uidnumber: 468 gidnumber: 468 homeDirectory: /home/uid468 dn: cn=user469,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user469 sn: user469 uid: uid469 givenname: givenname469 description: description469 userPassword: password469 mail: uid469 uidnumber: 469 gidnumber: 469 homeDirectory: /home/uid469 dn: cn=user470,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user470 sn: user470 uid: uid470 givenname: givenname470 description: description470 userPassword: password470 mail: uid470 
uidnumber: 470 gidnumber: 470 homeDirectory: /home/uid470 dn: cn=user471,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user471 sn: user471 uid: uid471 givenname: givenname471 description: description471 userPassword: password471 mail: uid471 uidnumber: 471 gidnumber: 471 homeDirectory: /home/uid471 dn: cn=user472,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user472 sn: user472 uid: uid472 givenname: givenname472 description: description472 userPassword: password472 mail: uid472 uidnumber: 472 gidnumber: 472 homeDirectory: /home/uid472 dn: cn=user473,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user473 sn: user473 uid: uid473 givenname: givenname473 description: description473 userPassword: password473 mail: uid473 uidnumber: 473 gidnumber: 473 homeDirectory: /home/uid473 dn: cn=user474,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user474 sn: user474 uid: uid474 givenname: givenname474 description: description474 userPassword: password474 mail: uid474 uidnumber: 474 gidnumber: 474 homeDirectory: /home/uid474 dn: cn=user475,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user475 sn: user475 uid: uid475 givenname: givenname475 description: description475 userPassword: password475 mail: uid475 uidnumber: 475 gidnumber: 475 homeDirectory: /home/uid475 dn: cn=user476,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user476 
sn: user476 uid: uid476 givenname: givenname476 description: description476 userPassword: password476 mail: uid476 uidnumber: 476 gidnumber: 476 homeDirectory: /home/uid476 dn: cn=user477,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user477 sn: user477 uid: uid477 givenname: givenname477 description: description477 userPassword: password477 mail: uid477 uidnumber: 477 gidnumber: 477 homeDirectory: /home/uid477 dn: cn=user478,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user478 sn: user478 uid: uid478 givenname: givenname478 description: description478 userPassword: password478 mail: uid478 uidnumber: 478 gidnumber: 478 homeDirectory: /home/uid478 dn: cn=user479,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user479 sn: user479 uid: uid479 givenname: givenname479 description: description479 userPassword: password479 mail: uid479 uidnumber: 479 gidnumber: 479 homeDirectory: /home/uid479 dn: cn=user480,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user480 sn: user480 uid: uid480 givenname: givenname480 description: description480 userPassword: password480 mail: uid480 uidnumber: 480 gidnumber: 480 homeDirectory: /home/uid480 dn: cn=user481,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user481 sn: user481 uid: uid481 givenname: givenname481 description: description481 userPassword: password481 mail: uid481 uidnumber: 481 gidnumber: 481 homeDirectory: /home/uid481 dn: cn=user482,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user482 sn: user482 uid: uid482 givenname: givenname482 description: description482 userPassword: password482 mail: uid482 uidnumber: 482 gidnumber: 482 homeDirectory: /home/uid482 dn: cn=user483,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user483 sn: user483 uid: uid483 givenname: givenname483 description: description483 userPassword: password483 mail: uid483 uidnumber: 483 gidnumber: 483 homeDirectory: /home/uid483 dn: cn=user484,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user484 sn: user484 uid: uid484 givenname: givenname484 description: description484 userPassword: password484 mail: uid484 uidnumber: 484 gidnumber: 484 homeDirectory: /home/uid484 dn: cn=user485,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user485 sn: user485 uid: uid485 givenname: givenname485 description: description485 userPassword: password485 mail: uid485 uidnumber: 485 gidnumber: 485 homeDirectory: /home/uid485 dn: cn=user486,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user486 sn: user486 uid: uid486 givenname: givenname486 description: description486 userPassword: password486 mail: uid486 uidnumber: 486 gidnumber: 486 homeDirectory: /home/uid486 dn: cn=user487,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user487 sn: user487 uid: uid487 givenname: givenname487 description: description487 userPassword: password487 mail: uid487 
uidnumber: 487 gidnumber: 487 homeDirectory: /home/uid487 dn: cn=user488,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user488 sn: user488 uid: uid488 givenname: givenname488 description: description488 userPassword: password488 mail: uid488 uidnumber: 488 gidnumber: 488 homeDirectory: /home/uid488 dn: cn=user489,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user489 sn: user489 uid: uid489 givenname: givenname489 description: description489 userPassword: password489 mail: uid489 uidnumber: 489 gidnumber: 489 homeDirectory: /home/uid489 dn: cn=user490,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user490 sn: user490 uid: uid490 givenname: givenname490 description: description490 userPassword: password490 mail: uid490 uidnumber: 490 gidnumber: 490 homeDirectory: /home/uid490 dn: cn=user491,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user491 sn: user491 uid: uid491 givenname: givenname491 description: description491 userPassword: password491 mail: uid491 uidnumber: 491 gidnumber: 491 homeDirectory: /home/uid491 dn: cn=user492,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user492 sn: user492 uid: uid492 givenname: givenname492 description: description492 userPassword: password492 mail: uid492 uidnumber: 492 gidnumber: 492 homeDirectory: /home/uid492 dn: cn=user493,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user493 
sn: user493 uid: uid493 givenname: givenname493 description: description493 userPassword: password493 mail: uid493 uidnumber: 493 gidnumber: 493 homeDirectory: /home/uid493 dn: cn=user494,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user494 sn: user494 uid: uid494 givenname: givenname494 description: description494 userPassword: password494 mail: uid494 uidnumber: 494 gidnumber: 494 homeDirectory: /home/uid494 dn: cn=user495,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user495 sn: user495 uid: uid495 givenname: givenname495 description: description495 userPassword: password495 mail: uid495 uidnumber: 495 gidnumber: 495 homeDirectory: /home/uid495 dn: cn=user496,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user496 sn: user496 uid: uid496 givenname: givenname496 description: description496 userPassword: password496 mail: uid496 uidnumber: 496 gidnumber: 496 homeDirectory: /home/uid496 dn: cn=user497,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user497 sn: user497 uid: uid497 givenname: givenname497 description: description497 userPassword: password497 mail: uid497 uidnumber: 497 gidnumber: 497 homeDirectory: /home/uid497 dn: cn=user498,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user498 sn: user498 uid: uid498 givenname: givenname498 description: description498 userPassword: password498 mail: uid498 uidnumber: 498 gidnumber: 498 homeDirectory: /home/uid498 dn: cn=user499,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user499 sn: user499 uid: uid499 givenname: givenname499 description: description499 userPassword: password499 mail: uid499 uidnumber: 499 gidnumber: 499 homeDirectory: /home/uid499 dn: cn=user500,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user500 sn: user500 uid: uid500 givenname: givenname500 description: description500 userPassword: password500 mail: uid500 uidnumber: 500 gidnumber: 500 homeDirectory: /home/uid500 dn: cn=user501,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user501 sn: user501 uid: uid501 givenname: givenname501 description: description501 userPassword: password501 mail: uid501 uidnumber: 501 gidnumber: 501 homeDirectory: /home/uid501 dn: cn=user502,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user502 sn: user502 uid: uid502 givenname: givenname502 description: description502 userPassword: password502 mail: uid502 uidnumber: 502 gidnumber: 502 homeDirectory: /home/uid502 dn: cn=user503,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user503 sn: user503 uid: uid503 givenname: givenname503 description: description503 userPassword: password503 mail: uid503 uidnumber: 503 gidnumber: 503 homeDirectory: /home/uid503 dn: cn=user504,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user504 sn: user504 uid: uid504 givenname: givenname504 description: description504 userPassword: password504 mail: uid504 
uidnumber: 504 gidnumber: 504 homeDirectory: /home/uid504 dn: cn=user505,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user505 sn: user505 uid: uid505 givenname: givenname505 description: description505 userPassword: password505 mail: uid505 uidnumber: 505 gidnumber: 505 homeDirectory: /home/uid505 dn: cn=user506,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user506 sn: user506 uid: uid506 givenname: givenname506 description: description506 userPassword: password506 mail: uid506 uidnumber: 506 gidnumber: 506 homeDirectory: /home/uid506 dn: cn=user507,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user507 sn: user507 uid: uid507 givenname: givenname507 description: description507 userPassword: password507 mail: uid507 uidnumber: 507 gidnumber: 507 homeDirectory: /home/uid507 dn: cn=user508,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user508 sn: user508 uid: uid508 givenname: givenname508 description: description508 userPassword: password508 mail: uid508 uidnumber: 508 gidnumber: 508 homeDirectory: /home/uid508 dn: cn=user509,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user509 sn: user509 uid: uid509 givenname: givenname509 description: description509 userPassword: password509 mail: uid509 uidnumber: 509 gidnumber: 509 homeDirectory: /home/uid509 dn: cn=user510,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user510 
sn: user510 uid: uid510 givenname: givenname510 description: description510 userPassword: password510 mail: uid510 uidnumber: 510 gidnumber: 510 homeDirectory: /home/uid510 dn: cn=user511,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user511 sn: user511 uid: uid511 givenname: givenname511 description: description511 userPassword: password511 mail: uid511 uidnumber: 511 gidnumber: 511 homeDirectory: /home/uid511 dn: cn=user512,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user512 sn: user512 uid: uid512 givenname: givenname512 description: description512 userPassword: password512 mail: uid512 uidnumber: 512 gidnumber: 512 homeDirectory: /home/uid512 dn: cn=user513,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user513 sn: user513 uid: uid513 givenname: givenname513 description: description513 userPassword: password513 mail: uid513 uidnumber: 513 gidnumber: 513 homeDirectory: /home/uid513 dn: cn=user514,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user514 sn: user514 uid: uid514 givenname: givenname514 description: description514 userPassword: password514 mail: uid514 uidnumber: 514 gidnumber: 514 homeDirectory: /home/uid514 dn: cn=user515,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user515 sn: user515 uid: uid515 givenname: givenname515 description: description515 userPassword: password515 mail: uid515 uidnumber: 515 gidnumber: 515 homeDirectory: /home/uid515 dn: cn=user516,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user516 sn: user516 uid: uid516 givenname: givenname516 description: description516 userPassword: password516 mail: uid516 uidnumber: 516 gidnumber: 516 homeDirectory: /home/uid516 dn: cn=user517,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user517 sn: user517 uid: uid517 givenname: givenname517 description: description517 userPassword: password517 mail: uid517 uidnumber: 517 gidnumber: 517 homeDirectory: /home/uid517 dn: cn=user518,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user518 sn: user518 uid: uid518 givenname: givenname518 description: description518 userPassword: password518 mail: uid518 uidnumber: 518 gidnumber: 518 homeDirectory: /home/uid518 dn: cn=user519,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user519 sn: user519 uid: uid519 givenname: givenname519 description: description519 userPassword: password519 mail: uid519 uidnumber: 519 gidnumber: 519 homeDirectory: /home/uid519 dn: cn=user520,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user520 sn: user520 uid: uid520 givenname: givenname520 description: description520 userPassword: password520 mail: uid520 uidnumber: 520 gidnumber: 520 homeDirectory: /home/uid520 dn: cn=user521,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user521 sn: user521 uid: uid521 givenname: givenname521 description: description521 userPassword: password521 mail: uid521 
uidnumber: 521 gidnumber: 521 homeDirectory: /home/uid521 dn: cn=user522,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user522 sn: user522 uid: uid522 givenname: givenname522 description: description522 userPassword: password522 mail: uid522 uidnumber: 522 gidnumber: 522 homeDirectory: /home/uid522 dn: cn=user523,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user523 sn: user523 uid: uid523 givenname: givenname523 description: description523 userPassword: password523 mail: uid523 uidnumber: 523 gidnumber: 523 homeDirectory: /home/uid523 dn: cn=user524,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user524 sn: user524 uid: uid524 givenname: givenname524 description: description524 userPassword: password524 mail: uid524 uidnumber: 524 gidnumber: 524 homeDirectory: /home/uid524 dn: cn=user525,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user525 sn: user525 uid: uid525 givenname: givenname525 description: description525 userPassword: password525 mail: uid525 uidnumber: 525 gidnumber: 525 homeDirectory: /home/uid525 dn: cn=user526,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user526 sn: user526 uid: uid526 givenname: givenname526 description: description526 userPassword: password526 mail: uid526 uidnumber: 526 gidnumber: 526 homeDirectory: /home/uid526 dn: cn=user527,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user527 
sn: user527 uid: uid527 givenname: givenname527 description: description527 userPassword: password527 mail: uid527 uidnumber: 527 gidnumber: 527 homeDirectory: /home/uid527 dn: cn=user528,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user528 sn: user528 uid: uid528 givenname: givenname528 description: description528 userPassword: password528 mail: uid528 uidnumber: 528 gidnumber: 528 homeDirectory: /home/uid528 dn: cn=user529,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user529 sn: user529 uid: uid529 givenname: givenname529 description: description529 userPassword: password529 mail: uid529 uidnumber: 529 gidnumber: 529 homeDirectory: /home/uid529 dn: cn=user530,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user530 sn: user530 uid: uid530 givenname: givenname530 description: description530 userPassword: password530 mail: uid530 uidnumber: 530 gidnumber: 530 homeDirectory: /home/uid530 dn: cn=user531,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user531 sn: user531 uid: uid531 givenname: givenname531 description: description531 userPassword: password531 mail: uid531 uidnumber: 531 gidnumber: 531 homeDirectory: /home/uid531 dn: cn=user532,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user532 sn: user532 uid: uid532 givenname: givenname532 description: description532 userPassword: password532 mail: uid532 uidnumber: 532 gidnumber: 532 homeDirectory: /home/uid532 dn: cn=user533,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user533 sn: user533 uid: uid533 givenname: givenname533 description: description533 userPassword: password533 mail: uid533 uidnumber: 533 gidnumber: 533 homeDirectory: /home/uid533 dn: cn=user534,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user534 sn: user534 uid: uid534 givenname: givenname534 description: description534 userPassword: password534 mail: uid534 uidnumber: 534 gidnumber: 534 homeDirectory: /home/uid534 dn: cn=user535,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user535 sn: user535 uid: uid535 givenname: givenname535 description: description535 userPassword: password535 mail: uid535 uidnumber: 535 gidnumber: 535 homeDirectory: /home/uid535 dn: cn=user536,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user536 sn: user536 uid: uid536 givenname: givenname536 description: description536 userPassword: password536 mail: uid536 uidnumber: 536 gidnumber: 536 homeDirectory: /home/uid536 dn: cn=user537,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user537 sn: user537 uid: uid537 givenname: givenname537 description: description537 userPassword: password537 mail: uid537 uidnumber: 537 gidnumber: 537 homeDirectory: /home/uid537 dn: cn=user538,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user538 sn: user538 uid: uid538 givenname: givenname538 description: description538 userPassword: password538 mail: uid538 
uidnumber: 538 gidnumber: 538 homeDirectory: /home/uid538 dn: cn=user539,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user539 sn: user539 uid: uid539 givenname: givenname539 description: description539 userPassword: password539 mail: uid539 uidnumber: 539 gidnumber: 539 homeDirectory: /home/uid539 dn: cn=user540,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user540 sn: user540 uid: uid540 givenname: givenname540 description: description540 userPassword: password540 mail: uid540 uidnumber: 540 gidnumber: 540 homeDirectory: /home/uid540 dn: cn=user541,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user541 sn: user541 uid: uid541 givenname: givenname541 description: description541 userPassword: password541 mail: uid541 uidnumber: 541 gidnumber: 541 homeDirectory: /home/uid541 dn: cn=user542,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user542 sn: user542 uid: uid542 givenname: givenname542 description: description542 userPassword: password542 mail: uid542 uidnumber: 542 gidnumber: 542 homeDirectory: /home/uid542 dn: cn=user543,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user543 sn: user543 uid: uid543 givenname: givenname543 description: description543 userPassword: password543 mail: uid543 uidnumber: 543 gidnumber: 543 homeDirectory: /home/uid543 dn: cn=user544,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user544 
sn: user544 uid: uid544 givenname: givenname544 description: description544 userPassword: password544 mail: uid544 uidnumber: 544 gidnumber: 544 homeDirectory: /home/uid544 dn: cn=user545,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user545 sn: user545 uid: uid545 givenname: givenname545 description: description545 userPassword: password545 mail: uid545 uidnumber: 545 gidnumber: 545 homeDirectory: /home/uid545 dn: cn=user546,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user546 sn: user546 uid: uid546 givenname: givenname546 description: description546 userPassword: password546 mail: uid546 uidnumber: 546 gidnumber: 546 homeDirectory: /home/uid546 dn: cn=user547,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user547 sn: user547 uid: uid547 givenname: givenname547 description: description547 userPassword: password547 mail: uid547 uidnumber: 547 gidnumber: 547 homeDirectory: /home/uid547 dn: cn=user548,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user548 sn: user548 uid: uid548 givenname: givenname548 description: description548 userPassword: password548 mail: uid548 uidnumber: 548 gidnumber: 548 homeDirectory: /home/uid548 dn: cn=user549,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user549 sn: user549 uid: uid549 givenname: givenname549 description: description549 userPassword: password549 mail: uid549 uidnumber: 549 gidnumber: 549 homeDirectory: /home/uid549 dn: cn=user550,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user550 sn: user550 uid: uid550 givenname: givenname550 description: description550 userPassword: password550 mail: uid550 uidnumber: 550 gidnumber: 550 homeDirectory: /home/uid550 dn: cn=user551,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user551 sn: user551 uid: uid551 givenname: givenname551 description: description551 userPassword: password551 mail: uid551 uidnumber: 551 gidnumber: 551 homeDirectory: /home/uid551 dn: cn=user552,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user552 sn: user552 uid: uid552 givenname: givenname552 description: description552 userPassword: password552 mail: uid552 uidnumber: 552 gidnumber: 552 homeDirectory: /home/uid552 dn: cn=user553,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user553 sn: user553 uid: uid553 givenname: givenname553 description: description553 userPassword: password553 mail: uid553 uidnumber: 553 gidnumber: 553 homeDirectory: /home/uid553 dn: cn=user554,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user554 sn: user554 uid: uid554 givenname: givenname554 description: description554 userPassword: password554 mail: uid554 uidnumber: 554 gidnumber: 554 homeDirectory: /home/uid554 dn: cn=user555,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user555 sn: user555 uid: uid555 givenname: givenname555 description: description555 userPassword: password555 mail: uid555 
uidnumber: 555 gidnumber: 555 homeDirectory: /home/uid555 dn: cn=user556,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user556 sn: user556 uid: uid556 givenname: givenname556 description: description556 userPassword: password556 mail: uid556 uidnumber: 556 gidnumber: 556 homeDirectory: /home/uid556 dn: cn=user557,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user557 sn: user557 uid: uid557 givenname: givenname557 description: description557 userPassword: password557 mail: uid557 uidnumber: 557 gidnumber: 557 homeDirectory: /home/uid557 dn: cn=user558,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user558 sn: user558 uid: uid558 givenname: givenname558 description: description558 userPassword: password558 mail: uid558 uidnumber: 558 gidnumber: 558 homeDirectory: /home/uid558 dn: cn=user559,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user559 sn: user559 uid: uid559 givenname: givenname559 description: description559 userPassword: password559 mail: uid559 uidnumber: 559 gidnumber: 559 homeDirectory: /home/uid559 dn: cn=user560,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user560 sn: user560 uid: uid560 givenname: givenname560 description: description560 userPassword: password560 mail: uid560 uidnumber: 560 gidnumber: 560 homeDirectory: /home/uid560 dn: cn=user561,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user561 
sn: user561 uid: uid561 givenname: givenname561 description: description561 userPassword: password561 mail: uid561 uidnumber: 561 gidnumber: 561 homeDirectory: /home/uid561 dn: cn=user562,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user562 sn: user562 uid: uid562 givenname: givenname562 description: description562 userPassword: password562 mail: uid562 uidnumber: 562 gidnumber: 562 homeDirectory: /home/uid562 dn: cn=user563,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user563 sn: user563 uid: uid563 givenname: givenname563 description: description563 userPassword: password563 mail: uid563 uidnumber: 563 gidnumber: 563 homeDirectory: /home/uid563 dn: cn=user564,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user564 sn: user564 uid: uid564 givenname: givenname564 description: description564 userPassword: password564 mail: uid564 uidnumber: 564 gidnumber: 564 homeDirectory: /home/uid564 dn: cn=user565,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user565 sn: user565 uid: uid565 givenname: givenname565 description: description565 userPassword: password565 mail: uid565 uidnumber: 565 gidnumber: 565 homeDirectory: /home/uid565 dn: cn=user566,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user566 sn: user566 uid: uid566 givenname: givenname566 description: description566 userPassword: password566 mail: uid566 uidnumber: 566 gidnumber: 566 homeDirectory: /home/uid566 dn: cn=user567,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user567 sn: user567 uid: uid567 givenname: givenname567 description: description567 userPassword: password567 mail: uid567 uidnumber: 567 gidnumber: 567 homeDirectory: /home/uid567 dn: cn=user568,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user568 sn: user568 uid: uid568 givenname: givenname568 description: description568 userPassword: password568 mail: uid568 uidnumber: 568 gidnumber: 568 homeDirectory: /home/uid568 dn: cn=user569,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user569 sn: user569 uid: uid569 givenname: givenname569 description: description569 userPassword: password569 mail: uid569 uidnumber: 569 gidnumber: 569 homeDirectory: /home/uid569 dn: cn=user570,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user570 sn: user570 uid: uid570 givenname: givenname570 description: description570 userPassword: password570 mail: uid570 uidnumber: 570 gidnumber: 570 homeDirectory: /home/uid570 dn: cn=user571,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user571 sn: user571 uid: uid571 givenname: givenname571 description: description571 userPassword: password571 mail: uid571 uidnumber: 571 gidnumber: 571 homeDirectory: /home/uid571 dn: cn=user572,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user572 sn: user572 uid: uid572 givenname: givenname572 description: description572 userPassword: password572 mail: uid572 
uidnumber: 572 gidnumber: 572 homeDirectory: /home/uid572 dn: cn=user573,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user573 sn: user573 uid: uid573 givenname: givenname573 description: description573 userPassword: password573 mail: uid573 uidnumber: 573 gidnumber: 573 homeDirectory: /home/uid573 dn: cn=user574,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user574 sn: user574 uid: uid574 givenname: givenname574 description: description574 userPassword: password574 mail: uid574 uidnumber: 574 gidnumber: 574 homeDirectory: /home/uid574 dn: cn=user575,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user575 sn: user575 uid: uid575 givenname: givenname575 description: description575 userPassword: password575 mail: uid575 uidnumber: 575 gidnumber: 575 homeDirectory: /home/uid575 dn: cn=user576,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user576 sn: user576 uid: uid576 givenname: givenname576 description: description576 userPassword: password576 mail: uid576 uidnumber: 576 gidnumber: 576 homeDirectory: /home/uid576 dn: cn=user577,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user577 sn: user577 uid: uid577 givenname: givenname577 description: description577 userPassword: password577 mail: uid577 uidnumber: 577 gidnumber: 577 homeDirectory: /home/uid577 dn: cn=user578,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user578 
sn: user578 uid: uid578 givenname: givenname578 description: description578 userPassword: password578 mail: uid578 uidnumber: 578 gidnumber: 578 homeDirectory: /home/uid578 dn: cn=user579,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user579 sn: user579 uid: uid579 givenname: givenname579 description: description579 userPassword: password579 mail: uid579 uidnumber: 579 gidnumber: 579 homeDirectory: /home/uid579 dn: cn=user580,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user580 sn: user580 uid: uid580 givenname: givenname580 description: description580 userPassword: password580 mail: uid580 uidnumber: 580 gidnumber: 580 homeDirectory: /home/uid580 dn: cn=user581,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user581 sn: user581 uid: uid581 givenname: givenname581 description: description581 userPassword: password581 mail: uid581 uidnumber: 581 gidnumber: 581 homeDirectory: /home/uid581 dn: cn=user582,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user582 sn: user582 uid: uid582 givenname: givenname582 description: description582 userPassword: password582 mail: uid582 uidnumber: 582 gidnumber: 582 homeDirectory: /home/uid582 dn: cn=user583,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user583 sn: user583 uid: uid583 givenname: givenname583 description: description583 userPassword: password583 mail: uid583 uidnumber: 583 gidnumber: 583 homeDirectory: /home/uid583 dn: cn=user584,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user584 sn: user584 uid: uid584 givenname: givenname584 description: description584 userPassword: password584 mail: uid584 uidnumber: 584 gidnumber: 584 homeDirectory: /home/uid584 dn: cn=user585,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user585 sn: user585 uid: uid585 givenname: givenname585 description: description585 userPassword: password585 mail: uid585 uidnumber: 585 gidnumber: 585 homeDirectory: /home/uid585 dn: cn=user586,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user586 sn: user586 uid: uid586 givenname: givenname586 description: description586 userPassword: password586 mail: uid586 uidnumber: 586 gidnumber: 586 homeDirectory: /home/uid586 dn: cn=user587,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user587 sn: user587 uid: uid587 givenname: givenname587 description: description587 userPassword: password587 mail: uid587 uidnumber: 587 gidnumber: 587 homeDirectory: /home/uid587 dn: cn=user588,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user588 sn: user588 uid: uid588 givenname: givenname588 description: description588 userPassword: password588 mail: uid588 uidnumber: 588 gidnumber: 588 homeDirectory: /home/uid588 dn: cn=user589,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user589 sn: user589 uid: uid589 givenname: givenname589 description: description589 userPassword: password589 mail: uid589 
uidnumber: 589 gidnumber: 589 homeDirectory: /home/uid589 dn: cn=user590,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user590 sn: user590 uid: uid590 givenname: givenname590 description: description590 userPassword: password590 mail: uid590 uidnumber: 590 gidnumber: 590 homeDirectory: /home/uid590 dn: cn=user591,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user591 sn: user591 uid: uid591 givenname: givenname591 description: description591 userPassword: password591 mail: uid591 uidnumber: 591 gidnumber: 591 homeDirectory: /home/uid591 dn: cn=user592,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user592 sn: user592 uid: uid592 givenname: givenname592 description: description592 userPassword: password592 mail: uid592 uidnumber: 592 gidnumber: 592 homeDirectory: /home/uid592 dn: cn=user593,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user593 sn: user593 uid: uid593 givenname: givenname593 description: description593 userPassword: password593 mail: uid593 uidnumber: 593 gidnumber: 593 homeDirectory: /home/uid593 dn: cn=user594,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user594 sn: user594 uid: uid594 givenname: givenname594 description: description594 userPassword: password594 mail: uid594 uidnumber: 594 gidnumber: 594 homeDirectory: /home/uid594 dn: cn=user595,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user595 
sn: user595 uid: uid595 givenname: givenname595 description: description595 userPassword: password595 mail: uid595 uidnumber: 595 gidnumber: 595 homeDirectory: /home/uid595 dn: cn=user596,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user596 sn: user596 uid: uid596 givenname: givenname596 description: description596 userPassword: password596 mail: uid596 uidnumber: 596 gidnumber: 596 homeDirectory: /home/uid596 dn: cn=user597,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user597 sn: user597 uid: uid597 givenname: givenname597 description: description597 userPassword: password597 mail: uid597 uidnumber: 597 gidnumber: 597 homeDirectory: /home/uid597 dn: cn=user598,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user598 sn: user598 uid: uid598 givenname: givenname598 description: description598 userPassword: password598 mail: uid598 uidnumber: 598 gidnumber: 598 homeDirectory: /home/uid598 dn: cn=user599,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user599 sn: user599 uid: uid599 givenname: givenname599 description: description599 userPassword: password599 mail: uid599 uidnumber: 599 gidnumber: 599 homeDirectory: /home/uid599 dn: cn=user600,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user600 sn: user600 uid: uid600 givenname: givenname600 description: description600 userPassword: password600 mail: uid600 uidnumber: 600 gidnumber: 600 homeDirectory: /home/uid600 dn: cn=user601,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user601 sn: user601 uid: uid601 givenname: givenname601 description: description601 userPassword: password601 mail: uid601 uidnumber: 601 gidnumber: 601 homeDirectory: /home/uid601 dn: cn=user602,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user602 sn: user602 uid: uid602 givenname: givenname602 description: description602 userPassword: password602 mail: uid602 uidnumber: 602 gidnumber: 602 homeDirectory: /home/uid602 dn: cn=user603,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user603 sn: user603 uid: uid603 givenname: givenname603 description: description603 userPassword: password603 mail: uid603 uidnumber: 603 gidnumber: 603 homeDirectory: /home/uid603 dn: cn=user604,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user604 sn: user604 uid: uid604 givenname: givenname604 description: description604 userPassword: password604 mail: uid604 uidnumber: 604 gidnumber: 604 homeDirectory: /home/uid604 dn: cn=user605,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user605 sn: user605 uid: uid605 givenname: givenname605 description: description605 userPassword: password605 mail: uid605 uidnumber: 605 gidnumber: 605 homeDirectory: /home/uid605 dn: cn=user606,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user606 sn: user606 uid: uid606 givenname: givenname606 description: description606 userPassword: password606 mail: uid606 
uidnumber: 606 gidnumber: 606 homeDirectory: /home/uid606 dn: cn=user607,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user607 sn: user607 uid: uid607 givenname: givenname607 description: description607 userPassword: password607 mail: uid607 uidnumber: 607 gidnumber: 607 homeDirectory: /home/uid607 dn: cn=user608,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user608 sn: user608 uid: uid608 givenname: givenname608 description: description608 userPassword: password608 mail: uid608 uidnumber: 608 gidnumber: 608 homeDirectory: /home/uid608 dn: cn=user609,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user609 sn: user609 uid: uid609 givenname: givenname609 description: description609 userPassword: password609 mail: uid609 uidnumber: 609 gidnumber: 609 homeDirectory: /home/uid609 dn: cn=user610,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user610 sn: user610 uid: uid610 givenname: givenname610 description: description610 userPassword: password610 mail: uid610 uidnumber: 610 gidnumber: 610 homeDirectory: /home/uid610 dn: cn=user611,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user611 sn: user611 uid: uid611 givenname: givenname611 description: description611 userPassword: password611 mail: uid611 uidnumber: 611 gidnumber: 611 homeDirectory: /home/uid611 dn: cn=user612,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user612 
sn: user612 uid: uid612 givenname: givenname612 description: description612 userPassword: password612 mail: uid612 uidnumber: 612 gidnumber: 612 homeDirectory: /home/uid612 dn: cn=user613,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user613 sn: user613 uid: uid613 givenname: givenname613 description: description613 userPassword: password613 mail: uid613 uidnumber: 613 gidnumber: 613 homeDirectory: /home/uid613 dn: cn=user614,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user614 sn: user614 uid: uid614 givenname: givenname614 description: description614 userPassword: password614 mail: uid614 uidnumber: 614 gidnumber: 614 homeDirectory: /home/uid614 dn: cn=user615,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user615 sn: user615 uid: uid615 givenname: givenname615 description: description615 userPassword: password615 mail: uid615 uidnumber: 615 gidnumber: 615 homeDirectory: /home/uid615 dn: cn=user616,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user616 sn: user616 uid: uid616 givenname: givenname616 description: description616 userPassword: password616 mail: uid616 uidnumber: 616 gidnumber: 616 homeDirectory: /home/uid616 dn: cn=user617,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user617 sn: user617 uid: uid617 givenname: givenname617 description: description617 userPassword: password617 mail: uid617 uidnumber: 617 gidnumber: 617 homeDirectory: /home/uid617 dn: cn=user618,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user618 sn: user618 uid: uid618 givenname: givenname618 description: description618 userPassword: password618 mail: uid618 uidnumber: 618 gidnumber: 618 homeDirectory: /home/uid618 dn: cn=user619,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user619 sn: user619 uid: uid619 givenname: givenname619 description: description619 userPassword: password619 mail: uid619 uidnumber: 619 gidnumber: 619 homeDirectory: /home/uid619 dn: cn=user620,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user620 sn: user620 uid: uid620 givenname: givenname620 description: description620 userPassword: password620 mail: uid620 uidnumber: 620 gidnumber: 620 homeDirectory: /home/uid620 dn: cn=user621,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user621 sn: user621 uid: uid621 givenname: givenname621 description: description621 userPassword: password621 mail: uid621 uidnumber: 621 gidnumber: 621 homeDirectory: /home/uid621 dn: cn=user622,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user622 sn: user622 uid: uid622 givenname: givenname622 description: description622 userPassword: password622 mail: uid622 uidnumber: 622 gidnumber: 622 homeDirectory: /home/uid622 dn: cn=user623,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user623 sn: user623 uid: uid623 givenname: givenname623 description: description623 userPassword: password623 mail: uid623 
uidnumber: 623 gidnumber: 623 homeDirectory: /home/uid623 dn: cn=user624,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user624 sn: user624 uid: uid624 givenname: givenname624 description: description624 userPassword: password624 mail: uid624 uidnumber: 624 gidnumber: 624 homeDirectory: /home/uid624 dn: cn=user625,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user625 sn: user625 uid: uid625 givenname: givenname625 description: description625 userPassword: password625 mail: uid625 uidnumber: 625 gidnumber: 625 homeDirectory: /home/uid625 dn: cn=user626,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user626 sn: user626 uid: uid626 givenname: givenname626 description: description626 userPassword: password626 mail: uid626 uidnumber: 626 gidnumber: 626 homeDirectory: /home/uid626 dn: cn=user627,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user627 sn: user627 uid: uid627 givenname: givenname627 description: description627 userPassword: password627 mail: uid627 uidnumber: 627 gidnumber: 627 homeDirectory: /home/uid627 dn: cn=user628,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user628 sn: user628 uid: uid628 givenname: givenname628 description: description628 userPassword: password628 mail: uid628 uidnumber: 628 gidnumber: 628 homeDirectory: /home/uid628 dn: cn=user629,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user629 
sn: user629 uid: uid629 givenname: givenname629 description: description629 userPassword: password629 mail: uid629 uidnumber: 629 gidnumber: 629 homeDirectory: /home/uid629 dn: cn=user630,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user630 sn: user630 uid: uid630 givenname: givenname630 description: description630 userPassword: password630 mail: uid630 uidnumber: 630 gidnumber: 630 homeDirectory: /home/uid630 dn: cn=user631,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user631 sn: user631 uid: uid631 givenname: givenname631 description: description631 userPassword: password631 mail: uid631 uidnumber: 631 gidnumber: 631 homeDirectory: /home/uid631 dn: cn=user632,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user632 sn: user632 uid: uid632 givenname: givenname632 description: description632 userPassword: password632 mail: uid632 uidnumber: 632 gidnumber: 632 homeDirectory: /home/uid632 dn: cn=user633,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user633 sn: user633 uid: uid633 givenname: givenname633 description: description633 userPassword: password633 mail: uid633 uidnumber: 633 gidnumber: 633 homeDirectory: /home/uid633 dn: cn=user634,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user634 sn: user634 uid: uid634 givenname: givenname634 description: description634 userPassword: password634 mail: uid634 uidnumber: 634 gidnumber: 634 homeDirectory: /home/uid634 dn: cn=user635,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user635 sn: user635 uid: uid635 givenname: givenname635 description: description635 userPassword: password635 mail: uid635 uidnumber: 635 gidnumber: 635 homeDirectory: /home/uid635 dn: cn=user636,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user636 sn: user636 uid: uid636 givenname: givenname636 description: description636 userPassword: password636 mail: uid636 uidnumber: 636 gidnumber: 636 homeDirectory: /home/uid636 dn: cn=user637,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user637 sn: user637 uid: uid637 givenname: givenname637 description: description637 userPassword: password637 mail: uid637 uidnumber: 637 gidnumber: 637 homeDirectory: /home/uid637 dn: cn=user638,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user638 sn: user638 uid: uid638 givenname: givenname638 description: description638 userPassword: password638 mail: uid638 uidnumber: 638 gidnumber: 638 homeDirectory: /home/uid638 dn: cn=user639,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user639 sn: user639 uid: uid639 givenname: givenname639 description: description639 userPassword: password639 mail: uid639 uidnumber: 639 gidnumber: 639 homeDirectory: /home/uid639 dn: cn=user640,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user640 sn: user640 uid: uid640 givenname: givenname640 description: description640 userPassword: password640 mail: uid640 
uidnumber: 640 gidnumber: 640 homeDirectory: /home/uid640 dn: cn=user641,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user641 sn: user641 uid: uid641 givenname: givenname641 description: description641 userPassword: password641 mail: uid641 uidnumber: 641 gidnumber: 641 homeDirectory: /home/uid641 dn: cn=user642,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user642 sn: user642 uid: uid642 givenname: givenname642 description: description642 userPassword: password642 mail: uid642 uidnumber: 642 gidnumber: 642 homeDirectory: /home/uid642 dn: cn=user643,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user643 sn: user643 uid: uid643 givenname: givenname643 description: description643 userPassword: password643 mail: uid643 uidnumber: 643 gidnumber: 643 homeDirectory: /home/uid643 dn: cn=user644,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user644 sn: user644 uid: uid644 givenname: givenname644 description: description644 userPassword: password644 mail: uid644 uidnumber: 644 gidnumber: 644 homeDirectory: /home/uid644 dn: cn=user645,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user645 sn: user645 uid: uid645 givenname: givenname645 description: description645 userPassword: password645 mail: uid645 uidnumber: 645 gidnumber: 645 homeDirectory: /home/uid645 dn: cn=user646,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user646 
sn: user646 uid: uid646 givenname: givenname646 description: description646 userPassword: password646 mail: uid646 uidnumber: 646 gidnumber: 646 homeDirectory: /home/uid646 dn: cn=user647,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user647 sn: user647 uid: uid647 givenname: givenname647 description: description647 userPassword: password647 mail: uid647 uidnumber: 647 gidnumber: 647 homeDirectory: /home/uid647 dn: cn=user648,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user648 sn: user648 uid: uid648 givenname: givenname648 description: description648 userPassword: password648 mail: uid648 uidnumber: 648 gidnumber: 648 homeDirectory: /home/uid648 dn: cn=user649,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user649 sn: user649 uid: uid649 givenname: givenname649 description: description649 userPassword: password649 mail: uid649 uidnumber: 649 gidnumber: 649 homeDirectory: /home/uid649 dn: cn=user650,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user650 sn: user650 uid: uid650 givenname: givenname650 description: description650 userPassword: password650 mail: uid650 uidnumber: 650 gidnumber: 650 homeDirectory: /home/uid650 dn: cn=user651,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user651 sn: user651 uid: uid651 givenname: givenname651 description: description651 userPassword: password651 mail: uid651 uidnumber: 651 gidnumber: 651 homeDirectory: /home/uid651 dn: cn=user652,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user652 sn: user652 uid: uid652 givenname: givenname652 description: description652 userPassword: password652 mail: uid652 uidnumber: 652 gidnumber: 652 homeDirectory: /home/uid652 dn: cn=user653,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user653 sn: user653 uid: uid653 givenname: givenname653 description: description653 userPassword: password653 mail: uid653 uidnumber: 653 gidnumber: 653 homeDirectory: /home/uid653 dn: cn=user654,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user654 sn: user654 uid: uid654 givenname: givenname654 description: description654 userPassword: password654 mail: uid654 uidnumber: 654 gidnumber: 654 homeDirectory: /home/uid654 dn: cn=user655,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user655 sn: user655 uid: uid655 givenname: givenname655 description: description655 userPassword: password655 mail: uid655 uidnumber: 655 gidnumber: 655 homeDirectory: /home/uid655 dn: cn=user656,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user656 sn: user656 uid: uid656 givenname: givenname656 description: description656 userPassword: password656 mail: uid656 uidnumber: 656 gidnumber: 656 homeDirectory: /home/uid656 dn: cn=user657,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user657 sn: user657 uid: uid657 givenname: givenname657 description: description657 userPassword: password657 mail: uid657 
uidnumber: 657 gidnumber: 657 homeDirectory: /home/uid657 dn: cn=user658,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user658 sn: user658 uid: uid658 givenname: givenname658 description: description658 userPassword: password658 mail: uid658 uidnumber: 658 gidnumber: 658 homeDirectory: /home/uid658 dn: cn=user659,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user659 sn: user659 uid: uid659 givenname: givenname659 description: description659 userPassword: password659 mail: uid659 uidnumber: 659 gidnumber: 659 homeDirectory: /home/uid659 dn: cn=user660,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user660 sn: user660 uid: uid660 givenname: givenname660 description: description660 userPassword: password660 mail: uid660 uidnumber: 660 gidnumber: 660 homeDirectory: /home/uid660 dn: cn=user661,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user661 sn: user661 uid: uid661 givenname: givenname661 description: description661 userPassword: password661 mail: uid661 uidnumber: 661 gidnumber: 661 homeDirectory: /home/uid661 dn: cn=user662,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user662 sn: user662 uid: uid662 givenname: givenname662 description: description662 userPassword: password662 mail: uid662 uidnumber: 662 gidnumber: 662 homeDirectory: /home/uid662 dn: cn=user663,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user663 
sn: user663 uid: uid663 givenname: givenname663 description: description663 userPassword: password663 mail: uid663 uidnumber: 663 gidnumber: 663 homeDirectory: /home/uid663 dn: cn=user664,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user664 sn: user664 uid: uid664 givenname: givenname664 description: description664 userPassword: password664 mail: uid664 uidnumber: 664 gidnumber: 664 homeDirectory: /home/uid664 dn: cn=user665,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user665 sn: user665 uid: uid665 givenname: givenname665 description: description665 userPassword: password665 mail: uid665 uidnumber: 665 gidnumber: 665 homeDirectory: /home/uid665 dn: cn=user666,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user666 sn: user666 uid: uid666 givenname: givenname666 description: description666 userPassword: password666 mail: uid666 uidnumber: 666 gidnumber: 666 homeDirectory: /home/uid666 dn: cn=user667,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user667 sn: user667 uid: uid667 givenname: givenname667 description: description667 userPassword: password667 mail: uid667 uidnumber: 667 gidnumber: 667 homeDirectory: /home/uid667 dn: cn=user668,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user668 sn: user668 uid: uid668 givenname: givenname668 description: description668 userPassword: password668 mail: uid668 uidnumber: 668 gidnumber: 668 homeDirectory: /home/uid668 dn: cn=user669,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user669 sn: user669 uid: uid669 givenname: givenname669 description: description669 userPassword: password669 mail: uid669 uidnumber: 669 gidnumber: 669 homeDirectory: /home/uid669 dn: cn=user670,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user670 sn: user670 uid: uid670 givenname: givenname670 description: description670 userPassword: password670 mail: uid670 uidnumber: 670 gidnumber: 670 homeDirectory: /home/uid670 dn: cn=user671,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user671 sn: user671 uid: uid671 givenname: givenname671 description: description671 userPassword: password671 mail: uid671 uidnumber: 671 gidnumber: 671 homeDirectory: /home/uid671 dn: cn=user672,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user672 sn: user672 uid: uid672 givenname: givenname672 description: description672 userPassword: password672 mail: uid672 uidnumber: 672 gidnumber: 672 homeDirectory: /home/uid672 dn: cn=user673,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user673 sn: user673 uid: uid673 givenname: givenname673 description: description673 userPassword: password673 mail: uid673 uidnumber: 673 gidnumber: 673 homeDirectory: /home/uid673 dn: cn=user674,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user674 sn: user674 uid: uid674 givenname: givenname674 description: description674 userPassword: password674 mail: uid674 
uidnumber: 674 gidnumber: 674 homeDirectory: /home/uid674 dn: cn=user675,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user675 sn: user675 uid: uid675 givenname: givenname675 description: description675 userPassword: password675 mail: uid675 uidnumber: 675 gidnumber: 675 homeDirectory: /home/uid675 dn: cn=user676,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user676 sn: user676 uid: uid676 givenname: givenname676 description: description676 userPassword: password676 mail: uid676 uidnumber: 676 gidnumber: 676 homeDirectory: /home/uid676 dn: cn=user677,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user677 sn: user677 uid: uid677 givenname: givenname677 description: description677 userPassword: password677 mail: uid677 uidnumber: 677 gidnumber: 677 homeDirectory: /home/uid677 dn: cn=user678,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user678 sn: user678 uid: uid678 givenname: givenname678 description: description678 userPassword: password678 mail: uid678 uidnumber: 678 gidnumber: 678 homeDirectory: /home/uid678 dn: cn=user679,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user679 sn: user679 uid: uid679 givenname: givenname679 description: description679 userPassword: password679 mail: uid679 uidnumber: 679 gidnumber: 679 homeDirectory: /home/uid679 dn: cn=user680,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user680 
sn: user680 uid: uid680 givenname: givenname680 description: description680 userPassword: password680 mail: uid680 uidnumber: 680 gidnumber: 680 homeDirectory: /home/uid680 dn: cn=user681,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user681 sn: user681 uid: uid681 givenname: givenname681 description: description681 userPassword: password681 mail: uid681 uidnumber: 681 gidnumber: 681 homeDirectory: /home/uid681 dn: cn=user682,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user682 sn: user682 uid: uid682 givenname: givenname682 description: description682 userPassword: password682 mail: uid682 uidnumber: 682 gidnumber: 682 homeDirectory: /home/uid682 dn: cn=user683,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user683 sn: user683 uid: uid683 givenname: givenname683 description: description683 userPassword: password683 mail: uid683 uidnumber: 683 gidnumber: 683 homeDirectory: /home/uid683 dn: cn=user684,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user684 sn: user684 uid: uid684 givenname: givenname684 description: description684 userPassword: password684 mail: uid684 uidnumber: 684 gidnumber: 684 homeDirectory: /home/uid684 dn: cn=user685,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user685 sn: user685 uid: uid685 givenname: givenname685 description: description685 userPassword: password685 mail: uid685 uidnumber: 685 gidnumber: 685 homeDirectory: /home/uid685 dn: cn=user686,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user686 sn: user686 uid: uid686 givenname: givenname686 description: description686 userPassword: password686 mail: uid686 uidnumber: 686 gidnumber: 686 homeDirectory: /home/uid686 dn: cn=user687,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user687 sn: user687 uid: uid687 givenname: givenname687 description: description687 userPassword: password687 mail: uid687 uidnumber: 687 gidnumber: 687 homeDirectory: /home/uid687 dn: cn=user688,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user688 sn: user688 uid: uid688 givenname: givenname688 description: description688 userPassword: password688 mail: uid688 uidnumber: 688 gidnumber: 688 homeDirectory: /home/uid688 dn: cn=user689,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user689 sn: user689 uid: uid689 givenname: givenname689 description: description689 userPassword: password689 mail: uid689 uidnumber: 689 gidnumber: 689 homeDirectory: /home/uid689 dn: cn=user690,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user690 sn: user690 uid: uid690 givenname: givenname690 description: description690 userPassword: password690 mail: uid690 uidnumber: 690 gidnumber: 690 homeDirectory: /home/uid690 dn: cn=user691,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user691 sn: user691 uid: uid691 givenname: givenname691 description: description691 userPassword: password691 mail: uid691 
uidnumber: 691 gidnumber: 691 homeDirectory: /home/uid691 dn: cn=user692,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user692 sn: user692 uid: uid692 givenname: givenname692 description: description692 userPassword: password692 mail: uid692 uidnumber: 692 gidnumber: 692 homeDirectory: /home/uid692 dn: cn=user693,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user693 sn: user693 uid: uid693 givenname: givenname693 description: description693 userPassword: password693 mail: uid693 uidnumber: 693 gidnumber: 693 homeDirectory: /home/uid693 dn: cn=user694,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user694 sn: user694 uid: uid694 givenname: givenname694 description: description694 userPassword: password694 mail: uid694 uidnumber: 694 gidnumber: 694 homeDirectory: /home/uid694 dn: cn=user695,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user695 sn: user695 uid: uid695 givenname: givenname695 description: description695 userPassword: password695 mail: uid695 uidnumber: 695 gidnumber: 695 homeDirectory: /home/uid695 dn: cn=user696,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user696 sn: user696 uid: uid696 givenname: givenname696 description: description696 userPassword: password696 mail: uid696 uidnumber: 696 gidnumber: 696 homeDirectory: /home/uid696 dn: cn=user697,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user697 
sn: user697 uid: uid697 givenname: givenname697 description: description697 userPassword: password697 mail: uid697 uidnumber: 697 gidnumber: 697 homeDirectory: /home/uid697 dn: cn=user698,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user698 sn: user698 uid: uid698 givenname: givenname698 description: description698 userPassword: password698 mail: uid698 uidnumber: 698 gidnumber: 698 homeDirectory: /home/uid698 dn: cn=user699,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user699 sn: user699 uid: uid699 givenname: givenname699 description: description699 userPassword: password699 mail: uid699 uidnumber: 699 gidnumber: 699 homeDirectory: /home/uid699 dn: cn=user700,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user700 sn: user700 uid: uid700 givenname: givenname700 description: description700 userPassword: password700 mail: uid700 uidnumber: 700 gidnumber: 700 homeDirectory: /home/uid700 dn: cn=user701,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user701 sn: user701 uid: uid701 givenname: givenname701 description: description701 userPassword: password701 mail: uid701 uidnumber: 701 gidnumber: 701 homeDirectory: /home/uid701 dn: cn=user702,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user702 sn: user702 uid: uid702 givenname: givenname702 description: description702 userPassword: password702 mail: uid702 uidnumber: 702 gidnumber: 702 homeDirectory: /home/uid702 dn: cn=user703,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user703 sn: user703 uid: uid703 givenname: givenname703 description: description703 userPassword: password703 mail: uid703 uidnumber: 703 gidnumber: 703 homeDirectory: /home/uid703 dn: cn=user704,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user704 sn: user704 uid: uid704 givenname: givenname704 description: description704 userPassword: password704 mail: uid704 uidnumber: 704 gidnumber: 704 homeDirectory: /home/uid704 dn: cn=user705,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user705 sn: user705 uid: uid705 givenname: givenname705 description: description705 userPassword: password705 mail: uid705 uidnumber: 705 gidnumber: 705 homeDirectory: /home/uid705 dn: cn=user706,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user706 sn: user706 uid: uid706 givenname: givenname706 description: description706 userPassword: password706 mail: uid706 uidnumber: 706 gidnumber: 706 homeDirectory: /home/uid706 dn: cn=user707,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user707 sn: user707 uid: uid707 givenname: givenname707 description: description707 userPassword: password707 mail: uid707 uidnumber: 707 gidnumber: 707 homeDirectory: /home/uid707 dn: cn=user708,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user708 sn: user708 uid: uid708 givenname: givenname708 description: description708 userPassword: password708 mail: uid708 
uidnumber: 708 gidnumber: 708 homeDirectory: /home/uid708 dn: cn=user709,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user709 sn: user709 uid: uid709 givenname: givenname709 description: description709 userPassword: password709 mail: uid709 uidnumber: 709 gidnumber: 709 homeDirectory: /home/uid709 dn: cn=user710,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user710 sn: user710 uid: uid710 givenname: givenname710 description: description710 userPassword: password710 mail: uid710 uidnumber: 710 gidnumber: 710 homeDirectory: /home/uid710 dn: cn=user711,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user711 sn: user711 uid: uid711 givenname: givenname711 description: description711 userPassword: password711 mail: uid711 uidnumber: 711 gidnumber: 711 homeDirectory: /home/uid711 dn: cn=user712,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user712 sn: user712 uid: uid712 givenname: givenname712 description: description712 userPassword: password712 mail: uid712 uidnumber: 712 gidnumber: 712 homeDirectory: /home/uid712 dn: cn=user713,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user713 sn: user713 uid: uid713 givenname: givenname713 description: description713 userPassword: password713 mail: uid713 uidnumber: 713 gidnumber: 713 homeDirectory: /home/uid713 dn: cn=user714,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user714 
sn: user714 uid: uid714 givenname: givenname714 description: description714 userPassword: password714 mail: uid714 uidnumber: 714 gidnumber: 714 homeDirectory: /home/uid714 dn: cn=user715,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user715 sn: user715 uid: uid715 givenname: givenname715 description: description715 userPassword: password715 mail: uid715 uidnumber: 715 gidnumber: 715 homeDirectory: /home/uid715 dn: cn=user716,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user716 sn: user716 uid: uid716 givenname: givenname716 description: description716 userPassword: password716 mail: uid716 uidnumber: 716 gidnumber: 716 homeDirectory: /home/uid716 dn: cn=user717,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user717 sn: user717 uid: uid717 givenname: givenname717 description: description717 userPassword: password717 mail: uid717 uidnumber: 717 gidnumber: 717 homeDirectory: /home/uid717 dn: cn=user718,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user718 sn: user718 uid: uid718 givenname: givenname718 description: description718 userPassword: password718 mail: uid718 uidnumber: 718 gidnumber: 718 homeDirectory: /home/uid718 dn: cn=user719,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user719 sn: user719 uid: uid719 givenname: givenname719 description: description719 userPassword: password719 mail: uid719 uidnumber: 719 gidnumber: 719 homeDirectory: /home/uid719 dn: cn=user720,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user720 sn: user720 uid: uid720 givenname: givenname720 description: description720 userPassword: password720 mail: uid720 uidnumber: 720 gidnumber: 720 homeDirectory: /home/uid720 dn: cn=user721,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user721 sn: user721 uid: uid721 givenname: givenname721 description: description721 userPassword: password721 mail: uid721 uidnumber: 721 gidnumber: 721 homeDirectory: /home/uid721 dn: cn=user722,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user722 sn: user722 uid: uid722 givenname: givenname722 description: description722 userPassword: password722 mail: uid722 uidnumber: 722 gidnumber: 722 homeDirectory: /home/uid722 dn: cn=user723,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user723 sn: user723 uid: uid723 givenname: givenname723 description: description723 userPassword: password723 mail: uid723 uidnumber: 723 gidnumber: 723 homeDirectory: /home/uid723 dn: cn=user724,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user724 sn: user724 uid: uid724 givenname: givenname724 description: description724 userPassword: password724 mail: uid724 uidnumber: 724 gidnumber: 724 homeDirectory: /home/uid724 dn: cn=user725,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user725 sn: user725 uid: uid725 givenname: givenname725 description: description725 userPassword: password725 mail: uid725 
uidnumber: 725 gidnumber: 725 homeDirectory: /home/uid725 dn: cn=user726,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user726 sn: user726 uid: uid726 givenname: givenname726 description: description726 userPassword: password726 mail: uid726 uidnumber: 726 gidnumber: 726 homeDirectory: /home/uid726 dn: cn=user727,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user727 sn: user727 uid: uid727 givenname: givenname727 description: description727 userPassword: password727 mail: uid727 uidnumber: 727 gidnumber: 727 homeDirectory: /home/uid727 dn: cn=user728,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user728 sn: user728 uid: uid728 givenname: givenname728 description: description728 userPassword: password728 mail: uid728 uidnumber: 728 gidnumber: 728 homeDirectory: /home/uid728 dn: cn=user729,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user729 sn: user729 uid: uid729 givenname: givenname729 description: description729 userPassword: password729 mail: uid729 uidnumber: 729 gidnumber: 729 homeDirectory: /home/uid729 dn: cn=user730,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user730 sn: user730 uid: uid730 givenname: givenname730 description: description730 userPassword: password730 mail: uid730 uidnumber: 730 gidnumber: 730 homeDirectory: /home/uid730 dn: cn=user731,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user731 
sn: user731 uid: uid731 givenname: givenname731 description: description731 userPassword: password731 mail: uid731 uidnumber: 731 gidnumber: 731 homeDirectory: /home/uid731 dn: cn=user732,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user732 sn: user732 uid: uid732 givenname: givenname732 description: description732 userPassword: password732 mail: uid732 uidnumber: 732 gidnumber: 732 homeDirectory: /home/uid732 dn: cn=user733,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user733 sn: user733 uid: uid733 givenname: givenname733 description: description733 userPassword: password733 mail: uid733 uidnumber: 733 gidnumber: 733 homeDirectory: /home/uid733 dn: cn=user734,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user734 sn: user734 uid: uid734 givenname: givenname734 description: description734 userPassword: password734 mail: uid734 uidnumber: 734 gidnumber: 734 homeDirectory: /home/uid734 dn: cn=user735,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user735 sn: user735 uid: uid735 givenname: givenname735 description: description735 userPassword: password735 mail: uid735 uidnumber: 735 gidnumber: 735 homeDirectory: /home/uid735 dn: cn=user736,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user736 sn: user736 uid: uid736 givenname: givenname736 description: description736 userPassword: password736 mail: uid736 uidnumber: 736 gidnumber: 736 homeDirectory: /home/uid736 dn: cn=user737,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user737 sn: user737 uid: uid737 givenname: givenname737 description: description737 userPassword: password737 mail: uid737 uidnumber: 737 gidnumber: 737 homeDirectory: /home/uid737 dn: cn=user738,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user738 sn: user738 uid: uid738 givenname: givenname738 description: description738 userPassword: password738 mail: uid738 uidnumber: 738 gidnumber: 738 homeDirectory: /home/uid738 dn: cn=user739,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user739 sn: user739 uid: uid739 givenname: givenname739 description: description739 userPassword: password739 mail: uid739 uidnumber: 739 gidnumber: 739 homeDirectory: /home/uid739 dn: cn=user740,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user740 sn: user740 uid: uid740 givenname: givenname740 description: description740 userPassword: password740 mail: uid740 uidnumber: 740 gidnumber: 740 homeDirectory: /home/uid740 dn: cn=user741,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user741 sn: user741 uid: uid741 givenname: givenname741 description: description741 userPassword: password741 mail: uid741 uidnumber: 741 gidnumber: 741 homeDirectory: /home/uid741 dn: cn=user742,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user742 sn: user742 uid: uid742 givenname: givenname742 description: description742 userPassword: password742 mail: uid742 
uidnumber: 742 gidnumber: 742 homeDirectory: /home/uid742 dn: cn=user743,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user743 sn: user743 uid: uid743 givenname: givenname743 description: description743 userPassword: password743 mail: uid743 uidnumber: 743 gidnumber: 743 homeDirectory: /home/uid743 dn: cn=user744,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user744 sn: user744 uid: uid744 givenname: givenname744 description: description744 userPassword: password744 mail: uid744 uidnumber: 744 gidnumber: 744 homeDirectory: /home/uid744 dn: cn=user745,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user745 sn: user745 uid: uid745 givenname: givenname745 description: description745 userPassword: password745 mail: uid745 uidnumber: 745 gidnumber: 745 homeDirectory: /home/uid745 dn: cn=user746,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user746 sn: user746 uid: uid746 givenname: givenname746 description: description746 userPassword: password746 mail: uid746 uidnumber: 746 gidnumber: 746 homeDirectory: /home/uid746 dn: cn=user747,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user747 sn: user747 uid: uid747 givenname: givenname747 description: description747 userPassword: password747 mail: uid747 uidnumber: 747 gidnumber: 747 homeDirectory: /home/uid747 dn: cn=user748,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user748 
sn: user748 uid: uid748 givenname: givenname748 description: description748 userPassword: password748 mail: uid748 uidnumber: 748 gidnumber: 748 homeDirectory: /home/uid748 dn: cn=user749,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user749 sn: user749 uid: uid749 givenname: givenname749 description: description749 userPassword: password749 mail: uid749 uidnumber: 749 gidnumber: 749 homeDirectory: /home/uid749 dn: cn=user750,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user750 sn: user750 uid: uid750 givenname: givenname750 description: description750 userPassword: password750 mail: uid750 uidnumber: 750 gidnumber: 750 homeDirectory: /home/uid750 dn: cn=user751,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user751 sn: user751 uid: uid751 givenname: givenname751 description: description751 userPassword: password751 mail: uid751 uidnumber: 751 gidnumber: 751 homeDirectory: /home/uid751 dn: cn=user752,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user752 sn: user752 uid: uid752 givenname: givenname752 description: description752 userPassword: password752 mail: uid752 uidnumber: 752 gidnumber: 752 homeDirectory: /home/uid752 dn: cn=user753,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user753 sn: user753 uid: uid753 givenname: givenname753 description: description753 userPassword: password753 mail: uid753 uidnumber: 753 gidnumber: 753 homeDirectory: /home/uid753 dn: cn=user754,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user754 sn: user754 uid: uid754 givenname: givenname754 description: description754 userPassword: password754 mail: uid754 uidnumber: 754 gidnumber: 754 homeDirectory: /home/uid754 dn: cn=user755,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user755 sn: user755 uid: uid755 givenname: givenname755 description: description755 userPassword: password755 mail: uid755 uidnumber: 755 gidnumber: 755 homeDirectory: /home/uid755 dn: cn=user756,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user756 sn: user756 uid: uid756 givenname: givenname756 description: description756 userPassword: password756 mail: uid756 uidnumber: 756 gidnumber: 756 homeDirectory: /home/uid756 dn: cn=user757,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user757 sn: user757 uid: uid757 givenname: givenname757 description: description757 userPassword: password757 mail: uid757 uidnumber: 757 gidnumber: 757 homeDirectory: /home/uid757 dn: cn=user758,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user758 sn: user758 uid: uid758 givenname: givenname758 description: description758 userPassword: password758 mail: uid758 uidnumber: 758 gidnumber: 758 homeDirectory: /home/uid758 dn: cn=user759,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user759 sn: user759 uid: uid759 givenname: givenname759 description: description759 userPassword: password759 mail: uid759 
uidnumber: 759 gidnumber: 759 homeDirectory: /home/uid759 dn: cn=user760,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user760 sn: user760 uid: uid760 givenname: givenname760 description: description760 userPassword: password760 mail: uid760 uidnumber: 760 gidnumber: 760 homeDirectory: /home/uid760 dn: cn=user761,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user761 sn: user761 uid: uid761 givenname: givenname761 description: description761 userPassword: password761 mail: uid761 uidnumber: 761 gidnumber: 761 homeDirectory: /home/uid761 dn: cn=user762,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user762 sn: user762 uid: uid762 givenname: givenname762 description: description762 userPassword: password762 mail: uid762 uidnumber: 762 gidnumber: 762 homeDirectory: /home/uid762 dn: cn=user763,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user763 sn: user763 uid: uid763 givenname: givenname763 description: description763 userPassword: password763 mail: uid763 uidnumber: 763 gidnumber: 763 homeDirectory: /home/uid763 dn: cn=user764,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user764 sn: user764 uid: uid764 givenname: givenname764 description: description764 userPassword: password764 mail: uid764 uidnumber: 764 gidnumber: 764 homeDirectory: /home/uid764 dn: cn=user765,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user765 
sn: user765 uid: uid765 givenname: givenname765 description: description765 userPassword: password765 mail: uid765 uidnumber: 765 gidnumber: 765 homeDirectory: /home/uid765 dn: cn=user766,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user766 sn: user766 uid: uid766 givenname: givenname766 description: description766 userPassword: password766 mail: uid766 uidnumber: 766 gidnumber: 766 homeDirectory: /home/uid766 dn: cn=user767,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user767 sn: user767 uid: uid767 givenname: givenname767 description: description767 userPassword: password767 mail: uid767 uidnumber: 767 gidnumber: 767 homeDirectory: /home/uid767 dn: cn=user768,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user768 sn: user768 uid: uid768 givenname: givenname768 description: description768 userPassword: password768 mail: uid768 uidnumber: 768 gidnumber: 768 homeDirectory: /home/uid768 dn: cn=user769,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user769 sn: user769 uid: uid769 givenname: givenname769 description: description769 userPassword: password769 mail: uid769 uidnumber: 769 gidnumber: 769 homeDirectory: /home/uid769 dn: cn=user770,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user770 sn: user770 uid: uid770 givenname: givenname770 description: description770 userPassword: password770 mail: uid770 uidnumber: 770 gidnumber: 770 homeDirectory: /home/uid770 dn: cn=user771,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user771 sn: user771 uid: uid771 givenname: givenname771 description: description771 userPassword: password771 mail: uid771 uidnumber: 771 gidnumber: 771 homeDirectory: /home/uid771 dn: cn=user772,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user772 sn: user772 uid: uid772 givenname: givenname772 description: description772 userPassword: password772 mail: uid772 uidnumber: 772 gidnumber: 772 homeDirectory: /home/uid772 dn: cn=user773,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user773 sn: user773 uid: uid773 givenname: givenname773 description: description773 userPassword: password773 mail: uid773 uidnumber: 773 gidnumber: 773 homeDirectory: /home/uid773 dn: cn=user774,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user774 sn: user774 uid: uid774 givenname: givenname774 description: description774 userPassword: password774 mail: uid774 uidnumber: 774 gidnumber: 774 homeDirectory: /home/uid774 dn: cn=user775,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user775 sn: user775 uid: uid775 givenname: givenname775 description: description775 userPassword: password775 mail: uid775 uidnumber: 775 gidnumber: 775 homeDirectory: /home/uid775 dn: cn=user776,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user776 sn: user776 uid: uid776 givenname: givenname776 description: description776 userPassword: password776 mail: uid776 
uidnumber: 776 gidnumber: 776 homeDirectory: /home/uid776 dn: cn=user777,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user777 sn: user777 uid: uid777 givenname: givenname777 description: description777 userPassword: password777 mail: uid777 uidnumber: 777 gidnumber: 777 homeDirectory: /home/uid777 dn: cn=user778,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user778 sn: user778 uid: uid778 givenname: givenname778 description: description778 userPassword: password778 mail: uid778 uidnumber: 778 gidnumber: 778 homeDirectory: /home/uid778 dn: cn=user779,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user779 sn: user779 uid: uid779 givenname: givenname779 description: description779 userPassword: password779 mail: uid779 uidnumber: 779 gidnumber: 779 homeDirectory: /home/uid779 dn: cn=user780,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user780 sn: user780 uid: uid780 givenname: givenname780 description: description780 userPassword: password780 mail: uid780 uidnumber: 780 gidnumber: 780 homeDirectory: /home/uid780 dn: cn=user781,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user781 sn: user781 uid: uid781 givenname: givenname781 description: description781 userPassword: password781 mail: uid781 uidnumber: 781 gidnumber: 781 homeDirectory: /home/uid781 dn: cn=user782,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user782 
sn: user782 uid: uid782 givenname: givenname782 description: description782 userPassword: password782 mail: uid782 uidnumber: 782 gidnumber: 782 homeDirectory: /home/uid782 dn: cn=user783,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user783 sn: user783 uid: uid783 givenname: givenname783 description: description783 userPassword: password783 mail: uid783 uidnumber: 783 gidnumber: 783 homeDirectory: /home/uid783 dn: cn=user784,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user784 sn: user784 uid: uid784 givenname: givenname784 description: description784 userPassword: password784 mail: uid784 uidnumber: 784 gidnumber: 784 homeDirectory: /home/uid784 dn: cn=user785,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user785 sn: user785 uid: uid785 givenname: givenname785 description: description785 userPassword: password785 mail: uid785 uidnumber: 785 gidnumber: 785 homeDirectory: /home/uid785 dn: cn=user786,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user786 sn: user786 uid: uid786 givenname: givenname786 description: description786 userPassword: password786 mail: uid786 uidnumber: 786 gidnumber: 786 homeDirectory: /home/uid786 dn: cn=user787,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user787 sn: user787 uid: uid787 givenname: givenname787 description: description787 userPassword: password787 mail: uid787 uidnumber: 787 gidnumber: 787 homeDirectory: /home/uid787 dn: cn=user788,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user788 sn: user788 uid: uid788 givenname: givenname788 description: description788 userPassword: password788 mail: uid788 uidnumber: 788 gidnumber: 788 homeDirectory: /home/uid788 dn: cn=user789,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user789 sn: user789 uid: uid789 givenname: givenname789 description: description789 userPassword: password789 mail: uid789 uidnumber: 789 gidnumber: 789 homeDirectory: /home/uid789 dn: cn=user790,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user790 sn: user790 uid: uid790 givenname: givenname790 description: description790 userPassword: password790 mail: uid790 uidnumber: 790 gidnumber: 790 homeDirectory: /home/uid790 dn: cn=user791,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user791 sn: user791 uid: uid791 givenname: givenname791 description: description791 userPassword: password791 mail: uid791 uidnumber: 791 gidnumber: 791 homeDirectory: /home/uid791 dn: cn=user792,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user792 sn: user792 uid: uid792 givenname: givenname792 description: description792 userPassword: password792 mail: uid792 uidnumber: 792 gidnumber: 792 homeDirectory: /home/uid792 dn: cn=user793,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user793 sn: user793 uid: uid793 givenname: givenname793 description: description793 userPassword: password793 mail: uid793 
uidnumber: 793 gidnumber: 793 homeDirectory: /home/uid793 dn: cn=user794,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user794 sn: user794 uid: uid794 givenname: givenname794 description: description794 userPassword: password794 mail: uid794 uidnumber: 794 gidnumber: 794 homeDirectory: /home/uid794 dn: cn=user795,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user795 sn: user795 uid: uid795 givenname: givenname795 description: description795 userPassword: password795 mail: uid795 uidnumber: 795 gidnumber: 795 homeDirectory: /home/uid795 dn: cn=user796,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user796 sn: user796 uid: uid796 givenname: givenname796 description: description796 userPassword: password796 mail: uid796 uidnumber: 796 gidnumber: 796 homeDirectory: /home/uid796 dn: cn=user797,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user797 sn: user797 uid: uid797 givenname: givenname797 description: description797 userPassword: password797 mail: uid797 uidnumber: 797 gidnumber: 797 homeDirectory: /home/uid797 dn: cn=user798,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user798 sn: user798 uid: uid798 givenname: givenname798 description: description798 userPassword: password798 mail: uid798 uidnumber: 798 gidnumber: 798 homeDirectory: /home/uid798 dn: cn=user799,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user799 
sn: user799 uid: uid799 givenname: givenname799 description: description799 userPassword: password799 mail: uid799 uidnumber: 799 gidnumber: 799 homeDirectory: /home/uid799 dn: cn=user800,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user800 sn: user800 uid: uid800 givenname: givenname800 description: description800 userPassword: password800 mail: uid800 uidnumber: 800 gidnumber: 800 homeDirectory: /home/uid800 dn: cn=user801,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user801 sn: user801 uid: uid801 givenname: givenname801 description: description801 userPassword: password801 mail: uid801 uidnumber: 801 gidnumber: 801 homeDirectory: /home/uid801 dn: cn=user802,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user802 sn: user802 uid: uid802 givenname: givenname802 description: description802 userPassword: password802 mail: uid802 uidnumber: 802 gidnumber: 802 homeDirectory: /home/uid802 dn: cn=user803,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user803 sn: user803 uid: uid803 givenname: givenname803 description: description803 userPassword: password803 mail: uid803 uidnumber: 803 gidnumber: 803 homeDirectory: /home/uid803 dn: cn=user804,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user804 sn: user804 uid: uid804 givenname: givenname804 description: description804 userPassword: password804 mail: uid804 uidnumber: 804 gidnumber: 804 homeDirectory: /home/uid804 dn: cn=user805,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user805 sn: user805 uid: uid805 givenname: givenname805 description: description805 userPassword: password805 mail: uid805 uidnumber: 805 gidnumber: 805 homeDirectory: /home/uid805 dn: cn=user806,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user806 sn: user806 uid: uid806 givenname: givenname806 description: description806 userPassword: password806 mail: uid806 uidnumber: 806 gidnumber: 806 homeDirectory: /home/uid806 dn: cn=user807,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user807 sn: user807 uid: uid807 givenname: givenname807 description: description807 userPassword: password807 mail: uid807 uidnumber: 807 gidnumber: 807 homeDirectory: /home/uid807 dn: cn=user808,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user808 sn: user808 uid: uid808 givenname: givenname808 description: description808 userPassword: password808 mail: uid808 uidnumber: 808 gidnumber: 808 homeDirectory: /home/uid808 dn: cn=user809,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user809 sn: user809 uid: uid809 givenname: givenname809 description: description809 userPassword: password809 mail: uid809 uidnumber: 809 gidnumber: 809 homeDirectory: /home/uid809 dn: cn=user810,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user810 sn: user810 uid: uid810 givenname: givenname810 description: description810 userPassword: password810 mail: uid810 
uidnumber: 810 gidnumber: 810 homeDirectory: /home/uid810 dn: cn=user811,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user811 sn: user811 uid: uid811 givenname: givenname811 description: description811 userPassword: password811 mail: uid811 uidnumber: 811 gidnumber: 811 homeDirectory: /home/uid811 dn: cn=user812,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user812 sn: user812 uid: uid812 givenname: givenname812 description: description812 userPassword: password812 mail: uid812 uidnumber: 812 gidnumber: 812 homeDirectory: /home/uid812 dn: cn=user813,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user813 sn: user813 uid: uid813 givenname: givenname813 description: description813 userPassword: password813 mail: uid813 uidnumber: 813 gidnumber: 813 homeDirectory: /home/uid813 dn: cn=user814,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user814 sn: user814 uid: uid814 givenname: givenname814 description: description814 userPassword: password814 mail: uid814 uidnumber: 814 gidnumber: 814 homeDirectory: /home/uid814 dn: cn=user815,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user815 sn: user815 uid: uid815 givenname: givenname815 description: description815 userPassword: password815 mail: uid815 uidnumber: 815 gidnumber: 815 homeDirectory: /home/uid815 dn: cn=user816,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user816 
sn: user816 uid: uid816 givenname: givenname816 description: description816 userPassword: password816 mail: uid816 uidnumber: 816 gidnumber: 816 homeDirectory: /home/uid816 dn: cn=user817,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user817 sn: user817 uid: uid817 givenname: givenname817 description: description817 userPassword: password817 mail: uid817 uidnumber: 817 gidnumber: 817 homeDirectory: /home/uid817 dn: cn=user818,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user818 sn: user818 uid: uid818 givenname: givenname818 description: description818 userPassword: password818 mail: uid818 uidnumber: 818 gidnumber: 818 homeDirectory: /home/uid818 dn: cn=user819,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user819 sn: user819 uid: uid819 givenname: givenname819 description: description819 userPassword: password819 mail: uid819 uidnumber: 819 gidnumber: 819 homeDirectory: /home/uid819 dn: cn=user820,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user820 sn: user820 uid: uid820 givenname: givenname820 description: description820 userPassword: password820 mail: uid820 uidnumber: 820 gidnumber: 820 homeDirectory: /home/uid820 dn: cn=user821,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user821 sn: user821 uid: uid821 givenname: givenname821 description: description821 userPassword: password821 mail: uid821 uidnumber: 821 gidnumber: 821 homeDirectory: /home/uid821 dn: cn=user822,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user822 sn: user822 uid: uid822 givenname: givenname822 description: description822 userPassword: password822 mail: uid822 uidnumber: 822 gidnumber: 822 homeDirectory: /home/uid822 dn: cn=user823,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user823 sn: user823 uid: uid823 givenname: givenname823 description: description823 userPassword: password823 mail: uid823 uidnumber: 823 gidnumber: 823 homeDirectory: /home/uid823 dn: cn=user824,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user824 sn: user824 uid: uid824 givenname: givenname824 description: description824 userPassword: password824 mail: uid824 uidnumber: 824 gidnumber: 824 homeDirectory: /home/uid824 dn: cn=user825,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user825 sn: user825 uid: uid825 givenname: givenname825 description: description825 userPassword: password825 mail: uid825 uidnumber: 825 gidnumber: 825 homeDirectory: /home/uid825 dn: cn=user826,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user826 sn: user826 uid: uid826 givenname: givenname826 description: description826 userPassword: password826 mail: uid826 uidnumber: 826 gidnumber: 826 homeDirectory: /home/uid826 dn: cn=user827,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user827 sn: user827 uid: uid827 givenname: givenname827 description: description827 userPassword: password827 mail: uid827 
uidnumber: 827 gidnumber: 827 homeDirectory: /home/uid827 dn: cn=user828,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user828 sn: user828 uid: uid828 givenname: givenname828 description: description828 userPassword: password828 mail: uid828 uidnumber: 828 gidnumber: 828 homeDirectory: /home/uid828 dn: cn=user829,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user829 sn: user829 uid: uid829 givenname: givenname829 description: description829 userPassword: password829 mail: uid829 uidnumber: 829 gidnumber: 829 homeDirectory: /home/uid829 dn: cn=user830,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user830 sn: user830 uid: uid830 givenname: givenname830 description: description830 userPassword: password830 mail: uid830 uidnumber: 830 gidnumber: 830 homeDirectory: /home/uid830 dn: cn=user831,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user831 sn: user831 uid: uid831 givenname: givenname831 description: description831 userPassword: password831 mail: uid831 uidnumber: 831 gidnumber: 831 homeDirectory: /home/uid831 dn: cn=user832,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user832 sn: user832 uid: uid832 givenname: givenname832 description: description832 userPassword: password832 mail: uid832 uidnumber: 832 gidnumber: 832 homeDirectory: /home/uid832 dn: cn=user833,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user833 
sn: user833 uid: uid833 givenname: givenname833 description: description833 userPassword: password833 mail: uid833 uidnumber: 833 gidnumber: 833 homeDirectory: /home/uid833 dn: cn=user834,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user834 sn: user834 uid: uid834 givenname: givenname834 description: description834 userPassword: password834 mail: uid834 uidnumber: 834 gidnumber: 834 homeDirectory: /home/uid834 dn: cn=user835,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user835 sn: user835 uid: uid835 givenname: givenname835 description: description835 userPassword: password835 mail: uid835 uidnumber: 835 gidnumber: 835 homeDirectory: /home/uid835 dn: cn=user836,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user836 sn: user836 uid: uid836 givenname: givenname836 description: description836 userPassword: password836 mail: uid836 uidnumber: 836 gidnumber: 836 homeDirectory: /home/uid836 dn: cn=user837,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user837 sn: user837 uid: uid837 givenname: givenname837 description: description837 userPassword: password837 mail: uid837 uidnumber: 837 gidnumber: 837 homeDirectory: /home/uid837 dn: cn=user838,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user838 sn: user838 uid: uid838 givenname: givenname838 description: description838 userPassword: password838 mail: uid838 uidnumber: 838 gidnumber: 838 homeDirectory: /home/uid838 dn: cn=user839,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user839 sn: user839 uid: uid839 givenname: givenname839 description: description839 userPassword: password839 mail: uid839 uidnumber: 839 gidnumber: 839 homeDirectory: /home/uid839 dn: cn=user840,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user840 sn: user840 uid: uid840 givenname: givenname840 description: description840 userPassword: password840 mail: uid840 uidnumber: 840 gidnumber: 840 homeDirectory: /home/uid840 dn: cn=user841,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user841 sn: user841 uid: uid841 givenname: givenname841 description: description841 userPassword: password841 mail: uid841 uidnumber: 841 gidnumber: 841 homeDirectory: /home/uid841 dn: cn=user842,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user842 sn: user842 uid: uid842 givenname: givenname842 description: description842 userPassword: password842 mail: uid842 uidnumber: 842 gidnumber: 842 homeDirectory: /home/uid842 dn: cn=user843,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user843 sn: user843 uid: uid843 givenname: givenname843 description: description843 userPassword: password843 mail: uid843 uidnumber: 843 gidnumber: 843 homeDirectory: /home/uid843 dn: cn=user844,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user844 sn: user844 uid: uid844 givenname: givenname844 description: description844 userPassword: password844 mail: uid844 
uidnumber: 844 gidnumber: 844 homeDirectory: /home/uid844 dn: cn=user845,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user845 sn: user845 uid: uid845 givenname: givenname845 description: description845 userPassword: password845 mail: uid845 uidnumber: 845 gidnumber: 845 homeDirectory: /home/uid845 dn: cn=user846,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user846 sn: user846 uid: uid846 givenname: givenname846 description: description846 userPassword: password846 mail: uid846 uidnumber: 846 gidnumber: 846 homeDirectory: /home/uid846 dn: cn=user847,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user847 sn: user847 uid: uid847 givenname: givenname847 description: description847 userPassword: password847 mail: uid847 uidnumber: 847 gidnumber: 847 homeDirectory: /home/uid847 dn: cn=user848,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user848 sn: user848 uid: uid848 givenname: givenname848 description: description848 userPassword: password848 mail: uid848 uidnumber: 848 gidnumber: 848 homeDirectory: /home/uid848 dn: cn=user849,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user849 sn: user849 uid: uid849 givenname: givenname849 description: description849 userPassword: password849 mail: uid849 uidnumber: 849 gidnumber: 849 homeDirectory: /home/uid849 dn: cn=user850,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user850 
sn: user850 uid: uid850 givenname: givenname850 description: description850 userPassword: password850 mail: uid850 uidnumber: 850 gidnumber: 850 homeDirectory: /home/uid850 dn: cn=user851,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user851 sn: user851 uid: uid851 givenname: givenname851 description: description851 userPassword: password851 mail: uid851 uidnumber: 851 gidnumber: 851 homeDirectory: /home/uid851 dn: cn=user852,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user852 sn: user852 uid: uid852 givenname: givenname852 description: description852 userPassword: password852 mail: uid852 uidnumber: 852 gidnumber: 852 homeDirectory: /home/uid852 dn: cn=user853,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user853 sn: user853 uid: uid853 givenname: givenname853 description: description853 userPassword: password853 mail: uid853 uidnumber: 853 gidnumber: 853 homeDirectory: /home/uid853 dn: cn=user854,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user854 sn: user854 uid: uid854 givenname: givenname854 description: description854 userPassword: password854 mail: uid854 uidnumber: 854 gidnumber: 854 homeDirectory: /home/uid854 dn: cn=user855,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user855 sn: user855 uid: uid855 givenname: givenname855 description: description855 userPassword: password855 mail: uid855 uidnumber: 855 gidnumber: 855 homeDirectory: /home/uid855 dn: cn=user856,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user856 sn: user856 uid: uid856 givenname: givenname856 description: description856 userPassword: password856 mail: uid856 uidnumber: 856 gidnumber: 856 homeDirectory: /home/uid856 dn: cn=user857,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user857 sn: user857 uid: uid857 givenname: givenname857 description: description857 userPassword: password857 mail: uid857 uidnumber: 857 gidnumber: 857 homeDirectory: /home/uid857 dn: cn=user858,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user858 sn: user858 uid: uid858 givenname: givenname858 description: description858 userPassword: password858 mail: uid858 uidnumber: 858 gidnumber: 858 homeDirectory: /home/uid858 dn: cn=user859,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user859 sn: user859 uid: uid859 givenname: givenname859 description: description859 userPassword: password859 mail: uid859 uidnumber: 859 gidnumber: 859 homeDirectory: /home/uid859 dn: cn=user860,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user860 sn: user860 uid: uid860 givenname: givenname860 description: description860 userPassword: password860 mail: uid860 uidnumber: 860 gidnumber: 860 homeDirectory: /home/uid860 dn: cn=user861,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user861 sn: user861 uid: uid861 givenname: givenname861 description: description861 userPassword: password861 mail: uid861 
uidnumber: 861 gidnumber: 861 homeDirectory: /home/uid861 dn: cn=user862,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user862 sn: user862 uid: uid862 givenname: givenname862 description: description862 userPassword: password862 mail: uid862 uidnumber: 862 gidnumber: 862 homeDirectory: /home/uid862 dn: cn=user863,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user863 sn: user863 uid: uid863 givenname: givenname863 description: description863 userPassword: password863 mail: uid863 uidnumber: 863 gidnumber: 863 homeDirectory: /home/uid863 dn: cn=user864,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user864 sn: user864 uid: uid864 givenname: givenname864 description: description864 userPassword: password864 mail: uid864 uidnumber: 864 gidnumber: 864 homeDirectory: /home/uid864 dn: cn=user865,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user865 sn: user865 uid: uid865 givenname: givenname865 description: description865 userPassword: password865 mail: uid865 uidnumber: 865 gidnumber: 865 homeDirectory: /home/uid865 dn: cn=user866,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user866 sn: user866 uid: uid866 givenname: givenname866 description: description866 userPassword: password866 mail: uid866 uidnumber: 866 gidnumber: 866 homeDirectory: /home/uid866 dn: cn=user867,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user867 
sn: user867 uid: uid867 givenname: givenname867 description: description867 userPassword: password867 mail: uid867 uidnumber: 867 gidnumber: 867 homeDirectory: /home/uid867 dn: cn=user868,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user868 sn: user868 uid: uid868 givenname: givenname868 description: description868 userPassword: password868 mail: uid868 uidnumber: 868 gidnumber: 868 homeDirectory: /home/uid868 dn: cn=user869,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user869 sn: user869 uid: uid869 givenname: givenname869 description: description869 userPassword: password869 mail: uid869 uidnumber: 869 gidnumber: 869 homeDirectory: /home/uid869 dn: cn=user870,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user870 sn: user870 uid: uid870 givenname: givenname870 description: description870 userPassword: password870 mail: uid870 uidnumber: 870 gidnumber: 870 homeDirectory: /home/uid870 dn: cn=user871,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user871 sn: user871 uid: uid871 givenname: givenname871 description: description871 userPassword: password871 mail: uid871 uidnumber: 871 gidnumber: 871 homeDirectory: /home/uid871 dn: cn=user872,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user872 sn: user872 uid: uid872 givenname: givenname872 description: description872 userPassword: password872 mail: uid872 uidnumber: 872 gidnumber: 872 homeDirectory: /home/uid872 dn: cn=user873,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user873 sn: user873 uid: uid873 givenname: givenname873 description: description873 userPassword: password873 mail: uid873 uidnumber: 873 gidnumber: 873 homeDirectory: /home/uid873 dn: cn=user874,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user874 sn: user874 uid: uid874 givenname: givenname874 description: description874 userPassword: password874 mail: uid874 uidnumber: 874 gidnumber: 874 homeDirectory: /home/uid874 dn: cn=user875,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user875 sn: user875 uid: uid875 givenname: givenname875 description: description875 userPassword: password875 mail: uid875 uidnumber: 875 gidnumber: 875 homeDirectory: /home/uid875 dn: cn=user876,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user876 sn: user876 uid: uid876 givenname: givenname876 description: description876 userPassword: password876 mail: uid876 uidnumber: 876 gidnumber: 876 homeDirectory: /home/uid876 dn: cn=user877,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user877 sn: user877 uid: uid877 givenname: givenname877 description: description877 userPassword: password877 mail: uid877 uidnumber: 877 gidnumber: 877 homeDirectory: /home/uid877 dn: cn=user878,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user878 sn: user878 uid: uid878 givenname: givenname878 description: description878 userPassword: password878 mail: uid878 
uidnumber: 878 gidnumber: 878 homeDirectory: /home/uid878 dn: cn=user879,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user879 sn: user879 uid: uid879 givenname: givenname879 description: description879 userPassword: password879 mail: uid879 uidnumber: 879 gidnumber: 879 homeDirectory: /home/uid879 dn: cn=user880,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user880 sn: user880 uid: uid880 givenname: givenname880 description: description880 userPassword: password880 mail: uid880 uidnumber: 880 gidnumber: 880 homeDirectory: /home/uid880 dn: cn=user881,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user881 sn: user881 uid: uid881 givenname: givenname881 description: description881 userPassword: password881 mail: uid881 uidnumber: 881 gidnumber: 881 homeDirectory: /home/uid881 dn: cn=user882,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user882 sn: user882 uid: uid882 givenname: givenname882 description: description882 userPassword: password882 mail: uid882 uidnumber: 882 gidnumber: 882 homeDirectory: /home/uid882 dn: cn=user883,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user883 sn: user883 uid: uid883 givenname: givenname883 description: description883 userPassword: password883 mail: uid883 uidnumber: 883 gidnumber: 883 homeDirectory: /home/uid883 dn: cn=user884,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user884 
sn: user884 uid: uid884 givenname: givenname884 description: description884 userPassword: password884 mail: uid884 uidnumber: 884 gidnumber: 884 homeDirectory: /home/uid884 dn: cn=user885,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user885 sn: user885 uid: uid885 givenname: givenname885 description: description885 userPassword: password885 mail: uid885 uidnumber: 885 gidnumber: 885 homeDirectory: /home/uid885 dn: cn=user886,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user886 sn: user886 uid: uid886 givenname: givenname886 description: description886 userPassword: password886 mail: uid886 uidnumber: 886 gidnumber: 886 homeDirectory: /home/uid886 dn: cn=user887,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user887 sn: user887 uid: uid887 givenname: givenname887 description: description887 userPassword: password887 mail: uid887 uidnumber: 887 gidnumber: 887 homeDirectory: /home/uid887 dn: cn=user888,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user888 sn: user888 uid: uid888 givenname: givenname888 description: description888 userPassword: password888 mail: uid888 uidnumber: 888 gidnumber: 888 homeDirectory: /home/uid888 dn: cn=user889,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user889 sn: user889 uid: uid889 givenname: givenname889 description: description889 userPassword: password889 mail: uid889 uidnumber: 889 gidnumber: 889 homeDirectory: /home/uid889 dn: cn=user890,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user890 sn: user890 uid: uid890 givenname: givenname890 description: description890 userPassword: password890 mail: uid890 uidnumber: 890 gidnumber: 890 homeDirectory: /home/uid890 dn: cn=user891,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user891 sn: user891 uid: uid891 givenname: givenname891 description: description891 userPassword: password891 mail: uid891 uidnumber: 891 gidnumber: 891 homeDirectory: /home/uid891 dn: cn=user892,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user892 sn: user892 uid: uid892 givenname: givenname892 description: description892 userPassword: password892 mail: uid892 uidnumber: 892 gidnumber: 892 homeDirectory: /home/uid892 dn: cn=user893,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user893 sn: user893 uid: uid893 givenname: givenname893 description: description893 userPassword: password893 mail: uid893 uidnumber: 893 gidnumber: 893 homeDirectory: /home/uid893 dn: cn=user894,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user894 sn: user894 uid: uid894 givenname: givenname894 description: description894 userPassword: password894 mail: uid894 uidnumber: 894 gidnumber: 894 homeDirectory: /home/uid894 dn: cn=user895,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user895 sn: user895 uid: uid895 givenname: givenname895 description: description895 userPassword: password895 mail: uid895 
uidnumber: 895 gidnumber: 895 homeDirectory: /home/uid895 dn: cn=user896,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user896 sn: user896 uid: uid896 givenname: givenname896 description: description896 userPassword: password896 mail: uid896 uidnumber: 896 gidnumber: 896 homeDirectory: /home/uid896 dn: cn=user897,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user897 sn: user897 uid: uid897 givenname: givenname897 description: description897 userPassword: password897 mail: uid897 uidnumber: 897 gidnumber: 897 homeDirectory: /home/uid897 dn: cn=user898,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user898 sn: user898 uid: uid898 givenname: givenname898 description: description898 userPassword: password898 mail: uid898 uidnumber: 898 gidnumber: 898 homeDirectory: /home/uid898 dn: cn=user899,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user899 sn: user899 uid: uid899 givenname: givenname899 description: description899 userPassword: password899 mail: uid899 uidnumber: 899 gidnumber: 899 homeDirectory: /home/uid899 dn: cn=user900,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user900 sn: user900 uid: uid900 givenname: givenname900 description: description900 userPassword: password900 mail: uid900 uidnumber: 900 gidnumber: 900 homeDirectory: /home/uid900 dn: cn=user901,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user901 
sn: user901 uid: uid901 givenname: givenname901 description: description901 userPassword: password901 mail: uid901 uidnumber: 901 gidnumber: 901 homeDirectory: /home/uid901 dn: cn=user902,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user902 sn: user902 uid: uid902 givenname: givenname902 description: description902 userPassword: password902 mail: uid902 uidnumber: 902 gidnumber: 902 homeDirectory: /home/uid902 dn: cn=user903,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user903 sn: user903 uid: uid903 givenname: givenname903 description: description903 userPassword: password903 mail: uid903 uidnumber: 903 gidnumber: 903 homeDirectory: /home/uid903 dn: cn=user904,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user904 sn: user904 uid: uid904 givenname: givenname904 description: description904 userPassword: password904 mail: uid904 uidnumber: 904 gidnumber: 904 homeDirectory: /home/uid904 dn: cn=user905,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user905 sn: user905 uid: uid905 givenname: givenname905 description: description905 userPassword: password905 mail: uid905 uidnumber: 905 gidnumber: 905 homeDirectory: /home/uid905 dn: cn=user906,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user906 sn: user906 uid: uid906 givenname: givenname906 description: description906 userPassword: password906 mail: uid906 uidnumber: 906 gidnumber: 906 homeDirectory: /home/uid906 dn: cn=user907,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user907 sn: user907 uid: uid907 givenname: givenname907 description: description907 userPassword: password907 mail: uid907 uidnumber: 907 gidnumber: 907 homeDirectory: /home/uid907 dn: cn=user908,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user908 sn: user908 uid: uid908 givenname: givenname908 description: description908 userPassword: password908 mail: uid908 uidnumber: 908 gidnumber: 908 homeDirectory: /home/uid908 dn: cn=user909,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user909 sn: user909 uid: uid909 givenname: givenname909 description: description909 userPassword: password909 mail: uid909 uidnumber: 909 gidnumber: 909 homeDirectory: /home/uid909 dn: cn=user910,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user910 sn: user910 uid: uid910 givenname: givenname910 description: description910 userPassword: password910 mail: uid910 uidnumber: 910 gidnumber: 910 homeDirectory: /home/uid910 dn: cn=user911,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user911 sn: user911 uid: uid911 givenname: givenname911 description: description911 userPassword: password911 mail: uid911 uidnumber: 911 gidnumber: 911 homeDirectory: /home/uid911 dn: cn=user912,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user912 sn: user912 uid: uid912 givenname: givenname912 description: description912 userPassword: password912 mail: uid912 
uidnumber: 912 gidnumber: 912 homeDirectory: /home/uid912 dn: cn=user913,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user913 sn: user913 uid: uid913 givenname: givenname913 description: description913 userPassword: password913 mail: uid913 uidnumber: 913 gidnumber: 913 homeDirectory: /home/uid913 dn: cn=user914,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user914 sn: user914 uid: uid914 givenname: givenname914 description: description914 userPassword: password914 mail: uid914 uidnumber: 914 gidnumber: 914 homeDirectory: /home/uid914 dn: cn=user915,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user915 sn: user915 uid: uid915 givenname: givenname915 description: description915 userPassword: password915 mail: uid915 uidnumber: 915 gidnumber: 915 homeDirectory: /home/uid915 dn: cn=user916,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user916 sn: user916 uid: uid916 givenname: givenname916 description: description916 userPassword: password916 mail: uid916 uidnumber: 916 gidnumber: 916 homeDirectory: /home/uid916 dn: cn=user917,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user917 sn: user917 uid: uid917 givenname: givenname917 description: description917 userPassword: password917 mail: uid917 uidnumber: 917 gidnumber: 917 homeDirectory: /home/uid917 dn: cn=user918,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user918 
sn: user918 uid: uid918 givenname: givenname918 description: description918 userPassword: password918 mail: uid918 uidnumber: 918 gidnumber: 918 homeDirectory: /home/uid918 dn: cn=user919,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user919 sn: user919 uid: uid919 givenname: givenname919 description: description919 userPassword: password919 mail: uid919 uidnumber: 919 gidnumber: 919 homeDirectory: /home/uid919 dn: cn=user920,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user920 sn: user920 uid: uid920 givenname: givenname920 description: description920 userPassword: password920 mail: uid920 uidnumber: 920 gidnumber: 920 homeDirectory: /home/uid920 dn: cn=user921,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user921 sn: user921 uid: uid921 givenname: givenname921 description: description921 userPassword: password921 mail: uid921 uidnumber: 921 gidnumber: 921 homeDirectory: /home/uid921 dn: cn=user922,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user922 sn: user922 uid: uid922 givenname: givenname922 description: description922 userPassword: password922 mail: uid922 uidnumber: 922 gidnumber: 922 homeDirectory: /home/uid922 dn: cn=user923,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user923 sn: user923 uid: uid923 givenname: givenname923 description: description923 userPassword: password923 mail: uid923 uidnumber: 923 gidnumber: 923 homeDirectory: /home/uid923 dn: cn=user924,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user924 sn: user924 uid: uid924 givenname: givenname924 description: description924 userPassword: password924 mail: uid924 uidnumber: 924 gidnumber: 924 homeDirectory: /home/uid924 dn: cn=user925,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user925 sn: user925 uid: uid925 givenname: givenname925 description: description925 userPassword: password925 mail: uid925 uidnumber: 925 gidnumber: 925 homeDirectory: /home/uid925 dn: cn=user926,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user926 sn: user926 uid: uid926 givenname: givenname926 description: description926 userPassword: password926 mail: uid926 uidnumber: 926 gidnumber: 926 homeDirectory: /home/uid926 dn: cn=user927,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user927 sn: user927 uid: uid927 givenname: givenname927 description: description927 userPassword: password927 mail: uid927 uidnumber: 927 gidnumber: 927 homeDirectory: /home/uid927 dn: cn=user928,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user928 sn: user928 uid: uid928 givenname: givenname928 description: description928 userPassword: password928 mail: uid928 uidnumber: 928 gidnumber: 928 homeDirectory: /home/uid928 dn: cn=user929,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user929 sn: user929 uid: uid929 givenname: givenname929 description: description929 userPassword: password929 mail: uid929 
uidnumber: 929 gidnumber: 929 homeDirectory: /home/uid929 dn: cn=user930,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user930 sn: user930 uid: uid930 givenname: givenname930 description: description930 userPassword: password930 mail: uid930 uidnumber: 930 gidnumber: 930 homeDirectory: /home/uid930 dn: cn=user931,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user931 sn: user931 uid: uid931 givenname: givenname931 description: description931 userPassword: password931 mail: uid931 uidnumber: 931 gidnumber: 931 homeDirectory: /home/uid931 dn: cn=user932,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user932 sn: user932 uid: uid932 givenname: givenname932 description: description932 userPassword: password932 mail: uid932 uidnumber: 932 gidnumber: 932 homeDirectory: /home/uid932 dn: cn=user933,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user933 sn: user933 uid: uid933 givenname: givenname933 description: description933 userPassword: password933 mail: uid933 uidnumber: 933 gidnumber: 933 homeDirectory: /home/uid933 dn: cn=user934,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user934 sn: user934 uid: uid934 givenname: givenname934 description: description934 userPassword: password934 mail: uid934 uidnumber: 934 gidnumber: 934 homeDirectory: /home/uid934 dn: cn=user935,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user935 
sn: user935 uid: uid935 givenname: givenname935 description: description935 userPassword: password935 mail: uid935 uidnumber: 935 gidnumber: 935 homeDirectory: /home/uid935 dn: cn=user936,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user936 sn: user936 uid: uid936 givenname: givenname936 description: description936 userPassword: password936 mail: uid936 uidnumber: 936 gidnumber: 936 homeDirectory: /home/uid936 dn: cn=user937,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user937 sn: user937 uid: uid937 givenname: givenname937 description: description937 userPassword: password937 mail: uid937 uidnumber: 937 gidnumber: 937 homeDirectory: /home/uid937 dn: cn=user938,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user938 sn: user938 uid: uid938 givenname: givenname938 description: description938 userPassword: password938 mail: uid938 uidnumber: 938 gidnumber: 938 homeDirectory: /home/uid938 dn: cn=user939,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user939 sn: user939 uid: uid939 givenname: givenname939 description: description939 userPassword: password939 mail: uid939 uidnumber: 939 gidnumber: 939 homeDirectory: /home/uid939 dn: cn=user940,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user940 sn: user940 uid: uid940 givenname: givenname940 description: description940 userPassword: password940 mail: uid940 uidnumber: 940 gidnumber: 940 homeDirectory: /home/uid940 dn: cn=user941,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user941 sn: user941 uid: uid941 givenname: givenname941 description: description941 userPassword: password941 mail: uid941 uidnumber: 941 gidnumber: 941 homeDirectory: /home/uid941 dn: cn=user942,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user942 sn: user942 uid: uid942 givenname: givenname942 description: description942 userPassword: password942 mail: uid942 uidnumber: 942 gidnumber: 942 homeDirectory: /home/uid942 dn: cn=user943,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user943 sn: user943 uid: uid943 givenname: givenname943 description: description943 userPassword: password943 mail: uid943 uidnumber: 943 gidnumber: 943 homeDirectory: /home/uid943 dn: cn=user944,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user944 sn: user944 uid: uid944 givenname: givenname944 description: description944 userPassword: password944 mail: uid944 uidnumber: 944 gidnumber: 944 homeDirectory: /home/uid944 dn: cn=user945,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user945 sn: user945 uid: uid945 givenname: givenname945 description: description945 userPassword: password945 mail: uid945 uidnumber: 945 gidnumber: 945 homeDirectory: /home/uid945 dn: cn=user946,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user946 sn: user946 uid: uid946 givenname: givenname946 description: description946 userPassword: password946 mail: uid946 
uidnumber: 946 gidnumber: 946 homeDirectory: /home/uid946 dn: cn=user947,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user947 sn: user947 uid: uid947 givenname: givenname947 description: description947 userPassword: password947 mail: uid947 uidnumber: 947 gidnumber: 947 homeDirectory: /home/uid947 dn: cn=user948,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user948 sn: user948 uid: uid948 givenname: givenname948 description: description948 userPassword: password948 mail: uid948 uidnumber: 948 gidnumber: 948 homeDirectory: /home/uid948 dn: cn=user949,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user949 sn: user949 uid: uid949 givenname: givenname949 description: description949 userPassword: password949 mail: uid949 uidnumber: 949 gidnumber: 949 homeDirectory: /home/uid949 dn: cn=user950,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user950 sn: user950 uid: uid950 givenname: givenname950 description: description950 userPassword: password950 mail: uid950 uidnumber: 950 gidnumber: 950 homeDirectory: /home/uid950 dn: cn=user951,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user951 sn: user951 uid: uid951 givenname: givenname951 description: description951 userPassword: password951 mail: uid951 uidnumber: 951 gidnumber: 951 homeDirectory: /home/uid951 dn: cn=user952,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user952 
sn: user952 uid: uid952 givenname: givenname952 description: description952 userPassword: password952 mail: uid952 uidnumber: 952 gidnumber: 952 homeDirectory: /home/uid952 dn: cn=user953,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user953 sn: user953 uid: uid953 givenname: givenname953 description: description953 userPassword: password953 mail: uid953 uidnumber: 953 gidnumber: 953 homeDirectory: /home/uid953 dn: cn=user954,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user954 sn: user954 uid: uid954 givenname: givenname954 description: description954 userPassword: password954 mail: uid954 uidnumber: 954 gidnumber: 954 homeDirectory: /home/uid954 dn: cn=user955,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user955 sn: user955 uid: uid955 givenname: givenname955 description: description955 userPassword: password955 mail: uid955 uidnumber: 955 gidnumber: 955 homeDirectory: /home/uid955 dn: cn=user956,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user956 sn: user956 uid: uid956 givenname: givenname956 description: description956 userPassword: password956 mail: uid956 uidnumber: 956 gidnumber: 956 homeDirectory: /home/uid956 dn: cn=user957,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user957 sn: user957 uid: uid957 givenname: givenname957 description: description957 userPassword: password957 mail: uid957 uidnumber: 957 gidnumber: 957 homeDirectory: /home/uid957 dn: cn=user958,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user958 sn: user958 uid: uid958 givenname: givenname958 description: description958 userPassword: password958 mail: uid958 uidnumber: 958 gidnumber: 958 homeDirectory: /home/uid958 dn: cn=user959,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user959 sn: user959 uid: uid959 givenname: givenname959 description: description959 userPassword: password959 mail: uid959 uidnumber: 959 gidnumber: 959 homeDirectory: /home/uid959 dn: cn=user960,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user960 sn: user960 uid: uid960 givenname: givenname960 description: description960 userPassword: password960 mail: uid960 uidnumber: 960 gidnumber: 960 homeDirectory: /home/uid960 dn: cn=user961,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user961 sn: user961 uid: uid961 givenname: givenname961 description: description961 userPassword: password961 mail: uid961 uidnumber: 961 gidnumber: 961 homeDirectory: /home/uid961 dn: cn=user962,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user962 sn: user962 uid: uid962 givenname: givenname962 description: description962 userPassword: password962 mail: uid962 uidnumber: 962 gidnumber: 962 homeDirectory: /home/uid962 dn: cn=user963,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user963 sn: user963 uid: uid963 givenname: givenname963 description: description963 userPassword: password963 mail: uid963 
uidnumber: 963 gidnumber: 963 homeDirectory: /home/uid963 dn: cn=user964,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user964 sn: user964 uid: uid964 givenname: givenname964 description: description964 userPassword: password964 mail: uid964 uidnumber: 964 gidnumber: 964 homeDirectory: /home/uid964 dn: cn=user965,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user965 sn: user965 uid: uid965 givenname: givenname965 description: description965 userPassword: password965 mail: uid965 uidnumber: 965 gidnumber: 965 homeDirectory: /home/uid965 dn: cn=user966,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user966 sn: user966 uid: uid966 givenname: givenname966 description: description966 userPassword: password966 mail: uid966 uidnumber: 966 gidnumber: 966 homeDirectory: /home/uid966 dn: cn=user967,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user967 sn: user967 uid: uid967 givenname: givenname967 description: description967 userPassword: password967 mail: uid967 uidnumber: 967 gidnumber: 967 homeDirectory: /home/uid967 dn: cn=user968,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user968 sn: user968 uid: uid968 givenname: givenname968 description: description968 userPassword: password968 mail: uid968 uidnumber: 968 gidnumber: 968 homeDirectory: /home/uid968 dn: cn=user969,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user969 
sn: user969 uid: uid969 givenname: givenname969 description: description969 userPassword: password969 mail: uid969 uidnumber: 969 gidnumber: 969 homeDirectory: /home/uid969 dn: cn=user970,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user970 sn: user970 uid: uid970 givenname: givenname970 description: description970 userPassword: password970 mail: uid970 uidnumber: 970 gidnumber: 970 homeDirectory: /home/uid970 dn: cn=user971,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user971 sn: user971 uid: uid971 givenname: givenname971 description: description971 userPassword: password971 mail: uid971 uidnumber: 971 gidnumber: 971 homeDirectory: /home/uid971 dn: cn=user972,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user972 sn: user972 uid: uid972 givenname: givenname972 description: description972 userPassword: password972 mail: uid972 uidnumber: 972 gidnumber: 972 homeDirectory: /home/uid972 dn: cn=user973,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user973 sn: user973 uid: uid973 givenname: givenname973 description: description973 userPassword: password973 mail: uid973 uidnumber: 973 gidnumber: 973 homeDirectory: /home/uid973 dn: cn=user974,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user974 sn: user974 uid: uid974 givenname: givenname974 description: description974 userPassword: password974 mail: uid974 uidnumber: 974 gidnumber: 974 homeDirectory: /home/uid974 dn: cn=user975,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user975 sn: user975 uid: uid975 givenname: givenname975 description: description975 userPassword: password975 mail: uid975 uidnumber: 975 gidnumber: 975 homeDirectory: /home/uid975 dn: cn=user976,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user976 sn: user976 uid: uid976 givenname: givenname976 description: description976 userPassword: password976 mail: uid976 uidnumber: 976 gidnumber: 976 homeDirectory: /home/uid976 dn: cn=user977,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user977 sn: user977 uid: uid977 givenname: givenname977 description: description977 userPassword: password977 mail: uid977 uidnumber: 977 gidnumber: 977 homeDirectory: /home/uid977 dn: cn=user978,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user978 sn: user978 uid: uid978 givenname: givenname978 description: description978 userPassword: password978 mail: uid978 uidnumber: 978 gidnumber: 978 homeDirectory: /home/uid978 dn: cn=user979,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user979 sn: user979 uid: uid979 givenname: givenname979 description: description979 userPassword: password979 mail: uid979 uidnumber: 979 gidnumber: 979 homeDirectory: /home/uid979 dn: cn=user980,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user980 sn: user980 uid: uid980 givenname: givenname980 description: description980 userPassword: password980 mail: uid980 
uidnumber: 980 gidnumber: 980 homeDirectory: /home/uid980 dn: cn=user981,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user981 sn: user981 uid: uid981 givenname: givenname981 description: description981 userPassword: password981 mail: uid981 uidnumber: 981 gidnumber: 981 homeDirectory: /home/uid981 dn: cn=user982,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user982 sn: user982 uid: uid982 givenname: givenname982 description: description982 userPassword: password982 mail: uid982 uidnumber: 982 gidnumber: 982 homeDirectory: /home/uid982 dn: cn=user983,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user983 sn: user983 uid: uid983 givenname: givenname983 description: description983 userPassword: password983 mail: uid983 uidnumber: 983 gidnumber: 983 homeDirectory: /home/uid983 dn: cn=user984,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user984 sn: user984 uid: uid984 givenname: givenname984 description: description984 userPassword: password984 mail: uid984 uidnumber: 984 gidnumber: 984 homeDirectory: /home/uid984 dn: cn=user985,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user985 sn: user985 uid: uid985 givenname: givenname985 description: description985 userPassword: password985 mail: uid985 uidnumber: 985 gidnumber: 985 homeDirectory: /home/uid985 dn: cn=user986,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user986 
sn: user986 uid: uid986 givenname: givenname986 description: description986 userPassword: password986 mail: uid986 uidnumber: 986 gidnumber: 986 homeDirectory: /home/uid986 dn: cn=user987,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user987 sn: user987 uid: uid987 givenname: givenname987 description: description987 userPassword: password987 mail: uid987 uidnumber: 987 gidnumber: 987 homeDirectory: /home/uid987 dn: cn=user988,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user988 sn: user988 uid: uid988 givenname: givenname988 description: description988 userPassword: password988 mail: uid988 uidnumber: 988 gidnumber: 988 homeDirectory: /home/uid988 dn: cn=user989,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user989 sn: user989 uid: uid989 givenname: givenname989 description: description989 userPassword: password989 mail: uid989 uidnumber: 989 gidnumber: 989 homeDirectory: /home/uid989 dn: cn=user990,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user990 sn: user990 uid: uid990 givenname: givenname990 description: description990 userPassword: password990 mail: uid990 uidnumber: 990 gidnumber: 990 homeDirectory: /home/uid990 dn: cn=user991,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user991 sn: user991 uid: uid991 givenname: givenname991 description: description991 userPassword: password991 mail: uid991 uidnumber: 991 gidnumber: 991 homeDirectory: /home/uid991 dn: cn=user992,ou=People,dc=example,dc=com objectClass: top 
objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user992 sn: user992 uid: uid992 givenname: givenname992 description: description992 userPassword: password992 mail: uid992 uidnumber: 992 gidnumber: 992 homeDirectory: /home/uid992 dn: cn=user993,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user993 sn: user993 uid: uid993 givenname: givenname993 description: description993 userPassword: password993 mail: uid993 uidnumber: 993 gidnumber: 993 homeDirectory: /home/uid993 dn: cn=user994,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user994 sn: user994 uid: uid994 givenname: givenname994 description: description994 userPassword: password994 mail: uid994 uidnumber: 994 gidnumber: 994 homeDirectory: /home/uid994 dn: cn=user995,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user995 sn: user995 uid: uid995 givenname: givenname995 description: description995 userPassword: password995 mail: uid995 uidnumber: 995 gidnumber: 995 homeDirectory: /home/uid995 dn: cn=user996,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user996 sn: user996 uid: uid996 givenname: givenname996 description: description996 userPassword: password996 mail: uid996 uidnumber: 996 gidnumber: 996 homeDirectory: /home/uid996 dn: cn=user997,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user997 sn: user997 uid: uid997 givenname: givenname997 description: description997 userPassword: password997 mail: uid997 
uidnumber: 997 gidnumber: 997 homeDirectory: /home/uid997 dn: cn=user998,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user998 sn: user998 uid: uid998 givenname: givenname998 description: description998 userPassword: password998 mail: uid998 uidnumber: 998 gidnumber: 998 homeDirectory: /home/uid998 dn: cn=user999,ou=People,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectClass: posixAccount cn: user999 sn: user999 uid: uid999 givenname: givenname999 description: description999 userPassword: password999 mail: uid999 uidnumber: 999 gidnumber: 999 homeDirectory: /home/uid999 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket49121/000077500000000000000000000000001421664411400236315ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket49121/utf8str.txt000066400000000000000000000000201421664411400260010ustar00rootroot00000000000000ã‚ã„ã†ãˆãŠ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket49441/000077500000000000000000000000001421664411400236365ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/ticket49441/binary.ldif000066400000000000000000001776501421664411400260020ustar00rootroot00000000000000version: 1 # entry-id: 1 dn: dc=example,dc=com objectClass: domain objectClass: top dc: example nsUniqueId: f49ca102-c2ee11e7-9170b029-e68fda34 creatorsName: modifiersName: createTimestamp: 20171106123544Z modifyTimestamp: 20171106123544Z # entry-id: 2 dn: ou=binary,dc=example,dc=com certificateRevocationList;binary:: MIITbjCCElYCAQEwDQYJKoZIhvcNAQEFBQAwVzELMAk GA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9y aXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQRcNMTcxMDE2MTUxNjAyWhcNMTcxMDE5MTUxNjAyWjCCE ZcwIwIEV4cj0hcNMTYxMTMwMDAyNDA0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI9EXDTE2MTEzMDAwMj 
gwNVowDDAKBgNVHRUEAwoBADAjAgRXhyPPFw0xNjExMzAwMDIxNDJaMAwwCgYDVR0VBAMKAQAwIwI EV4cjzhcNMTYxMTMwMDAzMTE0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI2gXDTE2MTEyOTE1MTM0M1ow DDAKBgNVHRUEAwoBADA9AgRXhwCzFw0xNjExMDIyMjQ0NThaMCYwCgYDVR0VBAMKAQEwGAYDVR0YB BEYDzIwMTYwOTA3MDEzODU1WjAjAgRXhvE4Fw0xNjA4MDExNDA5MTFaMAwwCgYDVR0VBAMKAQAwIw IEV4bxNxcNMTYwODAxMTQwODU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD2YYXDTE2MDcwNTE1NTg0NVo wDDAKBgNVHRUEAwoBADAjAgRJA9mFFw0xNjA3MDUxNTU1MTlaMAwwCgYDVR0VBAMKAQAwIwIESQPT cRcNMTYxMTMwMDAyODA1WjAMMAoGA1UdFQQDCgEAMCMCBEkD03AXDTE2MTEzMDAwMjgwNVowDDAKB gNVHRUEAwoBADAjAgRJA9NuFw0xNjA2MjAxNjQ4NTlaMAwwCgYDVR0VBAMKAQAwIwIESQPSOBcNMT YwNjE3MTU1OTM4WjAMMAoGA1UdFQQDCgEAMCMCBEkD0jcXDTE2MTEzMDAwMzExNFowDDAKBgNVHRU EAwoBADAjAgRJA9I0Fw0xNjA2MjAxNzAyMDJaMAwwCgYDVR0VBAMKAQAwIwIESQPSMxcNMTYwNjIw MTcwMjAyWjAMMAoGA1UdFQQDCgEAMCMCBEkD0jEXDTE2MDYxNzE1NDgwMlowDDAKBgNVHRUEAwoBA DAjAgRJA9IwFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPSLhcNMTYwNjE3MTU0MD A2WjAMMAoGA1UdFQQDCgEAMCMCBEkD0VIXDTE2MTEzMDAwMzExNFowDDAKBgNVHRUEAwoBADAjAgR JA9FRFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPRTxcNMTYwNjE1MTkyMDU4WjAM MAoGA1UdFQQDCgEAMCMCBEkD0U4XDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FLF w0xNjA2MTUxODQ5MzZaMAwwCgYDVR0VBAMKAQAwIwIESQPRShcNMTYwNjE1MTQzNDU1WjAMMAoGA1 UdFQQDCgEAMCMCBEkD0UkXDTE2MDYxNTE0MzEyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FIFw0xNjA 2MTUxNDMwMTdaMAwwCgYDVR0VBAMKAQAwIwIESQPQexcNMTYwNjE1MTkyNjIyWjAMMAoGA1UdFQQD CgEAMCMCBEkD0HoXDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9B4Fw0xNjA2MTQxM TQ3MzlaMAwwCgYDVR0VBAMKAQAwIwIESQPQdxcNMTYwNjE1MTkyNTU5WjAMMAoGA1UdFQQDCgEAMC MCBEkD0HYXDTE2MDYxNTE5MjU1OVowDDAKBgNVHRUEAwoBADAjAgRJA9B0Fw0xNjA2MTQxMTQzMzh aMAwwCgYDVR0VBAMKAQAwIwIESQPQcxcNMTYwNjE0MTE0MDU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD 0HIXDTE2MDYxNTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA9BwFw0xNjA2MTQxMTE3NDlaMAwwC gYDVR0VBAMKAQAwIwIESQPLhhcNMTYwNjAxMjI1NTA1WjAMMAoGA1UdFQQDCgEAMCMCBEkDyRgXDT E2MDUyNjIxNDQwOFowDDAKBgNVHRUEAwoBADAjAgRJA8kXFw0xNjA1MjYyMTQzMjdaMAwwCgYDVR0 
VBAMKAQAwIwIESQPIsRcNMTYwNTI2MTUxOTMwWjAMMAoGA1UdFQQDCgEAMCMCBEkDmmEXDTE2MDYx NTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA5pgFw0xNjA2MTUxOTI1NDZaMAwwCgYDVR0VBAMKA QAwIwIESQOZ9RcNMTYwNjE1MTkyNDQzWjAMMAoGA1UdFQQDCgEFMCMCBEkDmfQXDTE2MDYxNTE5Mj Q0M1owDDAKBgNVHRUEAwoBBTAjAgRJA5nyFw0xNjAyMDExOTM0MTlaMAwwCgYDVR0VBAMKAQAwIwI ESQOXgBcNMTYwMTI2MTUwNTE5WjAMMAoGA1UdFQQDCgEAMCMCBEkDh0oXDTE1MTIxNzE3MzE0NVow DDAKBgNVHRUEAwoBAzAjAgRJA3ZBFw0xNjAyMDIxNDM3MTZaMAwwCgYDVR0VBAMKAQMwIwIESQN2Q BcNMTYwMjAyMTQzNzAzWjAMMAoGA1UdFQQDCgEDMCMCBEkDXsUXDTE1MTIwODIwMTM0OVowDDAKBg NVHRUEAwoBAzAjAgRJA17EFw0xNTEyMDgyMDEzNDlaMAwwCgYDVR0VBAMKAQMwIwIESQNewxcNMTU xMjA4MjAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDWrkXDTE1MTIwODIwMTM1MFowDDAKBgNVHRUE AwoBAzAjAgRJA1q4Fw0xNTEyMDgyMDEzNTBaMAwwCgYDVR0VBAMKAQMwIwIESQNatxcNMTUxMjA4M jAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDNjMXDTE2MDcwNTIwMDcxMlowDDAKBgNVHRUEAwoBBT AjAgRJAwpwFw0xNjA2MTUxOTQwMDNaMAwwCgYDVR0VBAMKAQAwIwIESQMKbxcNMTYwNjE1MTk0MDA zWjAMMAoGA1UdFQQDCgEAMCMCBEkC2Z0XDTE0MTAyMDE2NDgzN1owDDAKBgNVHRUEAwoBBTAjAgRJ AthhFw0xNDEwMjAxNjQ4MzdaMAwwCgYDVR0VBAMKAQUwIwIESQLX7RcNMTQxMTEyMjAyNjA1WjAMM AoGA1UdFQQDCgEFMCMCBEkC1+sXDTE0MTAyNzE1NTI1OVowDDAKBgNVHRUEAwoBAzAjAgRJAn2hFw 0xNDAzMTMxNjUwMjZaMAwwCgYDVR0VBAMKAQAwIwIESQJ9MxcNMTQwMzEyMTUxODI5WjAMMAoGA1U dFQQDCgEAMCMCBEkCfTEXDTE0MDMxMjExMzMzNVowDDAKBgNVHRUEAwoBADAjAgRJAn0wFw0xNDAz MTIxMjE4MjFaMAwwCgYDVR0VBAMKAQAwIwIESQJ8YxcNMTQwMzEyMTEyNzEwWjAMMAoGA1UdFQQDC gEAMCMCBEkCfGEXDTE0MDMxMDE0NTYxNlowDDAKBgNVHRUEAwoBADAjAgRJAnxgFw0xNDAzMTAxNT A4MTVaMAwwCgYDVR0VBAMKAQAwIwIESQJ8XhcNMTQwMzEwMTIzMDM3WjAMMAoGA1UdFQQDCgEAMCM CBEkCfF0XDTE0MDMxMDE0NTMyMlowDDAKBgNVHRUEAwoBADAjAgRJAnxbFw0xNDAzMTAxMDQ5NDBa MAwwCgYDVR0VBAMKAQAwIwIESQJ8WhcNMTQwMzEwMTIwOTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCe ywXDTE0MDMwNzEwMzcxM1owDDAKBgNVHRUEAwoBADAjAgRJAnsrFw0xNDAzMTAxMDQ3MTdaMAwwCg YDVR0VBAMKAQAwIwIESQJ6xRcNMTQwMzA2MTEwMDM3WjAMMAoGA1UdFQQDCgEAMCMCBEkCesQXDTE 0MDMwNzEwMzMyNVowDDAKBgNVHRUEAwoBADAjAgRJAm7jFw0xNDAyMDQyMTMwMjFaMAwwCgYDVR0V 
BAMKAQAwIwIESQJrWhcNMTQwMTI3MTIyMTI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCa1kXDTE0MDMwN jEwNTY0OFowDDAKBgNVHRUEAwoBADAjAgRJAmjyFw0xNDAxMjExMDEyMTlaMAwwCgYDVR0VBAMKAQ AwIwIESQJiPRcNMTQwMTAyMTYwMjIxWjAMMAoGA1UdFQQDCgEAMCMCBEkCXFgXDTEzMTIxODE3NTI wNVowDDAKBgNVHRUEAwoBADAjAgRJAlW1Fw0xMzEyMDIxNTAzNTVaMAwwCgYDVR0VBAMKAQAwIwIE SQJVshcNMTMxMjAyMTQ1NTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCVbEXDTEzMTIwMjE0NTk1OVowD DAKBgNVHRUEAwoBADAjAgRJAlWvFw0xMzEyMDIxNDE3MzBaMAwwCgYDVR0VBAMKAQAwIwIESQJVrh cNMTMxMjAyMTQ0OTMxWjAMMAoGA1UdFQQDCgEAMCMCBEkCVawXDTEzMTIwMjEzMTA1OFowDDAKBgN VHRUEAwoBADAjAgRJAlWrFw0xMzEyMDIxNDEyMTVaMAwwCgYDVR0VBAMKAQAwIwIESQJONRcNMTMx MTEyMjExMzI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCJrkXDTEzMDkxMDA2NDUyNFowDDAKBgNVHRUEA woBADAjAgRJAhmPFw0xMzA4MjExMDM0MTFaMAwwCgYDVR0VBAMKAQAwIwIESQIVrBcNMTMwODEyMT g1NTU1WjAMMAoGA1UdFQQDCgEAMCMCBEkCFasXDTEzMTIxODE3MDQ0MlowDDAKBgNVHRUEAwoBADA jAgRJAhAoFw0xMzA3MjkxNjAwMzVaMAwwCgYDVR0VBAMKAQAwIwIESQIQJxcNMTQwMTAyMTU1MDUy WjAMMAoGA1UdFQQDCgEAMCMCBEkCCh8XDTEzMDcxNTA3MzY1NlowDDAKBgNVHRUEAwoBADAjAgRJA gexFw0xMzA3MDgxNTU5MTRaMAwwCgYDVR0VBAMKAQAwIwIESQH73BcNMTMwNzI5MTU1NTAzWjAMMA oGA1UdFQQDCgEAMCMCBEkB5EcXDTEzMDUyOTE0MDUyNVowDDAKBgNVHRUEAwoBADAjAgRJAcDtFw0 xMzA1MTAyMDExNTBaMAwwCgYDVR0VBAMKAQAwIwIESQGmXBcNMTMwNDEwMDkyMTI2WjAMMAoGA1Ud FQQDCgEAMCMCBEkBnj0XDTEzMDMyNTE4MTc0MFowDDAKBgNVHRUEAwoBADAjAgRJAYMOFw0xMzAyM TExMTEwNDdaMAwwCgYDVR0VBAMKAQAwIwIESQF4PRcNMTMwODEyMTg0ODE2WjAMMAoGA1UdFQQDCg EAMCMCBEkBcwcXDTEzMDEwMzE2NTgyMFowDDAKBgNVHRUEAwoBADAjAgRJAXMEFw0xMzAxMDMxMDA yMjRaMAwwCgYDVR0VBAMKAQAwIwIESQFuRxcNMTMxMDA3MTMwMjM1WjAMMAoGA1UdFQQDCgEFMCMC BEkBaLsXDTEzMDQxMDA5MTY1NVowDDAKBgNVHRUEAwoBADAjAgRJAWaQFw0xMjExMjkxNjAxMzJaM AwwCgYDVR0VBAMKAQAwIwIESQFmhBcNMTIxMTI5MTE1NTIyWjAMMAoGA1UdFQQDCgEAMCMCBEkBZo MXDTEyMTEyOTE1MjYwNVowDDAKBgNVHRUEAwoBADAjAgRJAWaBFw0xMjExMjkxMTAzNTJaMAwwCgY DVR0VBAMKAQAwIwIESQFmgBcNMTIxMTI5MTE1MTU4WjAMMAoGA1UdFQQDCgEAMCMCBEkBYT8XDTEy MTExNTA5NTI1OVowDDAKBgNVHRUEAwoBADAjAgRJAWCrFw0xMjExMTQxNDM2NDVaMAwwCgYDVR0VB 
AMKAQAwIwIESQFgqhcNMTIxMTE1MDk0ODI1WjAMMAoGA1UdFQQDCgEAMCMCBEkBXT4XDTEzMTIwMj EzMDcwMVowDDAKBgNVHRUEAwoBADAjAgRJAVvbFw0xMjExMjkxMTAwMzFaMAwwCgYDVR0VBAMKAQC gMDAuMAsGA1UdFAQEAgIo8DAfBgNVHSMEGDAWgBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzANBgkqhkiG 9w0BAQUFAAOCAQEATe14zpsSjrGcW4yNZrdGtsupuJge+DQV+h1ZwBEQtsmOmMvbSdMsu+vMvTzHQ KWJq56picjixY6v4vPqhRRZWP8evOc0NuoxpiUhgez3CKFQoJ2bdeaS/WCfqss3Sa4FZTUzkVWZde moDH8CcHt5in3H7SwF5i9/rKB/bLuTjQg+LRKh2E9+FAkJn1S/ZRh1Vjd/KuRFOXD6odjV54oTWE0 6PcHBdwip62ridLdQopt3+e1UgwKBNJAmBD6uMN1tPmenUYWxh4xI7Ft4HQR58TdIiTZmfQHmEkjl dBNEAoUK1hvRy6E2mSdRq9Yex8f+rGdxI1+++6lHaN1+M8jQ4g== userCertificate;binary:: MIKE/jCCg+YCAQEwX6FdMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx MJRENvbVN1YkNBMGegZTBjMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBAgRIwMPg MA0GCSqGSIb3DQEBBQUAAgRXh6kjMCIYDzIwMTcxMDE1MjI0NjEzWhgPMjAxNzExMTQyMjQ2MTNaM IKCuTCCEQoGCSqGSIb2fQdEADGCEPswghD3gAEEMIIQ8DBvMFcxCzAJBgNVBAYTAlVTMRAwDgYDVQ QKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwl EQ29tU3ViQ0EWFENBIERvbWFpbiBTZWFyY2hiYXNlME4wPzEVMBMGCgmSJomT8ixkARkWBWxvY2Fs MRQwEgYKCZImiZPyLGQBGRYEVGVzdDEQMA4GA1UECxMHRGV2aWNlcxYLQ0xTIERldmljZXMwgYswa DEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVG VzdCBVc2VyczEkMCIGA1UECxMbU1NPIEFkbWluaXN0cmF0aW9uIEFjY291bnRzFh9DTFMgU1NPIEF kbWluaXN0cmF0aW9uIEFjY291bnRzMFQwQjEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZIm iZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVGVzdCBVc2VycxYOQ0xTIFRlc3QgVXNlcnMwfDBfMRUwE wYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgRUZXN0MRswGQYDVQQLExJEb21haW 4gQ29udHJvbGxlcnMxEzARBgNVBAsTCkdCIFNlcnZlcnMWGUNMUyBHQiBEb21haW4gQ29udHJvbGx lcnMwfDBfMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgR0ZXN0MRswGQYD VQQLExJEb21haW4gQ29udHJvbGxlcnMxEzARBgNVBAsTClVTIFNlcnZlcnMWGUNMUyBVUyBEb21ha 
W4gQ29udHJvbGxlcnMwgaIwgY4xFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkAR kWBFRlc3QxFDASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwp HQiBTZXJ2ZXJzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCFg9DTFMgR0IgV2Vi IEFwcHMwgbUwgaExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxF DASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwpHQiBTZXJ2ZX JzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCMREwDwYDVQQLEwhJbnRyYW5ldBY PQ0xTIEdCIEludHJhbmV0MIG1MIGhMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/Is ZAEZFgRUZXN0MRQwEgYDVQQLEwtUZXN0LU9mZmljZTEQMA4GA1UECxMHU2VydmVyczETMBEGA1UEC xMKVVMgU2VydmVyczEUMBIGA1UECxMLQXBwbGljYXRpb24xDDAKBgNVBAsTA1dFQjERMA8GA1UECx MISW50cmFuZXQWD0NMUyBVUyBJbnRyYW5ldDA8MDExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnR ydXN0MRAwDgYDVQQLEwdEeW5Db3JwFgdEeW5Db3JwMEowODELMAkGA1UEBhMCVVMxEDAOBgNVBAoT B0VudHJ1c3QxFzAVBgNVBAsTDkFkbWluaXN0cmF0b3JzFg5BZG1pbmlzdHJhdG9yczBKMDgxCzAJB gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRcwFQYDVQQLEw5HZW5lcmFsIE1vdG9ycxYOR2VuZX JhbCBNb3RvcnMwczBZMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR2V uZXJhbCBNb3RvcnMxHzAdBgNVBAsTFkdNIFVzZXIgQWRtaW5pc3RyYXRvcnMWFkdNIFVzZXIgQWRt aW5pc3RyYXRvcnMwXzBPMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR 2VuZXJhbCBNb3RvcnMxFTATBgNVBAsTDEdNIEVuZCBVc2VycxYMR00gRW5kIFVzZXJzMFYwQzEVMB MGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDEUMBIGA1UECxMLV2ViIFN lcnZlcnMWD0NMUyBXZWIgU2VydmVyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmS JomT8ixkARkWBFRlc3QxGDAWBgNVBAsTD0NNUyBBZG1pbiBVc2VycxYTQ0xTIENNUyBBZG1pbiBVc 2VyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxGDAWBg NVBAsTD1BLSSBBZG1pbiBVc2VycxYTQ0xTIFBLSSBBZG1pbiBVc2VyczBLMD8xCzAJBgNVBAYTAnV zMRAwDgYDVQQKEwdlbnRydXN0MQ8wDQYDVQQLEwZtb2JpbGUxDTALBgNVBAsTBGRlbW8WCERlbW8g TURNMEgwMzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxEjAQBgNVBAsTCUVtcGxveWVlc xYRRW50cnVzdCBFbXBsb3llZXMwWzBQMRUwEwYKCZImiZPyLGQBGRYFTG9jYWwxFDASBgoJkiaJk/ 
IsZAEZFgRUZXN0MRMwEQYDVQQLEwpUZXN0IFVzZXJzMQwwCgYDVQQHEwNERVYWB0NMUyBERVYwJDA cMQswCQYDVQQGEwJ1czENMAsGA1UEChMETklTVBYETklTVDB2MGcxCzAJBgNVBAYTAlVTMRAwDgYD VQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlcnZpY2VzMRkwFwYDVQQLExBEZW1vIENvb VByaXYgU3ViMRAwDgYDVQQLEwdEZXZpY2VzFgtNU08gRGV2aWNlczCBhDBuMQswCQYDVQQGEwJVUz EQMA4GA1UEChMHRW50cnVzdDEZMBcGA1UECxMQTWFuYWdlZCBTZXJ2aWNlczEZMBcGA1UECxMQRGV tbyBDb21Qcml2IFN1YjEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWEk1TTyBBZG1pbmlzdHJhdG9y czB6MGkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlc nZpY2VzMRkwFwYDVQQLExBEZW1vIENvbVByaXYgU3ViMRIwEAYDVQQLEwlFbXBsb3llZXMWDU1TTy BFbXBsb3llZXMwRDAxMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR290U3ZlbjEQMA4GA1UECxMHRGV 2aWNlcxYPR290U3ZlbiBEZXZpY2VzMIGEMFoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 MSAwHgYDVQQLExdFbnRydXN0IFNhbGVzIEVuZ2luZWVyczEXMBUGA1UECxMOQWRtaW5pc3RyYXRvc nMWJkVudHJ1c3QgU2FsZXMgRW5naW5lZXJzIEFkbWluaXN0cmF0b3JzMHYwUzELMAkGA1UEBhMCVV MxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzMRAwDgY DVQQLEwdEZXZpY2VzFh9FbnRydXN0IFNhbGVzIEVuZ2luZWVycyBEZXZpY2VzMHIwUTELMAkGA1UE BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzM Q4wDAYDVQQLEwVDYXJkcxYdRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgQ2FyZHMwdDBSMQswCQYDVQ QGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEgMB4GA1UECxMXRW50cnVzdCBTYWxlcyBFbmdpbmVlcnM xDzANBgNVBAsTBlBlb3BsZRYeRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgUGVvcGxlMIGKMF0xCzAJ BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSMwIQYDVQQLExpFbnRydXN0IFByb2R1Y3QgTWFuY WdlbWVudDEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWKUVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW 50IEFkbWluaXN0cmF0b3JzMHwwVjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIzAhBgN VBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MRAwDgYDVQQLEwdEZXZpY2VzFiJFbnRydXN0 IFByb2R1Y3QgTWFuYWdlbWVudCBEZXZpY2VzMHgwVDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vud HJ1c3QxIzAhBgNVBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MQ4wDAYDVQQLEwVDYXJkcx YgRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgQ2FyZHMwejBVMQswCQYDVQQGEwJVUzEQMA4GA1U 
EChMHRW50cnVzdDEjMCEGA1UECxMaRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQxDzANBgNVBAsT BlBlb3BsZRYhRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgUGVvcGxlMCQwHDELMAkGA1UEBhMCT loxDTALBgNVBAoTBExJTloWBExJTlowTDA1MQswCQYDVQQGEwJOWjENMAsGA1UEChMETElOWjEXMB UGA1UECxMOQWRtaW5pc3RyYXRvcnMWE0xJTlogQWRtaW5pc3RyYXRvcnMwPjAuMQswCQYDVQQGEwJ OWjENMAsGA1UEChMETElOWjEQMA4GA1UECxMHRGV2aWNlcxYMTElOWiBEZXZpY2VzMDwwLTELMAkG A1UEBhMCTloxDTALBgNVBAoTBExJTloxDzANBgNVBAsTBlBlb3BsZRYLTElOWiBQZW9wbGUwVDA0M QswCQYDVQQGEwJVUzElMCMGA1UEChMcTWFnZWxsYW4gSGVhbHRoIFNlcnZpY2VzIEluYxYcTWFnZW xsYW4gSGVhbHRoIFNlcnZpY2VzIEluYzBnMFExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgm SJomT8ixkARkWBHRlc3QxEzARBgNVBAsTClRlc3QgVXNlcnMxDTALBgNVBAcTBFRlc3QWEkNMUyBU ZXN0IFVzZXIgVGVzdDBEMDoxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hb iBCYW5rIG9mIE5ldyBZb3JrFgZGSExCTlkwWjBKMQswCQYDVQQGEwJ1czErMCkGA1UEChMiRmVkZX JhbCBIb21lIExvYW4gQmFuayBvZiBOZXcgWW9yazEOMAwGA1UECxMFMUxpbmsWDEZITEJOWSAxTGl uazBcMEsxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hbiBCYW5rIG9mIE5l dyBZb3JrMQ8wDQYDVQQLEwZBZG1pbnMWDUZITEJOWSBBZG1pbnMwSAYJKoZIhvZ9B0QQMTswOTAQA gEAAgEAAgEIAgEPAwIDeDAQAgEAAgEAAgEIAgEKAwIAeTAQAgEAAgEAAgEIAgEKAwIAeQMBADBxBg kqhkiG9n0HTUAxZAxiQUVTLUNCQy0xMjgsIEFFUy1DQkMtMjU2LCBBRVMtR0NNLTEyOCwgQUVTLUd DTS0yNTYsIFRSSVBMRURFUy1DQkMtMTkyLCBDQVNUNS1DQkMtODAsIENBU1Q1LUNCQy0xMjgwdgYJ KoZIhvZ9B01BMWkMZ0VDRFNBLVJFQ09NTUVOREVELCBSU0FQU1MtUkVDT01NRU5ERUQsIFJTQS1SR UNPTU1FTkRFRCwgRFNBLVJFQ09NTUVOREVELCBFQ0RTQS1TSEExLCBSU0EtU0hBMSwgRFNBLVNIQT EwFwYJKoZIhvZ9B00QMQoECFJTQS0yMDQ4MIIWSQYJKoZIhvZ9B00AMYIWOjCCFjYwgYACAQAwADB 5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN1cml0eSBPZmZpY2VyI FBvbGljeTB9AgEBMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWR taW5pc3RyYXRvciBQb2xpY3kweAIBAjAAMHExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 
MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExG DAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9AgEDMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0 VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21 TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3kwfQIBBDAAMHYxCzAJBgNVBAYTAlVT MRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwE AYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MHMCAQUwADBsMQ swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpBU0ggUG9saWN5MH0CAQYwADB2 MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBd XRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbG ljeTB9AgEHMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnR pZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5p c3RyYXRvciBQb2xpY3kwfAIBCDAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwI AYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBg NVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfAIBCTAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t U3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfQIBCjAAMHYxCzAJBgNVBAYTAlVTM RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA YDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MIGAAgEMMAAweTE LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEgMB4GA1UEAxMXQ0xTIFNlcnZlciBMb2dpbiBQb 2xpY3kwgYACAQ0wADB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN 1cml0eSBPZmZpY2VyIFBvbGljeTCBgAIBDjAAMHkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRy dXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ 
0ExIDAeBgNVBAMTF1NlY3VyaXR5IE9mZmljZXIgUG9saWN5MH0CAQ8wADB2MQswCQYDVQQGEwJVUz EQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBA GA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB9AgERMAAwdjEL MAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0a G9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3 kwfAIBCzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZ pY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE0NMUyBFbmQg VXNlciBQb2xpY3kwfQIBEjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDV QQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBA MTFEFkbWluaXN0cmF0b3IgUG9saWN5MH0CARMwADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgAIBFDAAMHkxCzAJBgNVBAYTAlVTM RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA YDVQQLEwlEQ29tU3ViQ0ExIDAeBgNVBAMTF0R5bkNvcnAgRW5kIFVzZXIgUG9saWN5MH8CASAwADB 4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZDU1JFUyBSZXF1ZXN0b3IgU G9saWN5MHkCASEwADByMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRkwFwYDVQQDExBNRE1 XUyBYQVAgUG9saWN5MEkCASIwADBCMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEhMB8G A1UEAxMYU09BUCBBZG1pbiBFeHBvcnQgUG9saWN5MIGDAgEjMAAwfDELMAkGA1UEBhMCVVMxEDAOB gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA sTCURDb21TdWJDQTEjMCEGA1UEAxMaRXhwb3J0YWJsZSBFbmQgVXNlciBQb2xpY3kweAIBJDAAMHE xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1 dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGDAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9A gElMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX 
Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXR vciBQb2xpY3kwfQIBJjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQL ExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTF E1vYmlsZSBEZXZpY2UgUG9saWN5MHwCAScwADB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cn VzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkN BMRwwGgYDVQQDExNTZXJ2ZXIgTG9naW4gUG9saWN5MH0CASgwADB2MQswCQYDVQQGEwJVUzEQMA4G A1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UEC xMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgQIBKTAAMHoxCzAJBg NVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml 0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGlj eTCBggIBKjAAMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0a WZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNQT0MgQW RtaW5pc3RyYXRvciBQb2xpY3kwfAIBKzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex HDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgZECASwwADCBiTELMAkGA1UEBhMCVVMxEDAOB gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA sTCURDb21TdWJDQTEwMC4GA1UEAxMnTWFzdGVyIExpc3QgU2lnbmVyIEFkbWluaXN0cmF0b3IgUG9 saWN5MH0CAS0wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vy dGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pb mlzdHJhdG9yIFBvbGljeTB4AgEuMAAwcTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEYMBY GA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CAS8wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50 cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Y kNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB4AgExMAAwcTELMAkGA1UEBhMCVVMxED AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN 
VBAsTCURDb21TdWJDQTEYMBYGA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CATIwADB2MQswCQYDVQQG EwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllc zESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB8AgEwMA AwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24 gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTU2VydmVyIExvZ2luIFBv bGljeTB9AgEzMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlc nRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW 5pc3RyYXRvciBQb2xpY3kwfQIBNTAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAb BgNVBAMTFENhcmQgRW5kIFVzZXIgUG9saWN5MHgCATQwADBxMQswCQYDVQQGEwJVUzEQMA4GA1UEC hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE NvbVN1YkNBMRgwFgYDVQQDEw9FbmQgVXNlciBQb2xpY3kwfAIBNjAAMHUxCzAJBgNVBAYTAlVTMRA wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD VQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE01ETSBFbmQgVXNlciBQb2xpY3kwfAIBNzAAMHUxCzAJB gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgYU CATgwADB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxNU08gVU1TIEFkb WluaXN0cmF0b3IgUG9saWN5MIJZ7wYKKoZIhvZ9B00uADGCWd8wglnbMDEwFwwSY3NjX3BpdjFrX2 NhcmRhdXRoAgEnMBYwFDASDA1QaXYxS0NhcmRBdXRoAgFDMEwwEwwOY3NjX3Bpdm1peGVkXzMCASg wNTAQMA4MCVBpdjFLQXV0aAIBRDAPMA0MCFBpdjJLRW5jAgFFMBAwDgwJUGl2MktTaWduAgFGMIG4 MBAMC2VudF9hZF9jbHMxAgE3MIGjMIGgMA4MCUR1YWxVc2FnZQIBXDCBjTELMAkGA1UEBhMCVVMxE DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg NVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBEb21haW4gQ29udHJvbGxlciBEdWFsIFV zYWdlIFBvbGljeTCBuDAQDAtlbnRfYWRfY2xzMgIBODCBozCBoDAODAlEdWFsVXNhZ2UCAV0wgY0x 
CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNDAyBgNVBAMTK0NMUyAyeXIgRG9tYWluIENvbn Ryb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwdTARDAxlbnRfYWRfY2xzMm0CAVIwTjAjMBAMCkVuY3J 5cHRpb24CAgCQog8MCkVuY3J5cHRpb24CAQEwJzASDAxWZXJpZmljYXRpb24CAgCRohEMDFZlcmlm aWNhdGlvbgIBAqIQDAtlbnRfZGVmYXVsdAIBAzCBvjASDA1lbnRfYWRfY2xzMm1hAgFUMIGnMIGkM A8MCUR1YWxVc2FnZQICAJQwgZAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQ QLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNzA1BgNVBAM TLkNMUyAybW9udGggRG9tYWluIENvbnRyb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwgbAwDgwJZW50 X2FkX2RjAgF4MIGdMIGaMBAMCkR1YWwgVXNhZ2UCAgDSMIGFMQswCQYDVQQGEwJVUzEQMA4GA1UEC hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE NvbVN1YkNBMSwwKgYDVQQDEyNFbnRlcnByaXNlIERvbWFpbiBDb250cm9sbGVyIFBvbGljeTBHMBk ME2VudF9hZG1zcnZjc191bXNfZWECAgCLMCowEjAQDApFbmNyeXB0aW9uAgIA9DAUMBIMDFZlcmlm aWNhdGlvbgICAPUwRTAZDBRlbnRfYWRtc3J2Y3NfdXNlcnJlZwIBEjAoMBEwDwwKRW5jcnlwdGlvb gIBHjATMBEMDFZlcmlmaWNhdGlvbgIBHzCBzzAZDBRlbnRfYWRtc3J2Y3NfdXNybWdtdAIBETCBsT ARMA8MCkVuY3J5cHRpb24CARwwgZswEQwMVmVyaWZpY2F0aW9uAgEdMIGFMQswCQYDVQQGEwJVUzE QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG A1UECxMJRENvbVN1YkNBMSwwKgYDVQQDEyNUcnVlUGFzcyBTZXJ2ZXIgVmVyaWZpY2F0aW9uIFBvb GljeTA6MA4MCWVudF9iYXNpYwIBJjAoMBEwDwwKRW5jcnlwdGlvbgIBQTATMBEMDFZlcmlmaWNhdG lvbgIBQjCCATkwDQwIZW50X2NsczECAS8wggEmMIGOMA8MCkVuY3J5cHRpb24CAVIwezELMAkGA1U EBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRp ZXMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGlje TCBkjARDAxWZXJpZmljYXRpb24CAVMwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCI GA1UEAxMbQ0xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIBOTANDAhlbnRfY2xzMgIBMDCCASYw gY4wDwwKRW5jcnlwdGlvbgIBVDB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA 
1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQ QDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBVTB9MQswCQY DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp dGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb 2xpY3kwQDASDA1lbnRfY2xzX2FkbWluAgFXMCowEjAQDApFbmNyeXB0aW9uAgIAmDAUMBIMDFZlcm lmaWNhdGlvbgICAJkwggFPMBMMDmVudF9jbHNfYWRtaW4yAgFWMIIBNjCBljAQDApFbmNyeXB0aW9 uAgIAljCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfQ0xTIEFkbWluI DJ5ciBFbmNyeXB0aW9uIFBvbGljeTCBmjASDAxWZXJpZmljYXRpb24CAgCXMIGDMQswCQYDVQQGEw JVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczE SMBAGA1UECxMJRENvbVN1YkNBMSowKAYDVQQDEyFDTFMgQWRtaW4gMnlyIFZlcmlmaWNhdGlvbiBQ b2xpY3kwgbgwFwwSZW50X2Ntc2NsaWVudF9jbHMxAgExMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVYwg YUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAxeXIgQUkgQ2xpZW5 0IER1YWwgVXNhZ2UgUG9saWN5MIG/MBkMFGVudF9jbXNjbGllbnRfY2xzMV9mAgEzMIGhMIGeMA8M CkR1YWwgVXNhZ2UCAVgwgYoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKE NMUyAxeXIgQUkgQ2xpZW50IEZpbGUgRHVhbCBVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc2NsaWV udF9jbHMyAgEyMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVcwgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQK EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ 29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQUkgQ2xpZW50IER1YWwgVXNhZ2UgUG9saWN5MIG/MB kMFGVudF9jbXNjbGllbnRfY2xzMl9mAgE0MIGhMIGeMA8MCkR1YWwgVXNhZ2UCAVkwgYoxCzAJBgN VBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyeXIgQUkgQ2xpZW50IEZpbGUgR HVhbCBVc2FnZSBQb2xpY3kwLjAXDBJlbnRfY21zY2xpZW50X3NrZHUCASowEzARMA8MCkR1YWwgVX 
NhZ2UCAUkwMDAZDBRlbnRfY21zY2xpZW50X3NrZHVfZgIBKzATMBEwDwwKRHVhbCBVc2FnZQIBSjC BuDAXDBJlbnRfY21zc2VydmVyX2NsczECATUwgZwwgZkwDwwKRHVhbCBVc2FnZQIBWjCBhTELMAkG A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAxMjQ0xTIDF5ciBBSSBTZXJ2ZXIgRHVhbC BVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc3NlcnZlcl9jbHMyAgE2MIGcMIGZMA8MCkR1YWwgVXN hZ2UCAVswgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQ UkgU2VydmVyIER1YWwgVXNhZ2UgUG9saWN5MC4wFwwSZW50X2Ntc3NlcnZlcl9za2R1AgEsMBMwET APDApEdWFsIFVzYWdlAgFLMEYwGAwSZW50X2NzcmVzX2FwcHJvdmVyAgIAjDAqMBIwEAwKRW5jcnl wdGlvbgICAPYwFDASDAxWZXJpZmljYXRpb24CAgD3MEYwGAwTZW50X2NzcmVzX3JlcXVlc3RvcgIB bzAqMBIwEAwKRW5jcnlwdGlvbgICAMUwFDASDAxWZXJpZmljYXRpb24CAgDGMDwwEAwLZW50X2RlZ mF1bHQCAQMwKDARMA8MCkVuY3J5cHRpb24CAQEwEzARDAxWZXJpZmljYXRpb24CAQIwggE8MBAMC2 VudF9kZXNrdG9wAgEHMIIBJjCBjjAPDApFbmNyeXB0aW9uAgEJMHsxCzAJBgNVBAYTAlVTMRAwDgY DVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQL EwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNhZmVOZXQgRW5jcnlwdGlvbiBQb2xpY3kwgZIwEQwMVmVya WZpY2F0aW9uAgEKMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZX J0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAiBgNVBAMTG1NhZmV OZXQgVmVyaWZpY2F0aW9uIFBvbGljeTCBpDAVDBBlbnRfZHVfYmFzaWNfZWt1AgFtMIGKMIGHMBAM CkR1YWwgVXNhZ2UCAgDCMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEU R1YWwgVXNhZ2UgUG9saWN5MEMwFQwQZW50X2VhY2NhdHRhY2hlZAIBaDAqMBIwEAwKRW5jcnlwdGl vbgICALgwFDASDAxWZXJpZmljYXRpb24CAgC5MD0wDwwKZW50X2VhY2NvbgIBajAqMBIwEAwKRW5j cnlwdGlvbgICALwwFDASDAxWZXJpZmljYXRpb24CAgC9MEUwFwwSZW50X2VhY2NzdGFuZGFsb25lA gFpMCowEjAQDApFbmNyeXB0aW9uAgIAujAUMBIMDFZlcmlmaWNhdGlvbgICALswggGiMAwMB2VudF 9lZnMCARUwggGQMHgwCAwDRUZTAgEnMGwxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI 
wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExEzAR BgNVBAMTCkVGUyBQb2xpY3kwgYYwDwwKRW5jcnlwdGlvbgIBJTBzMQswCQYDVQQGEwJVUzEQMA4GA 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx MJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24 CASYwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRp b24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uI FBvbGljeTBEMBgME2VudF9lc3Zwbl9jb21tZWRvaWQCASUwKDARMA8MCkVuY3J5cHRpb24CAT8wEz ARDAxWZXJpZmljYXRpb24CAUAwggE7MA8MCmVudF9ldG9rZW4CAWwwggEmMIGOMBAMCkVuY3J5cHR pb24CAgDAMHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGGVUb2tlbiBFb mNyeXB0aW9uIFBvbGljeTCBkjASDAxWZXJpZmljYXRpb24CAgDBMHwxCzAJBgNVBAYTAlVTMRAwDg YDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQ LEwlEQ29tU3ViQ0ExIzAhBgNVBAMTGmVUb2tlbiBWZXJpZmljYXRpb24gUG9saWN5MIIBOTAPDApl bnRfZXhwb3J0AgEGMIIBJDCBjTAPDApFbmNyeXB0aW9uAgEHMHoxCzAJBgNVBAYTAlVTMRAwDgYDV QQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEw lEQ29tU3ViQ0ExITAfBgNVBAMUGEVuY3J5cHRpb24gUG9saWN5X0V4cG9ydDCBkTARDAxWZXJpZml jYXRpb24CAQgwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEjMCEGA1UEAxQaVmVyaWZpY 2F0aW9uIFBvbGljeV9FeHBvcnQwggE9MBQMD2VudF9nZW1hbHRvX2NzcAIBXjCCASMwgYowEAwKRW 5jcnlwdGlvbgICAKswdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUR00g RW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArDB9MQswCQYDVQQGEwJVUzEQM A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtHZW1hbHRvIFZlcmlmaWNhdGlvbiBQb2xpY3kwgb8wFgw RZW50X2lpc19za2R1X2NsczECATkwgaQwgaEwDwwKRHVhbCBVc2FnZQIBXjCBjTELMAkGA1UEBhMC 
VVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxE jAQBgNVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBJSVMgRHVhbCBVc2FnZSBObyBLZX kgQmFja3VwIFBvbGljeTCBvzAWDBFlbnRfaWlzX3NrZHVfY2xzMgIBOjCBpDCBoTAPDApEdWFsIFV zYWdlAgFfMIGNMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlm aWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTQwMgYDVQQDEytDTFMgMnlyI ElJUyBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MHswFwwSZW50X2lpc19za2R1X2Nscz JtAgFTME4wIzAQDApFbmNyeXB0aW9uAgIAkqIPDApFbmNyeXB0aW9uAgEBMCcwEgwMVmVyaWZpY2F 0aW9uAgIAk6IRDAxWZXJpZmljYXRpb24CAQKiEAwLZW50X2RlZmF1bHQCAQMwgcUwGAwTZW50X2lp c19za2R1X2NsczJtYQIBVTCBqDCBpTAQDApEdWFsIFVzYWdlAgIAlTCBkDELMAkGA1UEBhMCVVMxE DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg NVBAsTCURDb21TdWJDQTE3MDUGA1UEAxMuQ0xTIDJtb250aCBJSVMgRHVhbCBVc2FnZSBObyBLZXk gQmFja3VwIFBvbGljeTCBpzAQDAtlbnRfbWFjaGluZQIBeTCBkjCBjzAQDApEdWFsIFVzYWdlAgIA 0zB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvb iBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlFbnRlcnByaXNlIE1hY2 hpbmUgUG9saWN5MEAwEgwNZW50X21kbXdzX2NsaQIBcDAqMBIwEAwKRW5jcnlwdGlvbgICAMcwFDA SDAxWZXJpZmljYXRpb24CAgDIMEIwFAwPZW50X21saXN0X2FkbWluAgF/MCowEjAQDApFbmNyeXB0 aW9uAgIA3jAUMBIMDFZlcmlmaWNhdGlvbgICAN8wggE5MBUMEGVudF9tbGlzdF9zaWduZXICAX4wg gEeMIGHMBAMCkVuY3J5cHRpb24CAgDcMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MS IwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjA YBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGRMBIMDFZlcmlmaWNhdGlvbgICAN0wezELMAkGA1UE BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeT CCAeQwGAwTZW50X21zX3NjX2NhcGlfY2xzMQIBLTCCAcYwgZ0wDwwKRHVhbCBVc2FnZQIBTDCBiTE LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEwMC4GA1UEAxMnQ0xTIDF5ciBEdWFsIFVzYWdlI 
E5vIEtleSBCYWNrdXAgUG9saWN5MIGOMA8MCkVuY3J5cHRpb24CAU4wezELMAkGA1UEBhMCVVMxED AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN VBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGljeTCBkjARDAxW ZXJpZmljYXRpb24CAU0wfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0 xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIB5DAYDBNlbnRfbXNfc2NfY2FwaV9jbHMyAgEuMII BxjCBnTAPDApEdWFsIFVzYWdlAgFPMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEi MCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwL gYDVQQDEydDTFMgMnlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY4wDwwKRW5jcn lwdGlvbgIBUTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGl maWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnly IEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBUDB9MQswCQYDVQQGEwJVUzEQM A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHyMBk MFGVudF9tc19zY19jYXBpX2NsczJtAgFRMIIB0zCBoTAQDApEdWFsIFVzYWdlAgIAjTCBjDELMAkG A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEzMDEGA1UEAxMqQ0xTIDJtb250aCBEdWFsIFVzYWdlIE 5vIEtleSBCYWNrdXAgUG9saWN5MIGSMBAMCkVuY3J5cHRpb24CAgCPMH4xCzAJBgNVBAYTAlVTMRA wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD VQQLEwlEQ29tU3ViQ0ExJTAjBgNVBAMTHENMUyAybW9udGggRW5jcnlwdGlvbiBQb2xpY3kwgZcwE gwMVmVyaWZpY2F0aW9uAgIAjjCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBg NVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1U EAxMeQ0xTIDJtb250aCBWZXJpZmljYXRpb24gUG9saWN5MIIB5zAYDBNlbnRfbXNfc2NfY2FwaV9j bHM0AgFPMIIByTCBnjAQDApEdWFsIFVzYWdlAgIAhzCBiTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB 0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb2 
1TdWJDQTEwMC4GA1UEAxMnQ0xTIDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIG PMBAMCkVuY3J5cHRpb24CAgCJMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYD VQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVB AMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIAiDB9MQswCQ YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQ b2xpY3kwggHnMBgME2VudF9tc19zY19jYXBpX2NsczUCAVAwggHJMIGeMBAMCkR1YWwgVXNhZ2UCA gCKMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdG lvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwLgYDVQQDEydDTFMgNXlyIER1YWw gVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY8wEAwKRW5jcnlwdGlvbgICAIwwezELMAkGA1UE BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDV5ciBFbmNyeXB0aW9uIFBvbGljeT CBkzASDAxWZXJpZmljYXRpb24CAgCLMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAi BgNVBAMTG0NMUyA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTCCAfgwGAwTZW50X21zX3NjX2NsczRfM TAyNAIBXDCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKUwgY4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEw dFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29 tU3ViQ0ExNTAzBgNVBAMTLENMUyAxMDI0IDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9s aWN5MIGVMBAMCkVuY3J5cHRpb24CAgCnMIGAMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS cwJQYDVQQDEx5DTFMgMTAyNCA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9 uAgIApjCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEpMCcGA1UEAxMgQ0xTIDEwMjQgN HlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggH4MBgME2VudF9tc19zY19jbHM0XzIwNDgCAVowggHaMI GjMBAMCkR1YWwgVXNhZ2UCAgCfMIGOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA 
GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTUwMwYD VQQDEyxDTFMgMjA0OCA0eXIgRHVhbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCBlTAQDApFb mNyeXB0aW9uAgIAoTCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1UEAxMeQ0x TIDIwNDggNHlyIEVuY3J5cHRpb24gUG9saWN5MIGZMBIMDFZlcmlmaWNhdGlvbgICAKAwgYIxCzAJ BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExKTAnBgNVBAMTIENMUyAyMDQ4IDR5ciBWZXJpZmljYX Rpb24gUG9saWN5MIIB+DAYDBNlbnRfbXNfc2NfY2xzNV8xMDI0AgFdMIIB2jCBozAQDApEdWFsIFV zYWdlAgIAqDCBjjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTE1MDMGA1UEAxMsQ0xTIDEwM jQgNXlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgZUwEAwKRW5jcnlwdGlvbgICAK owgYAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9 uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJzAlBgNVBAMTHkNMUyAxMDI0IDV5ciBF bmNyeXB0aW9uIFBvbGljeTCBmTASDAxWZXJpZmljYXRpb24CAgCpMIGCMQswCQYDVQQGEwJVUzEQM A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 UECxMJRENvbVN1YkNBMSkwJwYDVQQDEyBDTFMgMTAyNCA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTC CAfgwGAwTZW50X21zX3NjX2NsczVfMjA0OAIBWzCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKIwgY4x CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNTAzBgNVBAMTLENMUyAyMDQ4IDV5ciBEdWFsIF VzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIGVMBAMCkVuY3J5cHRpb24CAgCkMIGAMQswCQYDVQQ GEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGll czESMBAGA1UECxMJRENvbVN1YkNBMScwJQYDVQQDEx5DTFMgMjA0OCA1eXIgRW5jcnlwdGlvbiBQb 2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9uAgIAozCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ DQTEpMCcGA1UEAxMgQ0xTIDIwNDggNXlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHpMBcMEmVudF9t 
c19zY19jbHNfMjA0OAIBWDCCAcwwgZ8wEAwKRHVhbCBVc2FnZQICAJowgYoxCzAJBgNVBAYTAlVTM RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA YDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyMDQ4IER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t 1cCBQb2xpY3kwgZAwEAwKRW5jcnlwdGlvbgICAJwwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td WJDQTEjMCEGA1UEAxMaQ0xTIDIwNDggRW5jcnlwdGlvbiBQb2xpY3kwgZQwEgwMVmVyaWZpY2F0aW 9uAgIAmzB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWN hdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxDTFMgMjA0OCBW ZXJpZmljYXRpb24gUG9saWN5MIG1MBgME2VudF9tc19zbXJ0Y3JkX2NhcGkCAQ8wgZgwgZUwDwwKR HVhbCBVc2FnZQIBGTCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfRHV hbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCCAakwEAwKZW50X21zY2FwaQICAIEwggGTMHkw CQwDRUZTAgIA4zBsMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vyd GlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpFRlMgUG 9saWN5MIGHMBAMCkVuY3J5cHRpb24CAgDhMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex GjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGLMBIMDFZlcmlmaWNhdGlvbgICAOIwdTELMAkGA 1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaX RpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTBDMBc MEmVudF9tc2Z0X3NtYXJ0Y2FyZAIBDjAoMBEwDwwKRW5jcnlwdGlvbgIBFzATMBEMDFZlcmlmaWNh dGlvbgIBGDA/MBMMDmVudF9tc2dzY2FubmVyAgENMCgwETAPDApFbmNyeXB0aW9uAgEVMBMwEQwMV mVyaWZpY2F0aW9uAgEWMD4wEgwNZW50X21zZ3NlcnZlcgIBDDAoMBEwDwwKRW5jcnlwdGlvbgIBEz ATMBEMDFZlcmlmaWNhdGlvbgIBFDBAMBIMDGVudF9tc29hZG1pbgICAIkwKjASMBAMCkVuY3J5cHR pb24CAgDxMBQwEgwMVmVyaWZpY2F0aW9uAgIA8DCCATkwFQwQZW50X21zdHdva2V5cGFpcgIBWTCC AR4wgYowEAwKRW5jcnlwdGlvbgICAJ0wdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxI 
jAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMB sGA1UEAxMUR00gRW5jcnlwdGlvbiBQb2xpY3kwgY4wEgwMVmVyaWZpY2F0aW9uAgIAnjB4MQswCQY DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp dGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZHTSBWZXJpZmljYXRpb24gUG9saWN5M IIBvjARDAxlbnRfbm9ucmVwdWQCARQwggGnMIGGMA8MCkVuY3J5cHRpb24CASIwczELMAkGA1UEBh MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgY4wEwwOTm9u cmVwdWRpYXRpb24CASQwdzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEeMBwGA1UEAxMVTm 9ucmVwdWRpYXRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBIzB1MQswCQYDVQQGEwJVUzE QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG A1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MIICQDAZDBRlbnRfb m9ucmVwdWRfYW5kX2VmcwIBFzCCAiEweDAIDANFRlMCAS0wbDELMAkGA1UEBhMCVVMxEDAOBgNVBA oTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCUR Db21TdWJDQTETMBEGA1UEAxMKRUZTIFBvbGljeTCBhjAPDApFbmNyeXB0aW9uAgEqMHMxCzAJBgNV BAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0a WVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGOMBMMDk 5vbnJlcHVkaWF0aW9uAgEsMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQ LExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHjAcBgNVBAMT FU5vbnJlcHVkaWF0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASswdTELMAkGA1UEBhMCV VMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEj AQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTA5MA0MCGVudF9 vY3NwAgEpMCgwETAPDApFbmNyeXB0aW9uAgFHMBMwEQwMVmVyaWZpY2F0aW9uAgFIMD0wEQwMZW50 X3Byb2ZzcnZyAgEFMCgwETAPDApFbmNyeXB0aW9uAgEFMBMwEQwMVmVyaWZpY2F0aW9uAgEGMDgwD AwHZW50X3JkcAIBQDAoMBEwDwwKRW5jcnlwdGlvbgIBaTATMBEMDFZlcmlmaWNhdGlvbgIBajCBqj 
ASDA1lbnRfc2lnbl9uaXN0AgFyMIGTMIGQMBIMDFZlcmlmaWNhdGlvbgICAMowejELMAkGA1UEBhM CVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMx EjAQBgNVBAsTCURDb21TdWJDQTEhMB8GA1UEAxMYTklTVCBWZXJpZmljYXRpb24gUG9saWN5MIGkM BYMEWVudF9za3BfZHVhbHVzYWdlAgEYMIGJMIGGMA8MCkR1YWwgVXNhZ2UCAS4wczELMAkGA1UEBh MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRHVhbCBVc2FnZSBQb2xpY3kwLTATDA1lbnRf c2twbm9ucmVwAgIAgDAWMBQwEgwMVmVyaWZpY2F0aW9uAgIA4DAwMBgMEmVudF9za3Bub25yZXBfY XV0aAICAIYwFDASMBAMCkR1YWwgVXNhZ2UCAgDrMEEwEwwOZW50X3Nwb2NfYWRtaW4CAXwwKjASMB AMCkVuY3J5cHRpb24CAgDYMBQwEgwMVmVyaWZpY2F0aW9uAgIA2TBCMBQMD2VudF9zcG9jX2NsaWV udAIBejAqMBIwEAwKRW5jcnlwdGlvbgICANQwFDASDAxWZXJpZmljYXRpb24CAgDVMD4wEAwLZW50 X3Nwb2NfZHYCAX0wKjASMBAMCkVuY3J5cHRpb24CAgDaMBQwEgwMVmVyaWZpY2F0aW9uAgIA2zBCM BQMD2VudF9zcG9jX3NlcnZlcgIBezAqMBIwEAwKRW5jcnlwdGlvbgICANYwFDASDAxWZXJpZmljYX Rpb24CAgDXMIG+MBMMDWVudF9zc2xfYmFzaWMCAgCIMIGmMBIwEAwKRW5jcnlwdGlvbgICAO8wgY8 wEgwMVmVyaWZpY2F0aW9uAgIA7jB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAG A1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDV QQDFBdWZXJpZmljYXRpb25fcDEwIFBvbGljeTB7MBIMDGVudF9zc2xfY2VydAICAIcwUDAkMBAMCk VuY3J5cHRpb24CAgDsohAMCkVuY3J5cHRpb24CAgDvMCgwEgwMVmVyaWZpY2F0aW9uAgIA7aISDAx WZXJpZmljYXRpb24CAgDuohMMDWVudF9zc2xfYmFzaWMCAgCIMIIBKDAXDBJlbnRfc3RhbmRhbG9u ZV9lZnMCARYwggELMIGLMBAMC0NNUCBTaWduaW5nAgEpMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKE wdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ2 9tU3ViQ0ExHjAcBgNVBAMTFU1TIENNUCBTaWduaW5nIFBvbGljeTB7MAgMA0VGUwIBKDBvMQswCQY DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp dGllczESMBAGA1UECxMJRENvbVN1YkNBMRYwFAYDVQQDEw1NUyBFRlMgUG9saWN5MD4wEgwNZW50X 3RpbWVzdGFtcAIBBDAoMBEwDwwKRW5jcnlwdGlvbgIBAzATMBEMDFZlcmlmaWNhdGlvbgIBBDBDMB UMEGVudF90aW1lc3RhbXBpbmcCAXcwKjASMBAMCkVuY3J5cHRpb24CAgDQMBQwEgwMVmVyaWZpY2F 
0aW9uAgIA0TCBxzARDAxlbnRfdHJ1ZXBhc3MCAQgwgbEwETAPDApFbmNyeXB0aW9uAgELMIGbMBEM DFZlcmlmaWNhdGlvbgIBDDCBhTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVB AsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAx MjVHJ1ZVBhc3MgU2VydmVyIFZlcmlmaWNhdGlvbiBQb2xpY3kwgc0wFwwSZW50X3RydWVwYXNzX21 1bHRpAgEJMIGxMBEwDwwKRW5jcnlwdGlvbgIBDTCBmzARDAxWZXJpZmljYXRpb24CAQ4wgYUxCzAJ BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI1RydWVQYXNzIFNlcnZlciBWZXJpZm ljYXRpb24gUG9saWN5MIIBLzATDA5lbnRfdHdva2V5cGFpcgIBEzCCARYwgYYwDwwKRW5jcnlwdGl vbgIBIDBzMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uI FBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASEwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ DQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTCCAUMwFwwSZW50X3R3b2tleXBhaXJfcDEw AgEkMIIBJjCBjjATDA5FbmNyeXB0aW9uX3AxMAIBPTB3MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR W50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbV N1YkNBMR4wHAYDVQQDFBVFbmNyeXB0aW9uX3AxMCBQb2xpY3kwgZIwFQwQVmVyaWZpY2F0aW9uX3A xMAIBPjB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDFBdWZXJpZmljYXRpb 25fcDEwIFBvbGljeTBBMBMMDWVudF91bXNfYWRtaW4CAgCKMCowEjAQDApFbmNyeXB0aW9uAgIA8j AUMBIMDFZlcmlmaWNhdGlvbgICAPMwOzAPDAplbnRfeGFwc3J2AgEQMCgwETAPDApFbmNyeXB0aW9 uAgEaMBMwEQwMVmVyaWZpY2F0aW9uAgEbMIGuMBUMEGVwYXNzX2RvY19zaWduZXICAWQwgZQwgZEw FQwPRG9jdW1lbnQgU2lnbmVyAgIAtzB4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiM CAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQ YDVQQDExZEb2N1bWVudCBTaWduZXIgUG9saWN5MIGzMBoMFGVwYXNzX2RvY19zaWduZXJfZHRsAgI AhDCBlDCBkTAVDA9Eb2N1bWVudCBTaWduZXICAgDoMHgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdF 
bnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU 3ViQ0ExHzAdBgNVBAMTFkRvY3VtZW50IFNpZ25lciBQb2xpY3kwgbYwFwwSZXBhc3NfbWxpc3Rfc2 lnbmVyAgFjMIGaMIGXMBgMEk1hc3RlciBMaXN0IFNpZ25lcgICALYwezELMAkGA1UEBhMCVVMxEDA OBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNV BAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeTAqMBIMDW1vY mlsZV9kZXZpY2UCAXEwFDASMBAMCkR1YWwgVXNhZ2UCAgDJMIG4MBYMEW1vYmlsZV9kZXZpY2VfMW twAgF2MIGdMIGaMBIMDFZlcmlmaWNhdGlvbgICAM8wgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t U3ViQ0ExKjAoBgNVBAMTIU1vYmlsZSBEZXZpY2UgVmVyaWZpY2F0aW9uIFBvbGljeTCCAVowEAwKb XNfdGhyZWV5cgICAIUwggFEMIGdMBAMCkVuY3J5cHRpb24CAgDpMIGIMQswCQYDVQQGEwJVUzEQMA 4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1U ECxMJRENvbVN1YkNBMS8wLQYDVQQDEyZNaWNyb1NvZnQgVGhyZWUgWWVhciBFbmNyeXB0aW9uIFBv bGljeTCBoTASDAxWZXJpZmljYXRpb24CAgDqMIGKMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50c nVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Yk NBMTEwLwYDVQQDEyhNaWNyb1NvZnQgVGhyZWUgWWVhciBWZXJpZmljYXRpb24gUG9saWN5MD4wEgw NbXNfdnBuX3NlcnZlcgIBIDAoMBEwDwwKRW5jcnlwdGlvbgIBODATMBEMDFZlcmlmaWNhdGlvbgIB OTCBmDAPDApzc2xfZGV2aWNlAgFzMIGEMIGBMAkMA3NzbAICAMswdDELMAkGA1UEBhMCVVMxEDAOB gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA sTCURDb21TdWJDQTEbMBkGA1UEAxMSU1NMIEludGVyb3AgUG9saWN5MIIBQDAXDBJzc2xfZGV2aWN lX2ludGVyb3ACAXQwggEjMIGQMAoMBHNzbDECAgDMMIGBMQswCQYDVQQGEwJVUzEQMA4GA1UEChMH RW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvb VN1YkNBMSgwJgYDVQQDEx9TU0wgSW50ZXJvcCBWZXJpZmljYXRpb24gUG9saWN5MIGNMAoMBHNzbD ICAgDNMH8xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F 0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJjAkBgNVBAMTHVNTTCBJbnRlcm9w IEVuY3J5cHRpb24gUG9saWN5MEMwFwwSdnBuX2NsaWVudF9tYWNoaW5lAgEhMCgwETAPDApFbmNye 
XB0aW9uAgE6MBMwEQwMVmVyaWZpY2F0aW9uAgE7MIGiMBQMD3Zwbl9jbGllbnRfdXNlcgIBGTCBiT CBhjAPDApEdWFsIFVzYWdlAgEvMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAY DVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNV BAMTEUR1YWwgVXNhZ2UgUG9saWN5MDgwDAwHdnBuX2RpcgIBCjAoMBEwDwwKRW5jcnlwdGlvbgIBD zATMBEMDFZlcmlmaWNhdGlvbgIBEDA6MA4MCXZwbl9ub2RpcgIBCzAoMBEwDwwKRW5jcnlwdGlvbg IBETATMBEMDFZlcmlmaWNhdGlvbgIBEjA6MA4MCXdlYl9hZF9kYwIBHzAoMBEwDwwKRW5jcnlwdGl vbgIBNjATMBEMDFZlcmlmaWNhdGlvbgIBNzA/MBMMDndlYl9hZF9kY19jbHMxAgFDMCgwETAPDApF bmNyeXB0aW9uAgFvMBMwEQwMVmVyaWZpY2F0aW9uAgFwMD8wEwwOd2ViX2FkX2RjX2NsczICAUQwK DARMA8MCkVuY3J5cHRpb24CAXEwEzARDAxWZXJpZmljYXRpb24CAXIwggE+MBAMC3dlYl9hZF9zdn IyAgFhMIIBKDCBjzAQDApFbmNyeXB0aW9uAgIAsTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGTMBIMDFZlcmlmaWNhdGlvb gICALIwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0xTIDJ5ciBWZXJ pZmljYXRpb24gUG9saWN5MIIBLjAQDAt3ZWJfYWRfc3ZyMwIBYjCCARgwgYcwEAwKRW5jcnlwdGlv bgICALMwczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljY XRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbi BQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAtDB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MD8wEwwOd2ViX2FpX2Ntc19jbGkCAT4wK DARMA8MCkVuY3J5cHRpb24CAWUwEzARDAxWZXJpZmljYXRpb24CAWYwPjASDA13ZWJfYWlfY21zX2 RzAgE/MCgwETAPDApFbmNyeXB0aW9uAgFnMBMwEQwMVmVyaWZpY2F0aW9uAgFoMD8wEwwOd2ViX2F pX2Ntc19zdnICAT0wKDARMA8MCkVuY3J5cHRpb24CAWMwEzARDAxWZXJpZmljYXRpb24CAWQwPDAO DAl3ZWJfYmFzaWMCAWswKjASMBAMCkVuY3J5cHRpb24CAgC+MBQwEgwMVmVyaWZpY2F0aW9uAgIAv zBCMBQMDndlYl9jbGlzdnJfZXhwAgIAgjAqMBIwEAwKRW5jcnlwdGlvbgICAOQwFDASDAxWZXJpZm 
ljYXRpb24CAgDlMDkwDQwId2ViX2NsczECAUUwKDARMA8MCkVuY3J5cHRpb24CAXMwEzARDAxWZXJ pZmljYXRpb24CAXQwOTANDAh3ZWJfY2xzMgIBRjAoMBEwDwwKRW5jcnlwdGlvbgIBdTATMBEMDFZl cmlmaWNhdGlvbgIBdjA+MBIMDXdlYl9jbXNjbGllbnQCAUEwKDARMA8MCkVuY3J5cHRpb24CAWswE zARDAxWZXJpZmljYXRpb24CAWwwRDAXDBJ3ZWJfY21zY2xpZW50X2NsczECAUswKTARMA8MCkVuY3 J5cHRpb24CAX8wFDASDAxWZXJpZmljYXRpb24CAgCAMEUwFwwSd2ViX2Ntc2NsaWVudF9jbHMyAgF MMCowEjAQDApFbmNyeXB0aW9uAgIAgTAUMBIMDFZlcmlmaWNhdGlvbgICAIIwPjASDA13ZWJfY21z c2VydmVyAgFCMCgwETAPDApFbmNyeXB0aW9uAgFtMBMwEQwMVmVyaWZpY2F0aW9uAgFuMEUwFwwSd 2ViX2Ntc3NlcnZlcl9jbHMxAgFNMCowEjAQDApFbmNyeXB0aW9uAgIAgzAUMBIMDFZlcmlmaWNhdG lvbgICAIQwRTAXDBJ3ZWJfY21zc2VydmVyX2NsczICAU4wKjASMBAMCkVuY3J5cHRpb24CAgCFMBQ wEgwMVmVyaWZpY2F0aW9uAgIAhjA9MBEMDHdlYl9jb2Rlc2lnbgIBHjAoMBEwDwwKRW5jcnlwdGlv bgIBNDATMBEMDFZlcmlmaWNhdGlvbgIBNTBCMBYMEXdlYl9jb2Rlc2lnbl9jbHMxAgFJMCgwETAPD ApFbmNyeXB0aW9uAgF7MBMwEQwMVmVyaWZpY2F0aW9uAgF8MEIwFgwRd2ViX2NvZGVzaWduX2Nscz ICAUowKDARMA8MCkVuY3J5cHRpb24CAX0wEzARDAxWZXJpZmljYXRpb24CAX4wggEsMBAMC3dlYl9 kZWZhdWx0AgEcMIIBFjCBhjAPDApFbmNyeXB0aW9uAgEwMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQK EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ 29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBMT B1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiB BdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9s aWN5MCwwEwwOd2ViX29uZWtleXBhaXICATwwFTATMBEMDFZlcmlmaWNhdGlvbgIBYjA7MA8MCndlY l9zZXJ2ZXICAR0wKDARMA8MCkVuY3J5cHRpb24CATIwEzARDAxWZXJpZmljYXRpb24CATMwKjAQDA t3ZWJfc2VydmVyMgIBdTAWMBQwEgwMVmVyaWZpY2F0aW9uAgIAzjBEMBYMEHdlYl9zZXJ2ZXJfYmF zaWMCAgCDMCowEjAQDApFbmNyeXB0aW9uAgIA5jAUMBIMDFZlcmlmaWNhdGlvbgICAOcwQDAUDA93 ZWJfc2VydmVyX2NsczECAUcwKDARMA8MCkVuY3J5cHRpb24CAXcwEzARDAxWZXJpZmljYXRpb24CA XgwggFAMBQMD3dlYl9zZXJ2ZXJfY2xzMgIBSDCCASYwgY4wDwwKRW5jcnlwdGlvbgIBeTB7MQswCQ YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J 
pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9s aWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBejB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS QwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggEyMBQMD3dlYl9zZXJ2ZXJfY2x zMwIBYDCCARgwgYcwEAwKRW5jcnlwdGlvbgICAK8wczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td WJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAsDB1MQ swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5 MIIBQjAUDA93ZWJfc2VydmVyX2NsczQCAV8wggEoMIGPMBAMCkVuY3J5cHRpb24CAgCtMHsxCzAJB gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2x pY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArjB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVz dDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBM SQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwRjAYDBN3ZWJfc2VydmVyX2V4cG VyaWFuAgFuMCowEjAQDApFbmNyeXB0aW9uAgIAwzAUMBIMDFZlcmlmaWNhdGlvbgICAMQwQDAUDA9 3ZWJfc2VydmVyX2hpZ2gCATswKDARMA8MCkVuY3J5cHRpb24CAWAwEzARDAxWZXJpZmljYXRpb24C AWEwGwYJKoZIhvZ9B000MQ4wDAYKKoZIhvZ9B001ATAhMB8GA1UdIwQYMBaAFDy++9gIa1JL8T+Oh 9HW5F160lV9MA0GCSqGSIb3DQEBBQUAA4IBAQBelvaP82tFhjcHOTSDP97QLcqo2yE9RjjLtC/In8 u/Zi/8y6jR9GRE11U6GbF+5+EJ5pckTMJ8Oorn3ZVOl4dKyzTN9m2rLjdUXNWd/th8Ja1RD/9hpMD o5HUUYJEoOQxufTZnWfEZ2AISB7rXLCFZpdHGvc3H2ORtkhV+SuTmLpNkN1Zsbv8TXNi4szuX5sbA y/mX7G8q0Twbb+GGpZjlKV226xc2Ejy3uYGrUK0kEr6u/ONTK1844vsuZPkcJOMcj7/c4o8oKKVMT Fyafl1swsxHWn6MTh6WqI5k2LBcyEZSptDcG1brE7BU1JAOE9F7nkaoOOWefJs3n7B8piLg crossCertificatePair;binary:: MIIGUqCCBk4wggZKMIIFMqADAgECAgRIwMPgMA0GCSqGSIb3 DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY 
2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTEwMDQyMDE0NDQwNloXDT MwMDMyMDE1MTQwNlowVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAOMj486WAJ+GC3aOTn7g1p3+tzHJ8YUAoLW0y4WC6eleA+Yq9M+FP Xlo+E6AMak4+HENfQMBa5bUgqJMGL20ZOktm0jpMtGtbS/J6Y9TrujpysVnO4SZwuWJOlwV+DLfgH JYFcE/oeVej/TcoQw+zV0RkeDVA4npgOw5FWKzPlnKANF8KN598KK92jx+p60egFYyIY04MknO/cH APZXT7tVIp1ljyHyNwMPWiwYdyVdR7IkrFQrb55lHEj4/KdHoLISe4/sQB1Yw6S9fz+A7HhF3BBkb tNJk+jfjDL2/hNq0VP9b9zURJKSGEUTBaoAbvcWw7p7v2t7VOTB5Wb496SECAwEAAaOCAxswggMXM A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ vb3RDQS5wN2MwggFUBgNVHR8EggFLMIIBRzCB06CB0KCBzYY4aHR0cDovL2Rjb213ZWIxLm1hbmFn ZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBMS5jcmyGgZBsZGFwOi8vZGNvbWRpcjEubWFuY WdlZC5lbnRydXN0LmNvbS9jbj1XaW5Db21iaW5lZDEsb3U9RENvbVJvb3RDQSxvdT1DZXJ0aWZpY2 F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGl zdDtiaW5hcnkwb6BtoGukaTBnMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UE CxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczETMBEGA1UECxMKRENvbVJvb3RDQTENMAsGA1UEA xMEQ1JMMTAfBgNVHSMEGDAWgBRFx/xyHQhRD4vvL4V0iTRGDDP/JTAdBgNVHQ4EFgQUPL772AhrUk vxP46H0dbkXXrSVX0wGQYJKoZIhvZ9B0EABAwwChsEVjcuMQMCAIEwDQYJKoZIhvcNAQEFBQADggE BAJQrdloQCgTe0ahJyTU/fsKLzYXVGJOwnrwyof/+7emUfZS/OhKYuCfQ9w/wWLhT5SUzm9GDlUfk YUfpL+/5joymDJO8/thcEq/k2PJepSFf7IMY8635kNz27kI9fA8JQGn7nEI8WBjX26qs7Ho7QKVkv 6YEDuGeJwBLTGyNerDEf5n+DdMvrDmVAOs62T8uTZDb9gn/uIEGv3vaR+rs3KxvDhEr/2OFJtDWHw 
PdHFOrr1pNkNWqdStwoE2/fxUfccQhLn+H5GgKLD7YT74uUCi+VFP1juV3F7jUlytgtMnnbqRIbDn 4bMPn2HOmxdQ20amsdKX4bfosqFMepfSxWRQ= crossCertificatePair;binary:: MIIGQaCCBj0wggY5MIIFIaADAgECAgRIwJY0MA0GCSqGSIb3 DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY 2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTA4MDkwNTE4MDQxMVoXDT E4MDkwNTAyMTMzN1owVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAL+MSY0GXRSMIIm5l+bMpXvk8rlG/Rjqaw0TNZ2w+KsG6ktNWXDll A1i1l0Fvx2qj4O/z5bNfgmUmJZFamyWOS6TkwX2C+2DspI7P3a+gVTVu+7VJkevo3Hye2Pd6bAf/+ bfV2IhSyAOe0wW0sANyQrIjzsU1r6YBjpcT1E5QZdnzSrEYRoBhJGXf8/v+Zu21AqOZ9EpagpvmsZ 4UI8ORFg2PV0UOmnwNkMVO21JH1sUGYfKP9JAoO8vTzgwYbDN1w5DMC7SqWBl00OF6pGGaglJ5D16 OcopR8aZVePxj+dW+MADgEufai5CqhUKZ6CA1pa+P6c1lPcFEGgz9AQS420CAwEAAaOCAwowggMGM A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ vb3RDQS5wN2MwggFDBgNVHR8EggE6MIIBNjCBwqCBv6CBvIaBgGxkYXA6Ly9kY29tZGlyMS5tYW5h Z2VkLmVudHJ1c3QuY29tL291PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvcml0a WVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5hjdodHRwOi 8vZGNvbXdlYjEubWFuYWdlZC5lbnRydXN0LmNvbS9DUkxzL0RDb21Sb290Q0EuY3JsMG+gbaBrpGk wZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24g QXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0jBBgwF oAUh1mBY1JFXsCw39HI6bl1OBAu3tkwHQYDVR0OBBYEFPQWLgG7q5AZpChCDZ3AH5yvEIYrMBkGCS qGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCrafi2DFqdhpXtzeJpUgZ 
glNOwZUBOp5thJUH7+yMcgl5Ka4JIqqNpw3ZbFPFT9Ni4IzDmJYyPgqHmgRubxFWpAHdP8SjEK7pl 6DwDmbCAWBiq7SmSfqt502FUUyiTcZsCLi6GqE4fetej41t3NaGidqyVQXPJ26Ti2jNT4NzRnADi6 vOzMzxMSkWH1OaHoGLtTVpIjkbJZygnSmof4+gs4M1fmH4FVTcWV6t8zbTwkH4RTYSHVX04aM4ZBp nhMq6sk9uNL+qndpWkO7u7zr6K527kl6/t1Xr9/vnzD0ACVk/gluI7MvCUIzP55o01Rp90ZCMIMak u0qrESgh0GXln cACertificate;binary:: MIIGSjCCBTKgAwIBAgIESMDD4DANBgkqhkiG9w0BAQUFADBYMQswCQY DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0xMDA0MjAxNDQ0MDZaFw0zMDAzMjAxNTE0MDZaM FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE KAoIBAQDjI+POlgCfhgt2jk5+4Nad/rcxyfGFAKC1tMuFgunpXgPmKvTPhT15aPhOgDGpOPhxDX0D AWuW1IKiTBi9tGTpLZtI6TLRrW0vyemPU67o6crFZzuEmcLliTpcFfgy34ByWBXBP6HlXo/03KEMP s1dEZHg1QOJ6YDsORVisz5ZygDRfCjeffCivdo8fqetHoBWMiGNODJJzv3BwD2V0+7VSKdZY8h8jc DD1osGHclXUeyJKxUK2+eZRxI+PynR6CyEnuP7EAdWMOkvX8/gOx4RdwQZG7TSZPo34wy9v4TatFT /W/c1ESSkhhFEwWqAG73FsO6e79re1TkweVm+PekhAgMBAAGjggMbMIIDFzAOBgNVHQ8BAf8EBAMC AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBVA YDVR0fBIIBSzCCAUcwgdOggdCggc2GOGh0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29 tL0NSTHMvRENvbVJvb3RDQTEuY3JshoGQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5j b20vY249V2luQ29tYmluZWQxLG91PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvc ml0aWVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5MG+gba BrpGkwZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXR 
pb24gQXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0j BBgwFoAURcf8ch0IUQ+L7y+FdIk0Rgwz/yUwHQYDVR0OBBYEFDy++9gIa1JL8T+Oh9HW5F160lV9M BkGCSqGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCUK3ZaEAoE3tGoSc k1P37Ci82F1RiTsJ68MqH//u3plH2UvzoSmLgn0PcP8Fi4U+UlM5vRg5VH5GFH6S/v+Y6MpgyTvP7 YXBKv5NjyXqUhX+yDGPOt+ZDc9u5CPXwPCUBp+5xCPFgY19uqrOx6O0ClZL+mBA7hnicAS0xsjXqw xH+Z/g3TL6w5lQDrOtk/Lk2Q2/YJ/7iBBr972kfq7Nysbw4RK/9jhSbQ1h8D3RxTq69aTZDVqnUrc KBNv38VH3HEIS5/h+RoCiw+2E++LlAovlRT9Y7ldxe41JcrYLTJ526kSGw5+GzD59hzpsXUNtGprH Sl+G36LKhTHqX0sVkU cACertificate;binary:: MIIGOTCCBSGgAwIBAgIESMCWNDANBgkqhkiG9w0BAQUFADBYMQswCQY DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0wODA5MDUxODA0MTFaFw0xODA5MDUwMjEzMzdaM FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE KAoIBAQC/jEmNBl0UjCCJuZfmzKV75PK5Rv0Y6msNEzWdsPirBupLTVlw5ZQNYtZdBb8dqo+Dv8+W zX4JlJiWRWpsljkuk5MF9gvtg7KSOz92voFU1bvu1SZHr6Nx8ntj3emwH//m31diIUsgDntMFtLAD ckKyI87FNa+mAY6XE9ROUGXZ80qxGEaAYSRl3/P7/mbttQKjmfRKWoKb5rGeFCPDkRYNj1dFDpp8D ZDFTttSR9bFBmHyj/SQKDvL084MGGwzdcOQzAu0qlgZdNDheqRhmoJSeQ9ejnKKUfGmVXj8Y/nVvj AA4BLn2ouQqoVCmeggNaWvj+nNZT3BRBoM/QEEuNtAgMBAAGjggMKMIIDBjAOBgNVHQ8BAf8EBAMC AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBQw YDVR0fBIIBOjCCATYwgcKggb+ggbyGgYBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmN vbS9vdT1EQ29tUm9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3Qs 
Yz1VUz9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0O2JpbmFyeYY3aHR0cDovL2Rjb213ZWIxLm1hb mFnZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBLmNybDBvoG2ga6RpMGcxCzAJBgNVBAYTAl VTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRM wEQYDVQQLEwpEQ29tUm9vdENBMQ0wCwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFIdZgWNSRV7AsN/R yOm5dTgQLt7ZMB0GA1UdDgQWBBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzAZBgkqhkiG9n0HQQAEDDAKG wRWNy4xAwIAgTANBgkqhkiG9w0BAQUFAAOCAQEAq2n4tgxanYaV7c3iaVIGYJTTsGVATqebYSVB+/ sjHIJeSmuCSKqjacN2WxTxU/TYuCMw5iWMj4Kh5oEbm8RVqQB3T/EoxCu6Zeg8A5mwgFgYqu0pkn6 redNhVFMok3GbAi4uhqhOH3rXo+NbdzWhonaslUFzyduk4tozU+Dc0ZwA4urzszM8TEpFh9Tmh6Bi 7U1aSI5GyWcoJ0pqH+PoLODNX5h+BVU3FlerfM208JB+EU2Eh1V9OGjOGQaZ4TKurJPbjS/qp3aVp Du7u86+iudu5Jev7dV6/f758w9AAlZP4JbiOzLwlCMz+eaNNUafdGQjCDGpLtKqxEoIdBl5Zw== objectClass: organizationalUnit objectClass: top objectClass: extensibleobject ou: binary nsUniqueId: f49ca103-c2ee11e7-9170b029-e68fda34 creatorsName: modifiersName: createTimestamp: 20171106123544Z modifyTimestamp: 20171106123544Z # entry-id: 3 dn: cn=test,ou=binary,dc=example,dc=com userCertificate:: MIIGfzCCBWcCAQEwgYOhgYAwfqR8MHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQK EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ 29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGljeTBnoGUwYzBbpFkwVzELMA kGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9 yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQQIESMDD4DANBgkqhkiG9w0BAQUFAAIEV4eo1TAiGA8y MDE3MTAxNTIyNDYwOVoYDzIwMTcxMTE0MjI0NjA5WjCCBBUwHwYJKoZIhvZ9B00BMRIwEAIBAAIBC AIBCAIBCgMCAGkwFAYJKoZIhvZ9B00DMQcwBQwDQUxMMBEGCSqGSIb2fQdNBTEEAwID2DAPBgkqhk iG9n0HTQYxAgwAMBcGCSqGSIb2fQdNCTEKDAhSU0EtMjA0ODApBgkqhkiG9n0HTQ4xHDAaDAlwcml udGFibGUMB3RlbGV0ZXgMBHV0ZjgwEQYJKoZIhvZ9B00PMQQDAgeAMBEGCSqGSIb2fQdNFTEEAwIH gDAQBgkqhkiG9n0HTRYxAwMBADAQBgkqhkiG9n0HTQgxAwMBADAQBgkqhkiG9n0HTSwxAwMBADAPB gkqhkiG9n0HTQsxAjAAMBAGCSqGSIb2fQdNDDEDAwEAMBAGCSqGSIb2fQdNDTEDAgEeMA8GCSqGSI 
b2fQdNEzECDAAwEAYJKoZIhvZ9B00XMQMBAQAwEQYJKoZIhvZ9B00YMQQCAgfQMBAGCSqGSIb2fQd NHzEDAQEAMBAGCSqGSIb2fQdNJjEDAwEAMBAGCSqGSIb2fQdNGTEDAgECMBAGCSqGSIb2fQdNGzED AQEAMBAGCSqGSIb2fQdNKTEDAQEAMBAGCSqGSIb2fQdNHDEDAgEAMBAGCSqGSIb2fQdNHTEDAgEBM BAGCSqGSIb2fQdNIDEDAwEAMBEGCSqGSIb2fQdNITEEAwIE8DAPBgkqhkiG9n0HTSMxAgwAMA8GCS qGSIb2fQdNJDECDAAwJAYJKoZIhvZ9B00lMRcwFQwJRGlyZWN0b3J5DANFQUIMA0dBTDAQBgkqhki G9n0HTSsxAwMBADAPBgkqhkiG9n0HTTYxAgwAMBEGCSqGSIb2fQdNMzEEAwIHgDAPBgkqhkiG9n0H TScxAgwAMBAGCSqGSIb2fQdNETEDAgECMBAGCSqGSIb2fQdNKDEDAgFkMBEGCiqGSIb2fQdNLQExA wIBAzBEBgoqhkiG9n0HTS0CMTYwNAwMZW50ZWxsaWdlbmNlDAZkaXJlY3QMCHpmLWxvY2FsDAp6Zi 1yb2FtaW5nDAZ6Zi1tc2YwFwYKKoZIhvZ9B00tAzEJDAdleGVjdXRlMBAGCSqGSIb2fQdNMTEDAQE AMBAGCSqGSIb2fQdNMjEDAQEAMBAGCSqGSIb2fQdNOTEDAQH/MA8GCSqGSIb2fQdNLzECDAAwEAYJ KoZIhvZ9B004MQMBAQAwEwYJKoZIhvZ9B003MQYMBENBU1QwEAYJKoZIhvZ9B007MQMBAQAwFgYJK oZIhvZ9B009MQkMB0VudHJ1c3QwEAYJKoZIhvZ9B00+MQMBAQAwEAYJKoZIhvZ9B00/MQMBAQAwFw YJKoZIhvZ9B00KMQoMCFJTQS0yMDQ4MBAGCSqGSIb2fQdNQzEDAQEAMCEwHwYDVR0jBBgwFoAUPL7 72AhrUkvxP46H0dbkXXrSVX0wDQYJKoZIhvcNAQEFBQADggEBADrezRWX0fuPC415BUa3tafMLaVO 24v3CP+qYud4Z6IKI7jNtt2pcneaYjQ7iaxypE3N7Wwlim6Ak4yuwwJ9SrKOSe7YPiFOuugvNy2fk +f2h3bFkLm40bkjPPH8bih4sLyU8RcN2cAJLxHINwXO3ALKBo3IdxrfcoKquO7g+R4+ZPvmS/95J9 aQ08FZKpkv+ORPRZySkr0zMUARdBBguklHqFeczn5tQnmJcsfVlP4DC7IPqw2xM8l3b+iAH5pyqgb o/Lk11VWkD11s3K8/Bf40eH23upDOwmYBAszHdXU4+5HNZ/An6xfVEjr/+KxUAEVD5TGQMVJY6SCS zN3ONRc= objectClass: top objectClass: extensibleobject cn: test nsUniqueId: f49ca104-c2ee11e7-9170b029-e68fda34 creatorsName: modifiersName: createTimestamp: 20171106123544Z modifyTimestamp: 20171106123544Z 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/000077500000000000000000000000001421664411400225475ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/ca.crt000066400000000000000000000023111421664411400236410ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2peswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC 
QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU0MzJaFw00MjAyMTEwMjU0MzJaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRdGVzdHMuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQCr2vsHEGtvlishhWeAU+qhPbdoJ6CBW6Dk7APlvwuOaAls4BA6I7CX ZG2tbaK38TuB1rB21/KOciTcy7TaF9X6OW+6Hkb4gGMnpy4sRbw4CKIfkNsCZ5av bQ9fsRbgM0q72YPjZlzO6tuvLimOLolhmSiSS00Ll20CteMMWZ/ApGBl163iohD4 pFWJhtyYG9DnZp5N6T3yHDFsrIyil2+G6ZSTOObRwXUEvHeZcGRiG6Py9t3vDOSg IUKYcgyihg9boEHVe76wHfMm6i3ELa7/QeVJNofbiPso6doqD0V+qmGhZsmpjP56 RcBR85ijo/eprohjDNXHAOUgdZ7K9DqrAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEASH8xxpue07K1K8T5SLDUT8iaBnCwub6s8atfqPbR xb2vdIX0p6WN+kmsNNsafyQYYz+M5LdMSeaTrzj52zvKvZ/5bgc+VqLXx35khaQU 0RgNgKxDgeY2vGVPFHDSNhJvBTtMxksUK0otW8tF70bTZEp2whkoHCu1nAXuEzaX BeglYO6YRtuY71u84gvd8vtq2Zy0sb5vG7uWn2ZTpA5maCK58r9XpUdpjyA4qhFB ClwQ45UzkLzTbolioT10N7Xp5clLzqiLYexFuoZhK2HROvgr8EFF7xl17qwVbTkt ZsURjsTOrWKVLiVn9AuCHeToPosZr4/pWWjFoweO+yfZEg== -----END CERTIFICATE----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/cert9.db000066400000000000000000001100001421664411400240740ustar00rootroot00000000000000SQLite format 3@  .WJ ø è óÅ ° j * è@YindexckaidnssPublicCREATE INDEX ckaid ON nssPublic (a102)>UindexlabelnssPublicCREATE INDEX label ON nssPublic (a3)D]indexsubjectnssPublicCREATE INDEX subject ON nssPublic (a101)AYindexissuernssPublicCREATE INDEX issuer ON nssPublic (a81)‡OŽmtablenssPublicnssPublicCREATE TABLE nssPublic (id PRIMARY KEY UNIQUE ON CONFLICT ABORT, a0, a1, a2, a3, a10, a11, a12, a80, a81, a82, a83, a84, a85, a86, a87, a88, a89, a8a, a8b, a90, a100, a101, a102, a103, a104, a105, a106, a107, a108, a109, a10a, a10b, a10c, a110, a111, a120, a121, a122, a123, a124, a125, a126, a127, a128, a129, a130, a131, a132, a133, a134, a160, a161, a162, a163, a164, a165, a166, a170, a180, a181, a200, a201, a202, a210, a300, a301, 
a302, a400, a401, a402, a403, a404, a405, a406, a480, a481, a482, a500, a501, a502, a503, a40000211, a40000212, a80000001, ace534351, ace534352, ace534353, ace534354, ace534355, ace534356, ace534357, ace534358, ace534364, ace534365, ace534366, ace534367, ace534368, ace534369, ace534373, ace534374, ace536351, ace536352, ace536353, ace536354, ace536355, ace536356, ace536357, ace536358, ace536359, ace53635a, ace53635b, ace53635c, ace53635d, ace53635e, ace53635f, ace536360, ace5363b4, ace5363b5, ad5a0db00)1Eindexsqlite_autoindex_nssPublic_1nssPublicû û °øïæÝÔ˹°9¼Ý 9¼Ü9¼Û4º 4º4º/¡zß/¡zÞ /¡zÝ ‚ü'R ê ‚”,¿WgP0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.com gP0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.comgP0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comgP0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comgP0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comgP0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.com £Œ£¨‘ù„ fN0_1 0 UAU10U Queensland10 U389ds10U testing10Uleaf.example.com¥ZgP0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.com¥ZgP0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.com ¥Z ®î¶ÒùæÞÊ®¥Z testcrt¥Z¥Z testint¥Z¥Z testca ¥Z \Ê“\±˜èÏza 4þËô˜ÿüZ J!Ö:7e‹æB¬4þËô˜ÿüZ J!Ö:7e‹æB¬4eŠFp¶³/Øm¤fgx_`†$Ù94eŠFp¶³/Øm¤fgx_`†$Ù94ƒHæoåã 40|yHý¾!ñèâ3£4 ƒHæoåã 40|yHý¾!ñèâ3£ ·J …\¦à·‚&zP4,4º ÎSCS¥Z0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comºö¦‘ÎSCRÎSCRÎSCSÎSCS»n¢øàVÓœ£Ì™vxìGC’~S먴% >u0+’Hh‰C|0PP44ºtestint0‚N0‚6 ºö¦‘0  *†H†÷  0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.com0 220211025558Z 420211025558Z0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.com0‚"0  *†H†÷ ‚0‚ ‚ð¦÷ã÷ÆŸ7ģаr}Â*Èf—ö¹—¥ 8lïæ¹ ³gŒí8Ñs›»&Äé“4ò…Š\áý)Фäb`s/U]X„´ˆ§$*ᳪ³Ð?ÔNk/”§»“N:^r~òJ¨¤qÜŠš‹|:”çä—hµãÖʧj—’U©Þ³H £våC{z†šÅˤt°¾?àmÚ‘~5¥&ÄOc coôÛ$±bñÜ+¤ÌÓ-îé.ƒYå›[!ÝD³ÊýÑA Ôd‘îÓKš‹ _0Xü55¤ðPwEÝkÇ­<–·×ñjõ®Þæ4ê×¢YÀ”–FF~¢XÊxEø¼´ç =h„փΣ皹þ`zhf±ïA~Zer$¯¿ÍÄ 
n¤[ï¨U„ø¯¨GªéŸÝ°våœDÆ‚ãt„iÜb­Bÿj'Ø|±ýR´>eMg‹Ÿ@ýðþ5.WOB¢Ëx÷ûæ1×FTé –TùAùT©ƒˆnÓf/¿sI’ž°±dlZ/Ô»´0uó©ûï´ƒP®Ãe¨ï•; ݰ-m”1íáÂÊŽI)`Là{r¡è³)¢Ø4޾0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comºö¦‘0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.comeŠFp¶³/Øm¤fgx_`†$Ù9ƒ3z4„ 4º¥Z¥ZeŠFp¶³/Øm¤fgx_`†$Ù9¥Z¥Zð¦÷ã÷ÆŸ7ģаr}Â*Èf—ö¹—¥ 8lïæ¹ ³gŒí8Ñs›»&Äé“4ò…Š\áý)Фäb`s/U]X„´ˆ§$*ᳪ³Ð?ÔNk/”§»“N:^r~òJ¨¤qÜŠš‹|:”çä—hµãÖʧj—’U©Þ³H £våC{z†šÅˤt°¾?àmÚ‘~5¥&ÄOc coôÛ$±bñÜ+¤ÌÓ-îé.ƒYå›[!ÝD³ÊýÑA Ôd‘îÓKš‹ _0Xü55¤ðPwEÝkÇ­<–·×ñjõ®Þæ4ê×¢YÀ”–FF~¢XÊxEø¼V¢×Ç~d…¤Ñ €¬Cæ6¼eOpÒ6o;LÆK+J-[ËEïFÓdJvÂ(+µœî6—è%`î˜FÛ˜ï[¼â ÝòûjÙœ´±¾o»–ŸfS¤fh"¹ò¿W¥Gi 8ªA \ã•3¼Ón‰b¡=t7µéåÉKΨ‹aìEº†a+aÑ:ø+ðAEïuî¬m9-fÅŽÄέb•.%gô ‚äè>‹¯éYhÅ£Žû'Ù0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comºö¥ë0`1 0 UAU10U Queensland10 U389ds10U testing10Utests.example.comƒHæoåã 40|yHý¾!ñèâ3£ƒ3z4„ /¡zÝ¥Z¥ZƒHæoåã 40|yHý¾!ñèâ3£¥Z¥Z«Úûko–+!…g€Sê¡=·h' [ äìå¿ Žh là:#°—dm­m¢·ñ;Ö°v×òŽr$ÜË´ÚÕú9oºFø€c'§.,E¼8¢Ûg–¯m_±à3J»Ùƒãf\ÎêÛ¯.)Ž.‰a™(’KM —mµã YŸÀ¤`e×­â¢ø¤U‰†Ü˜ÐçfžMé=ò1l¬Œ¢—o†é”“8æÑÁu¼w™pdb£òöÝï ä !B˜r ¢†[ AÕ{¾°ó&ê-Ä-®ÿAåI6‡Ûˆû(éÚ*E~ªa¡fÉ©ŒþzEÀQ󘣣÷©®ˆc ÕÇå užÊô:« MJ vM‚& zP4,9¼ÝÎSCS¥Z0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.comºö§ÎSCSÎSCSÎSCSÎSCS úJ´qÀ|®dÀl?¯|×ZVé;Öwþ}¹7)a]Î/‰Q|NPN49¼Ütestcrt0‚]0‚E ºö§0  *†H†÷  0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.com0 220211025711Z 420211025711Z0_1 0 UAU10U Queensland10 U389ds10U testing10Uleaf.example.com0‚"0  *†H†÷ ‚0‚ ‚äºïo–ÓšåèIÀ!98”]P¬Ф¬²‘ÐÙ3Ù)±”þmç25γä\v硲68êô^³ áŽE+J“@ÞfW–yÑÀgªß[ B Ï®M34yKˆbaHp ðÐ5è'*'.$¦×ך¯²ï—­’QCz!"âÞŒ(Yh'ý âíÜЇNéç2tÕ÷b Ÿy(vm>¡m&ßôð±Ô ‚Ë+xÅBHŠ£!ö¥Ã׌ѮF¼Æ²†—õÊôOÛ›€pŸæ¡¦'>« :awA]H,l¡?oÄ™&+—1–º*è[ËÞ}•÷ Íý£00U0‚leaf.example.com0  *†H†÷  ‚Û²ï¡ñÀ Éÿ-øÃrp÷;&uú²HU|V%’—î’] ›@Æ™÷ž©‡ÓM`ÛMO”*uÓˆKH—z®Ø¯îévùÕêxØ@YÍô1ƒ¥2ÿ÷öŽñY¾;ot‚ØŽáª^znå `³ÎœïüDFЧ¸äêL#{W‰óRr`ƒW Äš´ÉÌS¦ˆp#÷Ž\ôõy +Mù×UzÂ6JE@ßÓ9¡ù¬5Ï0¥IÁ€ÕúÒ7Û<ŠþR °•o 0©‡¸¿Mßæ,L•„}ïð*R¥5ùç¶kŠîÁ€ïþïú¢ʱ,4É‚eÞï+±ƒP$d¦KÐ<0`1 0 UAU10U Queensland10 U389ds10U testing10Uinter.example.comºö§0_1 0 UAU10U 
Queensland10 U389ds10U testing10Uleaf.example.comþËô˜ÿüZ J!Ö:7e‹æB¬ƒ3z4„ 9¼Û¥Z¥ZþËô˜ÿüZ J!Ö:7e‹æB¬¥Z¥Zäºïo–ÓšåèIÀ!98”]P¬Ф¬²‘ÐÙ3Ù)±”þmç25γä\v硲68êô^³ áŽE+J“@ÞfW–yÑÀgªß[ B Ï®M34yKˆbaHp ðÐ5è'*'.$¦×ך¯²ï—­’QCz!"âÞŒ(Yh'ý âíÜЇNéç2tÕ÷b Ÿy(vm>¡m&ßôð±Ô ‚Ë+xÅBHŠ£!ö¥Ã׌ѮF¼Æ²†—õÊôOÛ›€pŸæ¡¦'>« :awA]H,l¡?oÄ™&+—1–º*è[ËÞ}•÷ Íý389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/int.crt000066400000000000000000000023111421664411400240500ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2ppEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU1NThaFw00MjAyMTEwMjU1NThaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRaW50ZXIuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDwpvfj98afN43Eo4qwgXJ9wioeyGaX9rmXpQs4bO/muaCzZ4ztONFz m7smxOmTNPKFilwC4f0p0KQB5GJgcy9VD10VWIS0iKckKuGzqrPQP9ROawIvlKe7 k046XnIHfvJKFaikcQfcipqLfDqU5+SXaLXj1sqnEWqXklUDqd6zSB4Ko3blQ3t6 hh2axcudpHSwvj/gbdqNkX41pSbET2MJY2/025AksWLx3CukzNMRLe7pLoNZ5Ztb Id1EsxHK/dFBCtRkke7TAUuai6BfMFj8NTWk8FB3Rd1rx608lo+31/Fq9a7e5jTq 16JZwJQVlkZGCH6iWB8VynhF+Lw8VZMPAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEAejpy+M0NsmN/SSlXDMK6Xly0ef8vxGcF2crjdwtT BrhEFm0hYmiCrJAmtr67lTdgvxM/lpQ+tOcgPWiE1oPOo+eauf5gj4F6aGYBse8I QZ1+WmVyJK+/zRXECo9upFsE76hVhPivqEeq6Z/dsHblnESNxoLjdIRp3JBirUL/ aifYfLH9UrQ+ZU1nCIufQP3w/jUuB1dPQgaiy3gG9/sA5jEd10ZU6QyWVB2B+UH5 VKkBg4hu0xJmL79zSZIFnrCxZGxaL9S7BrQcEDB186kQ+++0g1CuncMbZajvlTsg 3bAtbZQQMe3hwsoHjo1JKWAUTOB7cqEF6LMpotg0jn+Bvg== -----END CERTIFICATE----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/key4.db000066400000000000000000001700001421664411400237300ustar00rootroot00000000000000SQLite format 3@ .WJ ø < îà © a  Û < ªl+tablemetaDatametaDataCREATE TABLE metaData (id PRIMARY KEY UNIQUE ON CONFLICT REPLACE, item1, item2)/Cindexsqlite_autoindex_metaData_1metaData 
B![indexckaidnssPrivateCREATE INDEX ckaid ON nssPrivate (a102)@!WindexlabelnssPrivateCREATE INDEX label ON nssPrivate (a3)F!_indexsubjectnssPrivateCREATE INDEX subject ON nssPrivate (a101)C![indexissuernssPrivateCREATE INDEX issuer ON nssPrivate (a81)‡R!!ŽotablenssPrivatenssPrivateCREATE TABLE nssPrivate (id PRIMARY KEY UNIQUE ON CONFLICT ABORT, a0, a1, a2, a3, a10, a11, a12, a80, a81, a82, a83, a84, a85, a86, a87, a88, a89, a8a, a8b, a90, a100, a101, a102, a103, a104, a105, a106, a107, a108, a109, a10a, a10b, a10c, a110, a111, a120, a121, a122, a123, a124, a125, a126, a127, a128, a129, a130, a131, a132, a133, a134, a160, a161, a162, a163, a164, a165, a166, a170, a180, a181, a200, a201, a202, a210, a300, a301, a302, a400, a401, a402, a403, a404, a405, a406, a480, a481, a482, a500, a501, a502, a503, a40000211, a40000212, a80000001, ace534351, ace534352, ace534353, ace534354, ace534355, ace534356, ace534357, ace534358, ace534364, ace534365, ace534366, ace534367, ace534368, ace534369, ace534373, ace534374, ace536351, ace536352, ace536353, ace536354, ace536355, ace536356, ace536357, ace536358, ace536359, ace53635a, ace53635b, ace53635c, ace53635d, ace53635e, ace53635f, ace536360, ace5363b4, ace5363b5, ad5a0db00)3G!indexsqlite_autoindex_nssPrivate_1nssPrivateöûö   æøïæ9¼Ú4º /¡zÜ òü÷ò éùñé¥Z¥Z ¥Z éùñé¥Z¥Z ¥Z ¶Ïè¶4þËô˜ÿüZ J!Ö:7e‹æB¬4eŠFp¶³/Øm¤fgx_`†$Ù94 ƒHæoåã 40|yHý¾!ñèâ3£öûö 0  4 ßôåÆ íˆJ§i+ Þ ¿  æ C   b $ × ¸ þ ß z < ™ [ "š|^@Ö¸  ý “ u W 9 Ï ±  ö Œ n P 2 È 
ªAsig_cert_3917bcdd_ce5363594Asig_cert_3917bcdd_ce5363583Asig_cert_3917bcdd_ce5363b52Asig_cert_3917bcdd_ce53635b1Asig_cert_3917bcdd_ce5363b40Asig_cert_3917bcdd_ce53635a/Asig_cert_3917bcdd_ce536360.Asig_cert_3917bcdb_00000122-Asig_cert_3917bcdb_00000120,?sig_key_3917bcda_00000122+?sig_key_3917bcda_00000120*?sig_key_3917bcda_00000126)?sig_key_3917bcda_00000125(?sig_key_3917bcda_00000124'?sig_key_3917bcda_00000123&?sig_key_3917bcda_00000128%?sig_key_3917bcda_00000127$Asig_cert_34ba1420_ce536359#Asig_cert_34ba1420_ce536358"Asig_cert_34ba1420_ce5363b5!Asig_cert_34ba1420_ce53635b Asig_cert_34ba1420_ce5363b4Asig_cert_34ba1420_ce53635aAsig_cert_34ba1420_ce536360Asig_cert_34ba141e_00000122Asig_cert_34ba141e_00000120?sig_key_34ba141d_00000122?sig_key_34ba141d_00000120?sig_key_34ba141d_00000126?sig_key_34ba141d_00000125?sig_key_34ba141d_00000124?sig_key_34ba141d_00000123?sig_key_34ba141d_00000128?sig_key_34ba141d_00000127Asig_cert_2fa17adf_ce536359Asig_cert_2fa17adf_ce536358Asig_cert_2fa17adf_ce5363b5Asig_cert_2fa17adf_ce53635bAsig_cert_2fa17adf_ce5363b4Asig_cert_2fa17adf_ce53635a Asig_cert_2fa17adf_ce536360 Asig_cert_2fa17add_00000122 Asig_cert_2fa17add_00000120 ?sig_key_2fa17adc_00000122 ?sig_key_2fa17adc_00000120?sig_key_2fa17adc_00000126?sig_key_2fa17adc_00000125?sig_key_2fa17adc_00000124?sig_key_2fa17adc_00000123?sig_key_2fa17adc_00000128?sig_key_2fa17adc_00000127  password ””’i4„ †„„„„„„ /¡zÜ¥Z¥ZƒHæoåã 40|yHý¾!ñèâ3£¥Z¥Z«Úûko–+!…g€Sê¡=·h' [ äìå¿ Žh là:#°—dm­m¢·ñ;Ö°v×òŽr$ÜË´ÚÕú9oºFø€c'§.,E¼8¢Ûg–¯m_±à3J»Ùƒãf\ÎêÛ¯.)Ž.‰a™(’KM —mµã YŸÀ¤`e×­â¢ø¤U‰†Ü˜ÐçfžMé=ò1l¬Œ¢—o†é”“8æÑÁu¼w™pdb£òöÝï ä !B˜r ¢†[ AÕ{¾°ó&ê-Ä-®ÿAåI6‡Ûˆû(éÚ*E~ªa¡fÉ©ŒþzEÀQ󘣣÷©®ˆc ÕÇå užÊô:«0‚„0n *†H†÷  0a0B *†H†÷  05 ®« –ë–FA íñ¢‚œ*+äöÉß1|—*Iè' 0 *†H†÷  0 `†He*mE,#Wƒ Lm*,‚—xÎ]Έ´áœ§(@>j ½¼ñþÔ”ÄuäIeÖŒµ%·^lÞßæT”ÖßḓË»›÷-’aN!Ñ)‚[0"Ú:ëršéª^¤bòñ Hä8‰È)5ˆ©(àü—_-3'Y¹N³õ‘ß™÷{ãák¬‰oíŸàX>˜x|Åô’ÚL‘âk‹Öäg•Úo¡¦8XÙ`´»z°ª‚í¼ƒ½à=-h 2“þúo0+}M)ÈÎc î÷MFSPõ½y“>.g…k#ÅRbbôL›«ä³Ü™Zõ¥ï‘Äé瘀˜Dÿ°ðÙºv±<ȸW ó…míýTâõåÇÚ–ñ&Šñgë'¬ÍëòЬ0‚0n *†H†÷  0a0B 
*†H†÷  05 ÖÈ1µçE3šãqGÈucPðÍMaH‰å#ãÖÈD' 0 *†H†÷  0 `†He*gŠ¡u4k‹Ñ£Ýg×ÂðüB[1Kœwš–´P(¸ 4Jûþ5±5çS}ÀÉg1ó'ºO€v="ª*1êfVi áÊŠÿnÖu¦BJIèLvOžÍ Ç÷1]ÈK¯³³…ü±»œ0º xâýìóü“x¹á¤ßBà×`ùO“|7¹Çh|»ÎþM!xli•R¶ã7LwP6ÛÂy0‚0n *†H†÷  0a0B *†H†÷  05 +ßO»pŒ<š<›'»_‘‘Õ î|êL A¹ y³Ø¶' 0 *†H†÷  0 `†He*Wʰn!Z¨ò‹©”KÅ'¦üšÖÙÖ_pœËnÿŒîú­5-Ç(ÝKjâ¼Vÿ»¡7 všè»T̯كÙdyv]¢@5©…>V^ŸpéDDäú“ë k‘$,¦Rxó—¡:@ºPMWq±m«÷K»šè’Y[ä¢Õ¢ÆœUWXÁ5Íx¿Ã2TÅý<5×+*@Ç]ø±H00‚0n *†H†÷  0a0B *†H†÷  05 Viyæ<ÑdzdÊó”&Èàš"Ôo)d=#¨' 0 *†H†÷  0 `†He*ñËzá§‘ºgš)NaVd¨®Tˆ¸mQÑÏÕ?¦¢´×Ùޥ®^ÊfÍ*›‰3fߟd• %õS÷ïaª*®F:ø³Ö„lî΂f&^¾ÉOvà܉÷Äû{)=ûņ¿G¶É¢Ê8À³ï`f´æûJæ$Š Û$.¿ú» ¿„n\T+kµÁt€l¿0‚0n *†H†÷  0a0B *†H†÷  05 .*¨ÔþîÃ3Úä‰.<.dOŽù‘ÿI¶¬õ' 0 *†H†÷  0 `†He*)¬êþïõâŠxo‚p¡‡^ñ·DÔ$µ9»‡ææu¶Ƚ…¦ªÈwO§TµdmZW¨Ü®û1äösR=Λýky÷ 9mlI´#h){ñø²Ðß§'•ZþÉ¿û*árûç yDDõR¤ê}îÂÐÝž'N²à}³pœä¯™oÝ.* pöšœ½ÇåFô³' 0 *†H†÷  0 `†He*MòÖÈS†^—ãâúä¹× i5£ÍЉ±*GN¢}}êý¬Hp)̪V:ê® ­Ð6Zd–+TY;  DÛ"\¼Cw:F‚c´X]mˆ±êƒ)SŽÖqÑ8D„_ Hõ>wæ5é~Œ9Ndn­–ZøÞËzÏÖ‚»"ZŸøŽPa(‚A#Î/„’¬PÃÊuSq,ïfÊø0‚0n *†H†÷  0a0B *†H†÷  05 „¡{º2A~Ÿ¨cuž"±p©ÚeÏY¶ûç+' 0 *†H†÷  0 `†He*·É¥Óv3)RԎ´oäKä_Îó¬R‚-û¯5ÃH]ÖºÒUº1-§À×9íÍåÎXÿQ<‹í›'NobA<]SÌšÅÚcØÏjË1o¤÷#}çâZÀ;‡Ç@° §ÀC_K*%§ê¹ˆ±´ã¶ë³.¼Ž»8>‰à‘&.Þl¶?qåÏS诀t« Ðf$¥Åj÷}T¿0‚0n *†H†÷  0a0B *†H†÷  05 § !˜€²ËT"‰fÁM'Y­Þ?MWý¢1FVêò\' 0 *†H†÷  0 `†He*Ê6ö~ÿ+¾ò/;Ú/&TîbéLíǧ|Aˬ*´Ó׺LŠ(µ ˜§"XÊZ›½ñ³¢òÛÿí·í ¹ØSzri*ÿkö„G©Å9f>ysó“]vÚ—n–úfÔ"¾ìÓÐøZ.n/²‘ùîë€ì”éÄðÐZ“³Ã<ÊÄ·{¥’%ÍÈLœö68ÃRŽâ@È«Æxmö0‚0n *†H†÷  0a0B *†H†÷  05 ~úâÔw;}‚þuG:YÌÊAWã“2Û ;7•Ö²§Öx' 0 *†H†÷  0 `†He*ZsƒëÖÞ [à’ÀÒ ›* ‰î‹ÞʳÏW¤]ÄÊZ‡ŸQÈ2ãF·Aè"õ¹¼‚zˆÀ*¤lB±è™ý¥ûW´èÚ¯÷ø­TyäéT0î5Õª©í‘78ð4ÁqŠ9ÿþ¾`®!½ Ž¦²ô9 ¥';°t`VÏšÕh=Ãc Íø{.£M›QˆQ>†_Ò®ÅYöãeSO,D–Ì0‚0n *†H†÷  0a0B *†H†÷  05 UŒÐaJ|äsð¢ãØ¥ë¸YÅ@RV}ëX' 0 *†H†÷  0 `†He*ùbŸö–T¦\'»½IècôLï“L*œÒŽÕ‹ÃmÛÌõe(ƒd~Ä ÊÄ—h´áàNËQؼW>ÝÀ¡ÛïËk~ÖjjßæxC²YC²Æu"Ð}Äa’¢]Z(£¢^Íɷسn¼£NÕéNTHØR_ +°®óbúû‹ºŒã1öo¤ 5>&'|8ÿóÑs=–i…bí¼’äÛ©ð¦÷ã÷ÆŸ7ģаr}Â*Èf—ö¹—¥ 8lïæ¹ ³gŒí8Ñs›»&Äé“4ò…Š\áý)Фäb`s/U]X„´ˆ§$*ᳪ³Ð?ÔNk/”§»“N:^r~òJ¨¤qÜŠš‹|:”çä—hµãÖʧj—’U©Þ³H £våC{z†šÅˤt°¾?àmÚ‘~5¥&ÄOc coôÛ$±bñÜ+¤ÌÓ-îé.ƒYå›[!ÝD³ÊýÑA Ôd‘îÓKš‹ 
_0Xü55¤ðPwEÝkÇ­<–·×ñjõ®Þæ4ê×¢YÀ”–FF~¢XÊxEø¼ç¼Ju&ÂÌ·£.2GÌ(å÷#J¦*' 0 *†H†÷  0 *†H†÷   ì‡&[Ú½iŽ»[Õ"ôÆfîÉáñªÅÏ ÷ÖL' 0 *†H†÷  0 *†H†÷   &SÐïßñYýkzJmÝÍ™—–y{CW~o‰ŽÞ2AE5"?‚sig_key_2fa17adc_0000012800] *†H†÷ 0P0B *†H†÷  05 €,ˆÅónØPv]ðÚ×LX8ž'GúZjo±÷ƒ·Å' 0 *†H†÷  0 *†H†÷   ;é RfµDô«mÀS_ΊˆFYs*dÀ*`•DÒ„(É"?‚sig_key_2fa17adc_0000012700] *†H†÷ 0P0B *†H†÷  05 ¯‘ш½ò7Ë"}7SÜ…J~C[P:ÿönûËÊC' 0 *†H†÷  0 *†H†÷   µ#ù}~³Yë8ÿÛç_v‚•Èš‡io#ÅöŠÂ74^&4‚password|®>ÖO!ÊåœAf@?í…0‚0n *†H†÷  0a0B *†H†÷  05 ŠoQ‰íÚ gÅ Tq𲑕äð±CÉÕŒÕXL”ç ' 0 *†H†÷  0 `†He*;}`EA¬7YOÊÞ¨¬:Ã9\cº=¿Çȯy¹I z[¶ j Ä  x Ò , †à;–ñL§]¸lÆ z#0A‚sig_cert_3917bcdd_ce5363b400] *†H†÷ 0P0B *†H†÷  05 ò‚vI°“^‚>^~'C …÷SsW-¾8*¥ñ,  ca•' 0 *†H†÷  0 *†H†÷   ðè OX¾Ö7ÞxVî"ß3²O`²]˜²–òK«ch!#/A‚sig_cert_3917bcdd_ce53635a00] *†H†÷ 0P0B *†H†÷  05 æ.ã¨%•XBB.zZâ¿Íà!kÐíôëhK}à}Ü' 0 *†H†÷  0 *†H†÷   %ò´ ê@ÂÕâðÌ­jŠ   Z9´Q!·K4k} P#.A‚sig_cert_3917bcdd_ce53636000] *†H†÷ 0P0B *†H†÷  05 çFŠe¼AÇ>õ‡Á.çã<ýÂeZÿ/óRÿŸ¢}Ë' 0 *†H†÷  0 *†H†÷   "ÓøU\*¾_í•ý4%Tx»¢Š²MlôbÑïœt#-A‚sig_cert_3917bcdb_0000012200] *†H†÷ 0P0B *†H†÷  05 Nçñ.¿ÅÜvä±sßí:œÀ½S‡gôñÅ»W }Ä' 0 *†H†÷  0 *†H†÷   w.ê"€£‡ÿ̽C[C“W¡ëA‡°;·(d‘[†L!ú#,A‚sig_cert_3917bcdb_0000012000] *†H†÷ 0P0B *†H†÷  05 ¼•¯|pÐÍ¥€á `½,‹ÎçfϵýßS~6É' 0 *†H†÷  0 *†H†÷   ˜Ÿ’ZkŠ6upÀ€ÿºÛ€æ}ìæ.g™°„â„Ñ£›"+?‚sig_key_3917bcda_0000012200] *†H†÷ 0P0B *†H†÷  05 j×û0g,¯ƒD(sÔø&ŸGä×оåÄTy7Gê7' 0 *†H†÷  0 *†H†÷   Õ›‘@n«J$nîs”‡åð }øèŸeðB]w®"*?‚sig_key_3917bcda_0000012000] *†H†÷ 0P0B *†H†÷  05 ÊÈ¿L:a4ì@€««@PÌAÿW4ô‡sÑÁ°vØ*' 0 *†H†÷  0 *†H†÷   K#ê¥ÕÐýڈЩӀ8òù{­GìÆ¤–‹ÂŽÑ")?‚sig_key_3917bcda_0000012600] *†H†÷ 0P0B *†H†÷  05 f‰„:g:Œü€g“ˆ‹›ûRøÎ’EóC®6>Öž’r:}' 0 *†H†÷  0 *†H†÷   É*W^"í°½¬`Þ\‹Ó-&`Î !3{Ž"(?‚sig_key_3917bcda_0000012500] *†H†÷ 0P0B *†H†÷  05 h`rkæ.Ö°˜ÂfÜ-Ò>V ¢”ﲓìï}_#§„' 0 *†H†÷  0 *†H†÷   |ÅSôñŸûëùÕ™y Ùw{P©O BuÒܤÂË B"'?‚sig_key_3917bcda_0000012400] *†H†÷ 0P0B *†H†÷  05 (]ÐW9ÑenRUÂÐgNDd°}X›bXÉíG:¸' 0 *†H†÷  0 *†H†÷   ×85Ö qCÁäª9e:œì˜‚ëV:p•b‹J0»³ÿC"&?‚sig_key_3917bcda_0000012300] *†H†÷ 0P0B *†H†÷  05 m)ÑEíˆK!ܨAõ1š< Ý–ŸÔFL`' 0 *†H†÷  0 *†H†÷   
6ÕjøtR3Ä/¶ÁrækP£×þâµó™ÚÁÎ –à"%?‚sig_key_3917bcda_0000012800] *†H†÷ 0P0B *†H†÷  05 ýÕ¾ÇYüá³/Ãü$BÀö‘ŽÊëƒÔòñÇÃ÷$¤' 0 *†H†÷  0 *†H†÷   EA«åXûj]{¬JÑeýnù¬ˆÇ+ñ‘hÞU,ôè"$?‚sig_key_3917bcda_0000012700] *†H†÷ 0P0B *†H†÷  05 ËH¬R_û/k+ÝŽ;ñ¢$¿ŸE¨Øà)ÎÜ I' 0 *†H†÷  0 *†H†÷   ŠKÌ€S9|î¾|ÃÈMBŠŒ™œû|S›Ð(0ܘ67¶!QÅm\f†#"A‚sig_cert_34ba1420_ce53635800] *†H†÷ 0P0B *†H†÷  05 † i˜ðÅ‘Á¬+E@~û!¦-Sßdb’0×jùÝï' 0 *†H†÷  0 *†H†÷   ÛyZNs´mJñ-?çÏ÷–†®¾ 3œf²#!A‚sig_cert_34ba1420_ce5363b500] *†H†÷ 0P0B *†H†÷  05 °“Bd çè¿5š8I!v]%þßÜbâ7ÆƲ~' 0 *†H†÷  0 *†H†÷   Ÿ ëùaÖ%^z-°N+]gʃäôK“H>ìWR` ö¹# A‚sig_cert_34ba1420_ce53635b00] *†H†÷ 0P0B *†H†÷  05 ÀsÆ£V˜ûÏ-·Ã Uqàë ¦u¨dã{í!' 0 *†H†÷  0 *†H†÷   £’’ÎÁÑMÑE1Œ¢™R»¼ç÷Öý4Úî³,¶i8#A‚sig_cert_34ba1420_ce5363b400] *†H†÷ 0P0B *†H†÷  05 ÛüúÛž,ßayöÍ}ª®Ýå#a+ù¬_eÜ6(u'' 0 *†H†÷  0 *†H†÷   ‡ñ㡜ÀšZ6ÿC±& uhÊ t¨v×Üw„NM#A‚sig_cert_34ba1420_ce53635a00] *†H†÷ 0P0B *†H†÷  05 A2zòôUü1ÍÀÁË&¦ž7F²ºeÑO®fóWÈ' 0 *†H†÷  0 *†H†÷   ƒûäïÊälÏlÿd X1â«zç™8÷)EoõÞÓ/c<#A‚sig_cert_34ba1420_ce53636000] *†H†÷ 0P0B *†H†÷  05 ÏæGü%öÃÝ*8m¯›OšÎåò|ŠƒÌl(' 0 *†H†÷  0 *†H†÷   éĀܽo5Ãï©-ƒÕW—¼Ïº}aʆ-™Ø›†'ÛÈŠm¾Vêh<‹t7±"?‚sig_key_34ba141d_0000012000] *†H†÷ 0P0B *†H†÷  05 ?í¬ü½ZýyèR1=ˆ·óMš)2&ìƒpÂz;|òÍ•' 0 *†H†÷  0 *†H†÷   ÁûÓ+¦›\WvÖâ #r ŸöY²Òrá-o ””’i4„ †„„„„„„ 9¼Ú¥Z¥ZþËô˜ÿüZ J!Ö:7e‹æB¬¥Z¥Zäºïo–ÓšåèIÀ!98”]P¬Ф¬²‘ÐÙ3Ù)±”þmç25γä\v硲68êô^³ áŽE+J“@ÞfW–yÑÀgªß[ B Ï®M34yKˆbaHp ðÐ5è'*'.$¦×ך¯²ï—­’QCz!"âÞŒ(Yh'ý âíÜЇNéç2tÕ÷b Ÿy(vm>¡m&ßôð±Ô ‚Ë+xÅBHŠ£!ö¥Ã׌ѮF¼Æ²†—õÊôOÛ›€pŸæ¡¦'>« :awA]H,l¡?oÄ™&+—1–º*è[ËÞ}•÷ Íý0‚„0n *†H†÷  0a0B *†H†÷  05 Æp(IúÇ]žõ•Kh ½Ñ쾤šLøj"C?þ[2®}' 0 *†H†÷  0 `†He*}?ÓHÄH²êÌŒ»^Ù‚E6®×»ºÆLñãUOñ.)²ÁúÇmlÛ׹ȱ]wQxûÇk}½³>@£ Í0>H¤Y~N£¿ðù¥⬔sS;ƒü‡œ;C ª}>3ýuD{S®úIÞ ã—³Fšf5ûÝùÜÒêhÆEI0àÃðèþvfƒ±’U¦JZq Ô¹ñI[äã³ÍÐÍ‚ÕÜã½Að–šè`©í#ˆ.Çуâé¨qò~Oô“?ÚhžZe‡˜aꈿ¥ëòDÀÚfçpwê·ß …±:19aDÝE¯Bûº«¢Œ´Z´JQ¦P⪺‚óCÀW™0‚0n *†H†÷  0a0B *†H†÷  05 óüÖí"<²˜.ŽB]¸¦ûWÚiÏЫÓù%˜~exOª' 0 *†H†÷  0 `†He*í++†U–Ê´WY¡Ó”´÷ »èâX¦Þfàó—£XdÊîÝð|PŒLï°w2=ç—-/þ[\{ôèn¤¿+vC& èÙ¬°4éÛ‚"ˆÒîìU‚;N]†¾mñáfùû+¢cwc-—Á# .~ ã­•LNeÓp©»šz^t&Ú9M-ÑQÚÔ*†ÒæÄ GöQ½¬ÌRtÏ$†0‚0n *†H†÷  0a0B *†H†÷  05 Nu¤_´‚ 
f¨,–{é)]DFè Ô3˜Ò“°Æ' 0 *†H†÷  0 `†He*k;ªÓÿl¨HÀJD‘éÁǪ“Mì(âMè~_ø- €ë£÷äV§’œèr.’™ï¾´üý\n£a͓༈ ®i%:þoÓû ¿¡ÏƒšîŽ Uù$‰ÏÄóeþÝ á%ÆN¿Õ(øia/`)§…ðíÈLÑ#jý¬Q‡Lšªž ê¸$µ OhŒÓ¯&€ù0â®xT43é0‚0n *†H†÷  0a0B *†H†÷  05 ±Q”ÚþþÄ$FÝÓú¢K”Š iðôã†ñö@&ø¯I' 0 *†H†÷  0 `†He*ЇÖòÃ¥ÒþÃH²¥½s¦¢XžmÐÏ_ÒGP5à’¸º«åÃâ oÿ½¡_lúâ¡ÿ¾¨èÎÊdÀMUô/³4ÐÈ=¬tËEø{ß÷£Qdﻈ;‚w d¼Y¤¦Ÿ›¿P§¬ÙܺÊ*½ÏiòPÖ`_²$š.Ô‡9?C²n #dp<¦±î0‚0n *†H†÷  0a0B *†H†÷  05 ÷uü;†×y@‡4–„2¡m&ßôð±Ô ‚Ë+xÅBHŠ£!ö¥Ã׌ѮF¼Æ²†—õÊôOÛ›€pŸæ¡¦'>« :awA]H,l¡?oÄ™&+—1–º*è[ËÞ}•÷ Íý  hZ´ h#4A‚sig_cert_3917bcdd_ce53635900] *†H†÷ 0P0B *†H†÷  05 Õ‰aýªÀÝ€µ›˜¡Œ–È÷N f™Å´6ÿ‰ Zbû' 0 *†H†÷  0 *†H†÷   ã×ü™5ˆf·P‹Æç†T¼YRœ~A*qú;ý—ñ#3A‚sig_cert_3917bcdd_ce53635800] *†H†÷ 0P0B *†H†÷  05 à#FAÞ1ºÛq¤ãݪ½ÇVÌQøÐy…„œ=˜ØÃ™ž' 0 *†H†÷  0 *†H†÷   ¶²‘~S8/nåi?ôØ8U±¶‰F© XÚÛå»Í#2A‚sig_cert_3917bcdd_ce5363b500] *†H†÷ 0P0B *†H†÷  05 ‹ÄDŸ'•S†ªm¦ÎàÚkع~Ä"+Æ äу£' 0 *†H†÷  0 *†H†÷   —ž‘Ø6µl÷Éb’“-h"J#tuìÓ$RVÞ#1A‚sig_cert_3917bcdd_ce53635b00] *†H†÷ 0P0B *†H†÷  05 ˆŒµV£ÜÞ¬n¬e¸½M¬ù§‚Á‡y­€$^&"Eqx7' 0 *†H†÷  0 *†H†÷   é¨bÙ¿´U¡Ó%ë–—öN[®I §Õûyƒª4`389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/leaf.crt000066400000000000000000000023371421664411400241750ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDXTCCAkWgAwIBAgIFALr2pxswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExFpbnRlci5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU3MTFaFw00MjAyMTEwMjU3MTFaMF8xCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEZMBcG A1UEAxMQbGVhZi5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC AQoCggEBAOS672+WBtOa5ehJwCESHjk4HpRdHlCsAQiKpKyykdDZM9kpsZT+becy Nc6zHORcduehsjY46vQDXrMN4Q8AjoFFK0qTA0DeZleWeQMa0cBnqt9bCkIJzwCu TY8zNHlLiA5iYUhwCvDQNegnjSonLiSm19earxyy75etklFDeiEij+KdHN6MKFlo J/0L4u3ckNCHTunnGjIfdNUT92Ignxt5KAN2bT6hbSbf9PCx1A2Cyyt4DsVCSIqj IfalkMPXGY2M0a5GvMayhpf1yvRP25uAEQZwn+ahpic+qwk6YXdBXUgsbBcFoT8H kG/EmSYrlwAxlroq6FvL3n0RlfcMzf0CAwEAAaMfMB0wGwYDVR0RBBQwEoIQbGVh 
Zi5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA27IS76HxwAnJH/8tEPjD DnJw9zsmkHX6skhVfFYlkpfukl0Lm0DGmfeeqYfTBU1g2x5NTxeUBip104gES0iX eq7Yr+7pdvnV6pB42EAeWRDN9DGDpTL/9/aO8Vm+O28SdILYjuGqXnoPbuUgYLPO nO/8REbQp7jk6kwje1eJ81JyYINXCwzEEpq0ycwaU6aIcCP3BY5c9PV5DStN+ddV esI2SkVABd8b0zmh+aw1zzACpUnBgNX60jfbPIr+UqCwlW8LMKmHuL9NkN/mLEyV hH3v8CpSpTWB+cOntmuK7sESgO8c/u/6ohYPyrEsNBTJgmXeHO8rsYNQAiRkpkvQ PA== -----END CERTIFICATE----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/pkcs11.txt000066400000000000000000000006301421664411400244110ustar00rootroot00000000000000library= name=NSS Internal PKCS #11 Module parameters=configdir='.' certPrefix='' keyPrefix='' secmod='secmod.db' flags= updatedir='' updateCertPrefix='' updateKeyPrefix='' updateid='' updateTokenDescription='' NSS=Flags=internal,critical trustOrder=75 cipherOrder=100 slotParams=(1={slotFlags=[ECC,RSA,DSA,DH,RC2,RC4,DES,RANDOM,SHA1,MD5,MD2,SSL,TLS,AES,Camellia,SEED,SHA256,SHA512] askpw=any timeout=30}) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/pwdfile.txt000066400000000000000000000000511421664411400247360ustar00rootroot00000000000000Moo0weeYacaema3ViireX1kee7iedeixigohtooy 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/server-export.p12000066400000000000000000000107351421664411400257260ustar00rootroot000000000000000€0€ *†H†÷  €$€‚o0€0€ *†H†÷  €$€‚š0‚–0‚’ *†H†÷   ‚;0‚70a *†H†÷  0T03 *†H†÷  0&YEgR`öˆªÍG”Õk 'À 0 *†H†÷  0 `†He*¿Ì[¨ðOÖs$üõ/ æ×‚Ðt5¸¦P‹é4ø=ãd˜"$‚Lì§+ˆFÿ·`ÂÖ¹Ì5ÝðÇÚ‡e. ³; K‘à)Œ˜&ŠJåt”üŇÀNíW{®8xUíFœÖ09W)ÊŽh1ÞR‰¹’Á ï@þw|Ù)w»ÑýÙ)ÍXýf;—ºZÛù¼‚±L×"\1ê¼òêy{l®a Í­IäýXPQ›÷9ª¢ÙS{NåQÛ'؃í{±ñ iFT¾ë_[BêŽ!_ÎbÒÔc,€.µg± ÍW<½ÏF Ó‰—æz«ßÂ3Q8UA);ЧŸ»y²³WbkõJï€æ/rÚøÝ9Â>Täp-À7É(yÜf(2¸üѺ͉$;¸É4øàÁ७©ÿw…>6¶Ñhþï`àŸ"\¥_»Vs£at~pkO¹¸(ICl¹Ù˜AMW˜UEBÇaà\F—°£©Ñ5á…÷Ý GJzAª£šÐ÷=iJþ¢ð‘KÙC¦p‹¸®!.‰¥æEÌ“p%h‹ä$ÄP÷Y‚å]3‘¹PÛLnÅX6˜þ„/ÛOVú(áñ?ÿÄ„Vzä[',”ã;v”]E“÷ˆÙ&Ý"@M‰¢ÅvÁT„rÀßg{6§Òõ"oˆû§ï´•ø8ë þ0¬ô|Ñ?i.»Šé€Ë.æ® æPÒ]î]¾]«÷»Š-¿´Á-¸ûÚ8‡xþ!‡¶¶ÈOw÷f~Û#Ì!Þð0b±Ê%BS8a+Šv$øgEÒ"züI¾€¬ ! 
Ý2¢É!*ŸöÓ†±\b°YÂe5G‡%–LQÑü‡œ;÷²š¦á`M&Æ¡š%ôKGè„kþÆçcŽF+¾–¸³NåÂÅä§Ï)üž ciøèxãt/ž’Ϫ• µ ïÿZ¤ˆ®ä”ó±Ãƒizd“ü?8ÅÔÚáw¬?g40c #@¼Ågµÿ[€-YgòJágy-Ôvj À4òoõUB’¦§œÒä±Å86‰ÀP#Øc–4´¬7¯Óþ~¦D ×¹ÈšTÊ K&<2oŽæ®Ô´ä&ÓrËÃŒø«lÁ¦Ä™9%ùÆÀ»RÑ'6Ï7ÓÕâê“=Bý×áÓåâÉ‹äÒ±Kä:QÑú…%§•|¿Ä .ªŒ{L$<—ˆ×Ç]Œµ@«vg£eUïc˜°ü™h—=Íkf—ñ÷îqòÈh[QþfÇÂl¦¤oËà2G†ý02™‚Ùœ€øóú×6ä0WúDp÷ ªvÎë'4_ùy>€å‚!Y¬î­±ä¯±°‘ƒ9©UyÉ•+ß„³#}çsŠMá7pöQg\0ëæêÓÑ^NX£èLúù¨WðrÑ‚½ Ý×sLÐíþ¥c{n…Ä.p~LFUeq!¯ï­M|(ЫY.rà£âÛ'»6gc¬œ¶÷‘²ïûç ®·[YéK{oà˜uÀ¼ûÛÇþórð^w„uXþÍM¤¦Gj«EÀ'ßhð¾V#»‰—Á:EOQ#+•iûó2†ù!Banx_Ì "t¹]†+žîÊûæ–Ût:ài^€ „ã ”@ŒÊ¶1D0 *†H†÷  1testcrt0# *†H†÷  1 úJ´qÀ|®dÀl?¯|×Z0€ *†H†÷  €0€0€ *†H†÷ 0a *†H†÷  0T03 *†H†÷  0&ѶœÉØïôv"ÛX‚’Mm 'À0 *†H†÷  0 `†He“Ÿìè}ÕjÌ—Z„ €‚ í@µ+4JÒm[ö;v‘ŽëتEXâóK|+‰®ÂÑ;ÃaÒXs­i->§á³8O&ÑvtaA%oƒ÷ë ÚMØÒzfòè÷øé³àq:7í£1².ù_x(ãùp•‡†ä¤wþýP:’&º|§ÁÇK"‰@ÄܽRN2ämÀžvÍ‹Ac55KkêU{n •ŽñÎWeþãŠNñ0baÊ›@ÅS*Oó'åÂì9U.=ÐF¨»Ê~k —¯Ç2~Á\ÈOËñN¥I¯}Ô¡RrNhÕ­†ÏÒV”ìuÈM+y\—²¤{cÎø3m¿—Ÿ”[…$MÕ·Ty|«ÍëJŠk¿ÖWîóóMݺý`[#UVÑ­5⪠hö71Èý}Êu`m ‚q çbÊ×uòÈädÌ ö}¯1>ð™ÀQíÂóh§,äïð<©@IN‹ÄWÈÜøHdö>Aöµfg»d~t]%J "C†8Èšê]#â"Ó¼Ûñ]£}¢ÚÅ‹Qy&CœW‘Nâ¡df*4Õ¿*F,^hÍ_¨½G½ioY }ÞÊ: –¶â¸ðκ.gŸ• ÎR¨CˆkOo’mŽH(ŽX«ý¡†n…ØÔ\öÁ¿jÝþ¢‰£ÕU• õ¬ëhÿ‘ã¸[ÛÙÈåÀdÂÊê—UÖ<›•v×ë”çã•á®QlÖ…Fô«æÛg$…ÞīǦ»HÏÍdäÈL§þ’q_ˆV&^I~1•ã(r˜`õiJY?ÚdeJû­kFîóò¨:w&ý«âÞŽØ6)êw¯íDlv”¬òR ?CÉÕ5cFú¸®ÉgNÑþD »œû¢¡•¸¨¿3á=lm0Njêþ‘lOh´Í~®O|RÃlÜc»¾_ŠÁ´jß5)Ïm¬Oâ·mï6Åͦ¬€º4/“Ù*r,Û0 ì-¶£\öu ê|[LdÖ6¿ï1‘:ëKß›¯‘ÒmZo€H"ã‚ÛL’ï˶8âǽ«—Ob’kK…Ý,óõË©:DçÜ„¯Á¼î®YYó)‘lhœ%ÿz±îP"AwŠj¢ðtŸ]uz8ɨʄ»DÄÄ‚™þ•4¹(Vªž|¤Ë`‰?Ëbwwrlõ3šhtQßøuâx†Û À­mÑ¢½-¸ˆ©ÿ3ν¼Ö.ÓLðñ,7™gûG^*³”E† ¾ñ^óÎÈ’æØ¢Œ£-WñÜ€FÃç_Jzwéµî P«ŒÔå«)€aÆOø~Ui ÿ†<ÙhÅ{Òóæ¯i :LË$/駤¥f #<?66& ìpCš˜µZžŸ¦šñ¹eÔo‡ÖÚýñMMôª‰©7BÅ(3OîÓ5bóS]RÑÀû 5_s.w›¿MK©Õ–|`‡*Ò iÆŠò6åZôç©4Ó|¿ºÃÙ *ðdrU 9,)rVè[«âvBí¦W¯s’”o¹à«? 
ôçÞŽ‡uUõ¡L†aî“ QÉú«ÚB€F1è69e˜é+ÿ1àDóm")ªäe%ôeÀrTv+Gi]#7_í«>ñyˆ‰ÈȰ?|d¬§MH\âV‹5²¢¸±QG]2EX^¶¯2ãæ™Ëø¿¹ †ÔšÎýÝ_^`‘rt r*#Ük ¿àø"è!Ã'9–±¹7Õ¤a :]n_ÎϾ„l`¿á)öÊbeR¼Ò¢¡8GÏURîšg ù©ü˜ô!=‚Å<ÉqîZöØúfÊš4¶Fا.Q]1f[3Ð]ßÖ­àa6ŠR]K€iåŒóÐHñJ‘pé6uÚ<´ï°6—ud8áΜ/Ý{ñûÝËÂCÅñœ3o^Ìj«¦OW~‡Õû#‡¸`£*ùclï5H_LyWœ™Û{ ºbeå|j•GZM-R(胠aˆ»3ÌÎç÷‘u™EXà]Ð4¤O"øèã»N#ͬ¦Œ=²dHêTèL7A„$ãCI»nRÿž4È›æëE{ˆ¹ÚNë8Ê‹%Y§7¹Ç}übÇ ©­@JÙ€3ò’^x"­“êG…½•Æíš0<ضÞ-FÑR+Å«¢¬‚­¡êtZ¤eQ¤±EÒ~ñžr¹礆GÜÎÌ€G~nô×´¾â´&À6w•æù‹Ax̨† Ùß³xGò74&Y&«eÌ N`uvÜGÁ â/Âöfç´B½”!›æœe³¶5~­É.Wé`/¦0 sîÅÀ„à#»ã¾Bdcs ²Æ=yBÁ²­¯ õ’Án?ÙÝzt â›:ý`é>8޼¹Êzxöà‡&µ«õëSòì/§!xä’í6‚áÆˆ€;“”{zB,nÒÉÕk†âÔézóƹêoáµE¤?”ø|qQ•]ä\S{ÉlÑÍX7Ö™ˆ^uÒÿE(Ì9`?}m¥’ŽÀ¥Ïÿcnêf7^I%‹Q¶Ï™% Ur¯±HÅ7*‚¸Û7©ŠhýêU’„̇o ókáUŸ¢ÌU×»©%/º$ÄɯŒî?àæº—{Ðaïûñ¸Û*»»>óƒl¿E)®³pïG ËHíßSù…àl† JÑ9øÁåê]”OULuvÜ4Gê"ãƒ*l ¢?d‚mlôɳWcå9@Ùå÷Ž×KÄ‚Üâ™Øý¥6`‹UT¯‚ëÐç; ÑypLº=7˜¾:„á;ã«ÖÎÜ.—¹a§¤ð·ëä0_m‹EYˆ€r˜Æb)Òst}U8k‘гcl`pFî3œ·ŽŒê¾²Ÿ¢ÈäòÔ"Ÿu_Xa¦à¾ñêžÒcâM1*ààá»cæ€(|Ì6.’LÚ[UŽ}ü™f¥Œ,4ûebð Ãcnš+øAŒŠø•Ð9LüiðØíå‚ò­âEw!ïdÉ/.ÚGlÙ¯©Îæ…ÌÌÆÁ]“S{p”äÁ÷bë šÑhtG`Äï¢h:Ç‹wK1hý ÉÛ^ŒaŠêáã›7°sKÿP¸|/ ”—¦£œgS:¥ú-U°0J010  `†He ¨*½°É‘‚st¼ yw°FŒíMoüžq6šp«²¼Ü¦Cß .¤ŸÕyšÿèB 'À389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/tls_import_ca_chain.pem000066400000000000000000000046221421664411400272570ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2ppEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU1NThaFw00MjAyMTEwMjU1NThaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRaW50ZXIuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDwpvfj98afN43Eo4qwgXJ9wioeyGaX9rmXpQs4bO/muaCzZ4ztONFz m7smxOmTNPKFilwC4f0p0KQB5GJgcy9VD10VWIS0iKckKuGzqrPQP9ROawIvlKe7 k046XnIHfvJKFaikcQfcipqLfDqU5+SXaLXj1sqnEWqXklUDqd6zSB4Ko3blQ3t6 hh2axcudpHSwvj/gbdqNkX41pSbET2MJY2/025AksWLx3CukzNMRLe7pLoNZ5Ztb Id1EsxHK/dFBCtRkke7TAUuai6BfMFj8NTWk8FB3Rd1rx608lo+31/Fq9a7e5jTq 
16JZwJQVlkZGCH6iWB8VynhF+Lw8VZMPAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEAejpy+M0NsmN/SSlXDMK6Xly0ef8vxGcF2crjdwtT BrhEFm0hYmiCrJAmtr67lTdgvxM/lpQ+tOcgPWiE1oPOo+eauf5gj4F6aGYBse8I QZ1+WmVyJK+/zRXECo9upFsE76hVhPivqEeq6Z/dsHblnESNxoLjdIRp3JBirUL/ aifYfLH9UrQ+ZU1nCIufQP3w/jUuB1dPQgaiy3gG9/sA5jEd10ZU6QyWVB2B+UH5 VKkBg4hu0xJmL79zSZIFnrCxZGxaL9S7BrQcEDB186kQ+++0g1CuncMbZajvlTsg 3bAtbZQQMe3hwsoHjo1JKWAUTOB7cqEF6LMpotg0jn+Bvg== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2peswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU0MzJaFw00MjAyMTEwMjU0MzJaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRdGVzdHMuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQCr2vsHEGtvlishhWeAU+qhPbdoJ6CBW6Dk7APlvwuOaAls4BA6I7CX ZG2tbaK38TuB1rB21/KOciTcy7TaF9X6OW+6Hkb4gGMnpy4sRbw4CKIfkNsCZ5av bQ9fsRbgM0q72YPjZlzO6tuvLimOLolhmSiSS00Ll20CteMMWZ/ApGBl163iohD4 pFWJhtyYG9DnZp5N6T3yHDFsrIyil2+G6ZSTOObRwXUEvHeZcGRiG6Py9t3vDOSg IUKYcgyihg9boEHVe76wHfMm6i3ELa7/QeVJNofbiPso6doqD0V+qmGhZsmpjP56 RcBR85ijo/eprohjDNXHAOUgdZ7K9DqrAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEASH8xxpue07K1K8T5SLDUT8iaBnCwub6s8atfqPbR xb2vdIX0p6WN+kmsNNsafyQYYz+M5LdMSeaTrzj52zvKvZ/5bgc+VqLXx35khaQU 0RgNgKxDgeY2vGVPFHDSNhJvBTtMxksUK0otW8tF70bTZEp2whkoHCu1nAXuEzaX BeglYO6YRtuY71u84gvd8vtq2Zy0sb5vG7uWn2ZTpA5maCK58r9XpUdpjyA4qhFB ClwQ45UzkLzTbolioT10N7Xp5clLzqiLYexFuoZhK2HROvgr8EFF7xl17qwVbTkt ZsURjsTOrWKVLiVn9AuCHeToPosZr4/pWWjFoweO+yfZEg== -----END CERTIFICATE----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/tls_import_crt_chain.pem000066400000000000000000000071611421664411400274650ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDXTCCAkWgAwIBAgIFALr2pxswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK 
Ewd0ZXN0aW5nMRowGAYDVQQDExFpbnRlci5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU3MTFaFw00MjAyMTEwMjU3MTFaMF8xCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEZMBcG A1UEAxMQbGVhZi5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC AQoCggEBAOS672+WBtOa5ehJwCESHjk4HpRdHlCsAQiKpKyykdDZM9kpsZT+becy Nc6zHORcduehsjY46vQDXrMN4Q8AjoFFK0qTA0DeZleWeQMa0cBnqt9bCkIJzwCu TY8zNHlLiA5iYUhwCvDQNegnjSonLiSm19earxyy75etklFDeiEij+KdHN6MKFlo J/0L4u3ckNCHTunnGjIfdNUT92Ignxt5KAN2bT6hbSbf9PCx1A2Cyyt4DsVCSIqj IfalkMPXGY2M0a5GvMayhpf1yvRP25uAEQZwn+ahpic+qwk6YXdBXUgsbBcFoT8H kG/EmSYrlwAxlroq6FvL3n0RlfcMzf0CAwEAAaMfMB0wGwYDVR0RBBQwEoIQbGVh Zi5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA27IS76HxwAnJH/8tEPjD DnJw9zsmkHX6skhVfFYlkpfukl0Lm0DGmfeeqYfTBU1g2x5NTxeUBip104gES0iX eq7Yr+7pdvnV6pB42EAeWRDN9DGDpTL/9/aO8Vm+O28SdILYjuGqXnoPbuUgYLPO nO/8REbQp7jk6kwje1eJ81JyYINXCwzEEpq0ycwaU6aIcCP3BY5c9PV5DStN+ddV esI2SkVABd8b0zmh+aw1zzACpUnBgNX60jfbPIr+UqCwlW8LMKmHuL9NkN/mLEyV hH3v8CpSpTWB+cOntmuK7sESgO8c/u/6ohYPyrEsNBTJgmXeHO8rsYNQAiRkpkvQ PA== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2ppEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU1NThaFw00MjAyMTEwMjU1NThaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRaW50ZXIuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDwpvfj98afN43Eo4qwgXJ9wioeyGaX9rmXpQs4bO/muaCzZ4ztONFz m7smxOmTNPKFilwC4f0p0KQB5GJgcy9VD10VWIS0iKckKuGzqrPQP9ROawIvlKe7 k046XnIHfvJKFaikcQfcipqLfDqU5+SXaLXj1sqnEWqXklUDqd6zSB4Ko3blQ3t6 hh2axcudpHSwvj/gbdqNkX41pSbET2MJY2/025AksWLx3CukzNMRLe7pLoNZ5Ztb Id1EsxHK/dFBCtRkke7TAUuai6BfMFj8NTWk8FB3Rd1rx608lo+31/Fq9a7e5jTq 16JZwJQVlkZGCH6iWB8VynhF+Lw8VZMPAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEAejpy+M0NsmN/SSlXDMK6Xly0ef8vxGcF2crjdwtT 
BrhEFm0hYmiCrJAmtr67lTdgvxM/lpQ+tOcgPWiE1oPOo+eauf5gj4F6aGYBse8I QZ1+WmVyJK+/zRXECo9upFsE76hVhPivqEeq6Z/dsHblnESNxoLjdIRp3JBirUL/ aifYfLH9UrQ+ZU1nCIufQP3w/jUuB1dPQgaiy3gG9/sA5jEd10ZU6QyWVB2B+UH5 VKkBg4hu0xJmL79zSZIFnrCxZGxaL9S7BrQcEDB186kQ+++0g1CuncMbZajvlTsg 3bAtbZQQMe3hwsoHjo1JKWAUTOB7cqEF6LMpotg0jn+Bvg== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIDTjCCAjagAwIBAgIFALr2peswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU0MzJaFw00MjAyMTEwMjU0MzJaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG A1UEAxMRdGVzdHMuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQCr2vsHEGtvlishhWeAU+qhPbdoJ6CBW6Dk7APlvwuOaAls4BA6I7CX ZG2tbaK38TuB1rB21/KOciTcy7TaF9X6OW+6Hkb4gGMnpy4sRbw4CKIfkNsCZ5av bQ9fsRbgM0q72YPjZlzO6tuvLimOLolhmSiSS00Ll20CteMMWZ/ApGBl163iohD4 pFWJhtyYG9DnZp5N6T3yHDFsrIyil2+G6ZSTOObRwXUEvHeZcGRiG6Py9t3vDOSg IUKYcgyihg9boEHVe76wHfMm6i3ELa7/QeVJNofbiPso6doqD0V+qmGhZsmpjP56 RcBR85ijo/eprohjDNXHAOUgdZ7K9DqrAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN BgkqhkiG9w0BAQsFAAOCAQEASH8xxpue07K1K8T5SLDUT8iaBnCwub6s8atfqPbR xb2vdIX0p6WN+kmsNNsafyQYYz+M5LdMSeaTrzj52zvKvZ/5bgc+VqLXx35khaQU 0RgNgKxDgeY2vGVPFHDSNhJvBTtMxksUK0otW8tF70bTZEp2whkoHCu1nAXuEzaX BeglYO6YRtuY71u84gvd8vtq2Zy0sb5vG7uWn2ZTpA5maCK58r9XpUdpjyA4qhFB ClwQ45UzkLzTbolioT10N7Xp5clLzqiLYexFuoZhK2HROvgr8EFF7xl17qwVbTkt ZsURjsTOrWKVLiVn9AuCHeToPosZr4/pWWjFoweO+yfZEg== -----END CERTIFICATE----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/tls_import_key.pem000066400000000000000000000034761421664411400263300ustar00rootroot00000000000000Bag Attributes friendlyName: testcrt localKeyID: 19 09 FA 11 4A B4 71 C0 7C 17 AE 64 C0 1C 6C 3F AF 7C D7 5A Key Attributes: -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDkuu9vlgbTmuXo ScAhEh45OB6UXR5QrAEIiqSsspHQ2TPZKbGU/m3nMjXOsxzkXHbnobI2OOr0A16z 
DeEPAI6BRStKkwNA3mZXlnkDGtHAZ6rfWwpCCc8Ark2PMzR5S4gOYmFIcArw0DXo J40qJy4kptfXmq8csu+XrZJRQ3ohIo/inRzejChZaCf9C+Lt3JDQh07p5xoyH3TV E/diIJ8beSgDdm0+oW0m3/TwsdQNgssreA7FQkiKoyH2pZDD1xmNjNGuRrzGsoaX 9cr0T9ubgBEGcJ/moaYnPqsJOmF3QV1ILGwXBaE/B5BvxJkmK5cAMZa6Kuhby959 EZX3DM39AgMBAAECggEABFzUaEpyQuL3c6DEe1z/GpRJcQb9pwhA1MrgLTMSuOsL pB65dmAL9Jbuk8yyxmBFHFHnNkWLpa/SxJOFMWYPUcPh+YAoVbpoNU93a2m9im/v wGbaITxSqG6qqAqP+6hHJg8WT+1jKAiwnobymFU6+hP8le4rXN7E1x3GZqpkz/Dx k3eNkQbuykqKH046iDgZUqSHw4hriieJc3RiVnaThTnRd1qeKnAIUZ/nhSas5nQa d/sd6dWCycOb7tvw1Vcm6zTu+uTA8ai1uW6wU7N/vYa4jxZYYsYZYgHQxoK+RSzH glWD1n0u9VTUVDqISd+BocHwuRunOvOlZlMonTqhAQKBgQD+moK+5AyQjd3RJnVi xv/MYmw7nT+zXufZJ+bgPGJE8mEgCWAMy/8ysDZX77P4ZZuDVXb1mxVetxVo1n6d 0Ggl/UP/rAqMb3WW86mJfDfGorL6fHg8GzWnQhNWDe6MmGOAPZOJN6UcMWSZshzj yuey1cDQpuBe8j0UXwYA+ys84QKBgQDl/BiEXLM9brLOEEUF/9I2JaL6teIqFivP fhscTHfng1dgnrq5hkD/jSUT+zeZ43/fXuVEYzpNgfY7sS0n77//xzN7nljk4809 2KReeIoQqnkaQ75Vo5dhlkfX/J+jvD7MbeuXGIMKEV3PnLXwQYCdC87iH4CI4PtC 9I+wwd94nQKBgGgsUjjG2HlBArRz9u2+nKVE1CIkOg8rUtPgZq/zJQYu4hyYmWtD AJz9yo56bnnBITtAedcOaFUDtkfaE56AykxY7zyqaPqDFGr6MbEmWS/2HCMvUIbP X0mbWIwKUUPHilbLWxV25iC9+PqGDRoLSHg8y5LT5NQUa3dtVeiK3GshAoGAKa7F Ksg6XDoCAkMEn4+8I8Ayh8oLUaFvE05Bz6E0Yit13LcoFJP2l9qXC8YOT7/h3zQt zXVGjeGuJSd5jbFwVQVfmVobtnBrNHhdYhnqvBaJmG8Kwi7CMxevsb/Bl0V5BEgv 2NTCe0KmhAhdGUxl6RDI0EbxXt2X7IyytlCNFikCgYBysxlYfApfVJKwdNevnV1V CI1gGJpIJNZnlX2At7Db2llClxPTQBFRh820k0o+Vaj95VEGDWci/nIUq5odvIzQ GjVDHSEPsp699J31dreJYZN6mJR9YOI5f5Hak8TP4mlwJWQr+edBofOql6lUkaQJ 8muEOzjKY0ty08BdBhC+lQ== -----END PRIVATE KEY----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/data/tls/tls_import_key_chain.pem000066400000000000000000000060351421664411400274640ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDXTCCAkWgAwIBAgIFALr2pxswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK Ewd0ZXN0aW5nMRowGAYDVQQDExFpbnRlci5leGFtcGxlLmNvbTAeFw0yMjAyMTEw MjU3MTFaFw00MjAyMTEwMjU3MTFaMF8xCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR 
dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEZMBcG A1UEAxMQbGVhZi5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC AQoCggEBAOS672+WBtOa5ehJwCESHjk4HpRdHlCsAQiKpKyykdDZM9kpsZT+becy Nc6zHORcduehsjY46vQDXrMN4Q8AjoFFK0qTA0DeZleWeQMa0cBnqt9bCkIJzwCu TY8zNHlLiA5iYUhwCvDQNegnjSonLiSm19earxyy75etklFDeiEij+KdHN6MKFlo J/0L4u3ckNCHTunnGjIfdNUT92Ignxt5KAN2bT6hbSbf9PCx1A2Cyyt4DsVCSIqj IfalkMPXGY2M0a5GvMayhpf1yvRP25uAEQZwn+ahpic+qwk6YXdBXUgsbBcFoT8H kG/EmSYrlwAxlroq6FvL3n0RlfcMzf0CAwEAAaMfMB0wGwYDVR0RBBQwEoIQbGVh Zi5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA27IS76HxwAnJH/8tEPjD DnJw9zsmkHX6skhVfFYlkpfukl0Lm0DGmfeeqYfTBU1g2x5NTxeUBip104gES0iX eq7Yr+7pdvnV6pB42EAeWRDN9DGDpTL/9/aO8Vm+O28SdILYjuGqXnoPbuUgYLPO nO/8REbQp7jk6kwje1eJ81JyYINXCwzEEpq0ycwaU6aIcCP3BY5c9PV5DStN+ddV esI2SkVABd8b0zmh+aw1zzACpUnBgNX60jfbPIr+UqCwlW8LMKmHuL9NkN/mLEyV hH3v8CpSpTWB+cOntmuK7sESgO8c/u/6ohYPyrEsNBTJgmXeHO8rsYNQAiRkpkvQ PA== -----END CERTIFICATE----- Bag Attributes friendlyName: testcrt localKeyID: 19 09 FA 11 4A B4 71 C0 7C 17 AE 64 C0 1C 6C 3F AF 7C D7 5A Key Attributes: -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDkuu9vlgbTmuXo ScAhEh45OB6UXR5QrAEIiqSsspHQ2TPZKbGU/m3nMjXOsxzkXHbnobI2OOr0A16z DeEPAI6BRStKkwNA3mZXlnkDGtHAZ6rfWwpCCc8Ark2PMzR5S4gOYmFIcArw0DXo J40qJy4kptfXmq8csu+XrZJRQ3ohIo/inRzejChZaCf9C+Lt3JDQh07p5xoyH3TV E/diIJ8beSgDdm0+oW0m3/TwsdQNgssreA7FQkiKoyH2pZDD1xmNjNGuRrzGsoaX 9cr0T9ubgBEGcJ/moaYnPqsJOmF3QV1ILGwXBaE/B5BvxJkmK5cAMZa6Kuhby959 EZX3DM39AgMBAAECggEABFzUaEpyQuL3c6DEe1z/GpRJcQb9pwhA1MrgLTMSuOsL pB65dmAL9Jbuk8yyxmBFHFHnNkWLpa/SxJOFMWYPUcPh+YAoVbpoNU93a2m9im/v wGbaITxSqG6qqAqP+6hHJg8WT+1jKAiwnobymFU6+hP8le4rXN7E1x3GZqpkz/Dx k3eNkQbuykqKH046iDgZUqSHw4hriieJc3RiVnaThTnRd1qeKnAIUZ/nhSas5nQa d/sd6dWCycOb7tvw1Vcm6zTu+uTA8ai1uW6wU7N/vYa4jxZYYsYZYgHQxoK+RSzH glWD1n0u9VTUVDqISd+BocHwuRunOvOlZlMonTqhAQKBgQD+moK+5AyQjd3RJnVi xv/MYmw7nT+zXufZJ+bgPGJE8mEgCWAMy/8ysDZX77P4ZZuDVXb1mxVetxVo1n6d 
0Ggl/UP/rAqMb3WW86mJfDfGorL6fHg8GzWnQhNWDe6MmGOAPZOJN6UcMWSZshzj yuey1cDQpuBe8j0UXwYA+ys84QKBgQDl/BiEXLM9brLOEEUF/9I2JaL6teIqFivP fhscTHfng1dgnrq5hkD/jSUT+zeZ43/fXuVEYzpNgfY7sS0n77//xzN7nljk4809 2KReeIoQqnkaQ75Vo5dhlkfX/J+jvD7MbeuXGIMKEV3PnLXwQYCdC87iH4CI4PtC 9I+wwd94nQKBgGgsUjjG2HlBArRz9u2+nKVE1CIkOg8rUtPgZq/zJQYu4hyYmWtD AJz9yo56bnnBITtAedcOaFUDtkfaE56AykxY7zyqaPqDFGr6MbEmWS/2HCMvUIbP X0mbWIwKUUPHilbLWxV25iC9+PqGDRoLSHg8y5LT5NQUa3dtVeiK3GshAoGAKa7F Ksg6XDoCAkMEn4+8I8Ayh8oLUaFvE05Bz6E0Yit13LcoFJP2l9qXC8YOT7/h3zQt zXVGjeGuJSd5jbFwVQVfmVobtnBrNHhdYhnqvBaJmG8Kwi7CMxevsb/Bl0V5BEgv 2NTCe0KmhAhdGUxl6RDI0EbxXt2X7IyytlCNFikCgYBysxlYfApfVJKwdNevnV1V CI1gGJpIJNZnlX2At7Db2llClxPTQBFRh820k0o+Vaj95VEGDWci/nIUq5odvIzQ GjVDHSEPsp699J31dreJYZN6mJR9YOI5f5Hak8TP4mlwJWQr+edBofOql6lUkaQJ 8muEOzjKY0ty08BdBhC+lQ== -----END PRIVATE KEY----- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/longduration/000077500000000000000000000000001421664411400235415ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/longduration/automembers_long_test.py000066400000000000000000001115621421664411400305220ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ Will do stress testing of automember plugin """ import os import pytest from lib389.tasks import DEFAULT_SUFFIX from lib389.topologies import topology_m4 as topo_m4 from lib389.idm.nscontainer import nsContainers, nsContainer from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.domain import Domain from lib389.idm.posixgroup import PosixGroups from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, \ MemberOfPlugin, AutoMembershipRegexRules from lib389.backend import Backends from lib389.config import Config from lib389.replica import ReplicationManager from lib389.tasks import AutomemberRebuildMembershipTask from lib389.idm.group import Groups, Group, nsAdminGroups, nsAdminGroup SUBSUFFIX = f'dc=SubSuffix,{DEFAULT_SUFFIX}' REPMANDN = "cn=ReplManager" REPMANSFX = "dc=replmangr,dc=com" CACHE_SIZE = '-1' CACHEMEM_SIZE = '10485760' pytestmark = pytest.mark.tier3 @pytest.fixture(scope="module") def _create_entries(topo_m4): """ Will act as module .Will set up required user/entries for the test cases. 
""" for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: assert instance.status() for org in ['autouserGroups', 'Employees', 'TaskEmployees']: OrganizationalUnits(topo_m4.ms['supplier1'], DEFAULT_SUFFIX).create(properties={'ou': org}) Backends(topo_m4.ms['supplier1']).create(properties={ 'cn': 'SubAutoMembers', 'nsslapd-suffix': SUBSUFFIX, 'nsslapd-CACHE_SIZE': CACHE_SIZE, 'nsslapd-CACHEMEM_SIZE': CACHEMEM_SIZE }) Domain(topo_m4.ms['supplier1'], SUBSUFFIX).create(properties={ 'dc': SUBSUFFIX.split('=')[1].split(',')[0], 'aci': [ f'(targetattr="userPassword")(version 3.0;aci "Replication Manager Access";' f'allow (write,compare) userdn="ldap:///{REPMANDN},cn=config";)', f'(target ="ldap:///{SUBSUFFIX}")(targetattr !="cn||sn||uid")(version 3.0;' f'acl "Group Permission";allow (write)(groupdn = "ldap:///cn=GroupMgr,{SUBSUFFIX}");)', f'(target ="ldap:///{SUBSUFFIX}")(targetattr !="userPassword")(version 3.0;' f'acl "Anonym-read access"; allow (read,search,compare) (userdn="ldap:///anyone");)'] }) for suff, grp in [(DEFAULT_SUFFIX, 'SubDef1'), (DEFAULT_SUFFIX, 'SubDef2'), (DEFAULT_SUFFIX, 'SubDef3'), (DEFAULT_SUFFIX, 'SubDef4'), (DEFAULT_SUFFIX, 'SubDef5'), (DEFAULT_SUFFIX, 'Employees'), (DEFAULT_SUFFIX, 'NewEmployees'), (DEFAULT_SUFFIX, 'testuserGroups'), (SUBSUFFIX, 'subsuffGroups'), (SUBSUFFIX, 'Employees'), (DEFAULT_SUFFIX, 'autoMembersPlugin'), (DEFAULT_SUFFIX, 'replsubGroups'), ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Managers'), ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Contractors'), ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Interns'), ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Visitors'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef1'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef2'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef3'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef4'), 
("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef5'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'Contractors'), ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'Managers'), ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef1'), ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef2'), ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef3'), ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef4'), ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef5')]: Groups(topo_m4.ms['supplier1'], suff, rdn=None).create(properties={'cn': grp}) for suff, grp, gid in [(SUBSUFFIX, 'SubDef1', '111'), (SUBSUFFIX, 'SubDef2', '222'), (SUBSUFFIX, 'SubDef3', '333'), (SUBSUFFIX, 'SubDef4', '444'), (SUBSUFFIX, 'SubDef5', '555'), ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Managers', '666'), ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Contractors', '999')]: PosixGroups(topo_m4.ms['supplier1'], suff, rdn=None).create(properties={ 'cn': grp, 'gidNumber': gid}) for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: AutoMembershipPlugin(supplier).add("nsslapd-pluginConfigArea", "cn=autoMembersPlugin,{}".format(DEFAULT_SUFFIX)) MemberOfPlugin(supplier).enable() automembers = AutoMembershipDefinitions(topo_m4.ms['supplier1'], f'cn=autoMembersPlugin,{DEFAULT_SUFFIX}') automember1 = automembers.create(properties={ 'cn': 'replsubGroups', 'autoMemberScope': f'ou=Employees,{DEFAULT_SUFFIX}', 'autoMemberFilter': "objectclass=posixAccount", 'autoMemberDefaultGroup': [f'cn=SubDef1,{DEFAULT_SUFFIX}', f'cn=SubDef2,{DEFAULT_SUFFIX}', f'cn=SubDef3,{DEFAULT_SUFFIX}', f'cn=SubDef4,{DEFAULT_SUFFIX}', f'cn=SubDef5,{DEFAULT_SUFFIX}'], 'autoMemberGroupingAttr': 'member:dn' }) automembers = AutoMembershipRegexRules(topo_m4.ms['supplier1'], automember1.dn) automembers.create(properties={ 'cn': 'Managers', 'description': f'Group placement for Managers', 'autoMemberTargetGroup': 
[f'cn=Managers,cn=replsubGroups,{DEFAULT_SUFFIX}'], 'autoMemberInclusiveRegex': ['uidNumber=^5..5$', 'gidNumber=^[1-4]..3$', 'nsAdminGroupName=^Manager$|^Supervisor$'], "autoMemberExclusiveRegex": ['uidNumber=^999$', 'gidNumber=^[6-8].0$', 'nsAdminGroupName=^Junior$'], }) automembers.create(properties={ 'cn': 'Contractors', 'description': f'Group placement for Contractors', 'autoMemberTargetGroup': [f'cn=Contractors,cn=replsubGroups,{DEFAULT_SUFFIX}'], 'autoMemberInclusiveRegex': ['uidNumber=^8..5$', 'gidNumber=^[5-9]..3$', 'nsAdminGroupName=^Contract|^Temporary$'], "autoMemberExclusiveRegex": ['uidNumber=^[1,3,8]99$', 'gidNumber=^[2-4]00$', 'nsAdminGroupName=^Employee$'], }) automembers.create(properties={ 'cn': 'Interns', 'description': f'Group placement for Interns', 'autoMemberTargetGroup': [f'cn=Interns,cn=replsubGroups,{DEFAULT_SUFFIX}'], 'autoMemberInclusiveRegex': ['uidNumber=^1..6$', 'gidNumber=^[1-9]..3$', 'nsAdminGroupName=^Interns$|^Trainees$'], "autoMemberExclusiveRegex": ['uidNumber=^[1-9]99$', 'gidNumber=^[1-9]00$', 'nsAdminGroupName=^Students$'],}) automembers.create(properties={ 'cn': 'Visitors', 'description': f'Group placement for Visitors', 'autoMemberTargetGroup': [f'cn=Visitors,cn=replsubGroups,{DEFAULT_SUFFIX}'], 'autoMemberInclusiveRegex': ['uidNumber=^1..6$', 'gidNumber=^[1-5]6.3$', 'nsAdminGroupName=^Visitors$'], "autoMemberExclusiveRegex": ['uidNumber=^[7-9]99$', 'gidNumber=^[7-9]00$', 'nsAdminGroupName=^Inter'], }) for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: instance.restart() def delete_users_and_wait(topo_m4, automem_scope): """ Deletes entries after test and waits for replication. 
""" for user in nsAdminGroups(topo_m4.ms['supplier1'], automem_scope, rdn=None).list(): user.delete() for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) def create_entry(topo_m4, user_id, suffix, uid_no, gid_no, role_usr): """ Will create entries with nsAdminGroup objectclass """ user = nsAdminGroups(topo_m4.ms['supplier1'], suffix, rdn=None).create(properties={ 'cn': user_id, 'sn': user_id, 'uid': user_id, 'homeDirectory': '/home/{}'.format(user_id), 'loginShell': '/bin/bash', 'uidNumber': uid_no, 'gidNumber': gid_no, 'objectclass': ['top', 'person', 'posixaccount', 'inetuser', 'nsMemberOf', 'nsAccount', 'nsAdminGroup'], 'nsAdminGroupName': role_usr, 'seeAlso': 'uid={},{}'.format(user_id, suffix), 'entrydn': 'uid={},{}'.format(user_id, suffix) }) return user def test_adding_300_user(topo_m4, _create_entries): """ Adding 300 user entries matching the inclusive regex rules for all targetted groups at M1 and checking the same created in M2 & M3 :id: fcd867bc-be57-11e9-9842-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 300 user entries matching the inclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers :expected results: 1. Pass 2. 
Pass """ user_rdn = "long01usr" automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) # Adding BulkUsers for number in range(300): create_entry(topo_m4, f'{user_rdn}{number}', automem_scope, '5795', '5693', 'Contractor') try: # Check to sync the entries for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier2'], 'Managers'), (topo_m4.ms['supplier3'], 'Contractors'), (topo_m4.ms['supplier4'], 'Interns')]: assert len(nsAdminGroup( instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 300 for grp in [default_group1, default_group2]: assert not Group(topo_m4.ms['supplier4'], grp).get_attr_vals_utf8('member') assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_adding_1000_users(topo_m4, _create_entries): """ Adding 1000 users matching inclusive regex for Managers/Contractors and exclusive regex for Interns/Visitors :id: f641e612-be57-11e9-94e6-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 1000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers :expected results: 1. Pass 2. 
Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) # Adding 1000 users for number in range(1000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '799', '5693', 'Manager') try: # Check to sync the entries for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), (topo_m4.ms['supplier3'], 'Contractors')]: assert len(nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member')) == 1000 for instance, grp in [(topo_m4.ms['supplier2'], 'Interns'), (topo_m4.ms['supplier4'], 'Visitors')]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') for grp in [default_group1, default_group2]: assert not Group(topo_m4.ms['supplier2'], grp).get_attr_vals_utf8('member') assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_adding_3000_users(topo_m4, _create_entries): """ Adding 3000 users matching all inclusive regex rules and no matching exclusive regex rules :id: ee54576e-be57-11e9-b536-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers :expected results: 1. Pass 2. 
Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) # Adding 3000 users for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), (topo_m4.ms['supplier3'], 'Contractors'), (topo_m4.ms['supplier2'], 'Interns'), (topo_m4.ms['supplier4'], 'Visitors') ]: assert len( nsAdminGroup(instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member')) == 3000 for grp in [default_group1, default_group2]: assert not Group(topo_m4.ms['supplier2'], grp).get_attr_vals_utf8('member') assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_3000_users_matching_all_exclusive_regex(topo_m4, _create_entries): """ Adding 3000 users matching all exclusive regex rules and no matching inclusive regex rules :id: e789331e-be57-11e9-b298-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers :expected results: 1. Pass 2. 
Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) default_group4 = "cn=SubDef4,{}".format(DEFAULT_SUFFIX) # Adding 3000 users for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], default_group4), (topo_m4.ms['supplier2'], default_group1), (topo_m4.ms['supplier3'], default_group2), (topo_m4.ms['supplier4'], default_group2)]: assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 for grp, instance in [('Managers', topo_m4.ms['supplier3']), ('Contractors', topo_m4.ms['supplier2'])]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_no_matching_inclusive_regex_rules(topo_m4, _create_entries): """ Adding 3000 users matching all exclusive regex rules and no matching inclusive regex rules :id: e0cc0e16-be57-11e9-9c0f-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers :expected results: 1. Pass 2. 
Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) # Adding 3000 users for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], "cn=SubDef4,{}".format(DEFAULT_SUFFIX)), (topo_m4.ms['supplier2'], default_group1), (topo_m4.ms['supplier3'], "cn=SubDef2,{}".format(DEFAULT_SUFFIX)), (topo_m4.ms['supplier4'], "cn=SubDef3,{}".format(DEFAULT_SUFFIX))]: assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 for grp, instance in [('Managers', topo_m4.ms['supplier3']), ('Contractors', topo_m4.ms['supplier2'])]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_adding_deleting_and_re_adding_the_same_3000(topo_m4, _create_entries): """ Adding, Deleting and re-adding the same 3000 users matching all exclusive regex rules and no matching inclusive regex rules :id: d939247c-be57-11e9-825d-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers 3. Delete 3000 users 4. Again add 3000 users 5. Check the same created in rest suppliers :expected results: 1. Pass 2. Pass 3. Pass 4. Pass 5. 
Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) # Adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) assert len(nsAdminGroup(topo_m4.ms['supplier2'], default_group1).get_attr_vals_utf8('member')) == 3000 # Deleting for user in nsAdminGroups(topo_m4.ms['supplier2'], automem_scope, rdn=None).list(): user.delete() for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier2'], supplier, timeout=30000) # Again adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], "cn=SubDef4,{}".format(DEFAULT_SUFFIX)), (topo_m4.ms['supplier3'], "cn=SubDef5,{}".format(DEFAULT_SUFFIX)), (topo_m4.ms['supplier4'], "cn=SubDef3,{}".format(DEFAULT_SUFFIX))]: assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 for grp, instance in [('Interns', topo_m4.ms['supplier3']), ('Contractors', topo_m4.ms['supplier2'])]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_re_adding_the_same_3000_users(topo_m4, _create_entries): """ Adding, Deleting and re-adding the same 3000 users matching all inclusive regex rules and no matching exclusive regex 
rules :id: d2f5f112-be57-11e9-b164-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers 3. Delete 3000 users 4. Again add 3000 users 5. Check the same created in rest suppliers :expected results: 1. Pass 2. Pass 3. Pass 4. Pass 5. Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) # Adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') try: for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier2'], supplier, timeout=30000) assert len(nsAdminGroup( topo_m4.ms['supplier2'], f'cn=Contractors,{grp_container}').get_attr_vals_utf8('member')) == 3000 # Deleting delete_users_and_wait(topo_m4, automem_scope) # re-adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], "cn=Managers,{}".format(grp_container)), (topo_m4.ms['supplier3'], "cn=Contractors,{}".format(grp_container)), (topo_m4.ms['supplier4'], "cn=Visitors,{}".format(grp_container)), (topo_m4.ms['supplier2'], "cn=Interns,{}".format(grp_container))]: assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 for grp, instance in [(default_group2, topo_m4.ms['supplier4']), (default_group1, topo_m4.ms['supplier3'])]: assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') 
finally: delete_users_and_wait(topo_m4, automem_scope) def test_users_with_different_uid_and_gid_nos(topo_m4, _create_entries): """ Adding, Deleting and re-adding the same 3000 users with different uid and gid nos, with different inclusive/exclusive matching regex rules :id: cc595a1a-be57-11e9-b053-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Add 3000 user entries matching the inclusive/exclusive regex rules at topo_m4.ms['supplier1'] 2. Check the same created in rest suppliers 3. Delete 3000 users 4. Again add 3000 users 5. Check the same created in rest suppliers :expected results: 1. Pass 2. Pass 3. Pass 4. Pass 5. Pass """ automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) # Adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope, '3994', '5695', 'OnDeputation') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for intstance, grp in [(topo_m4.ms['supplier2'], default_group1), (topo_m4.ms['supplier3'], default_group2)]: assert len(nsAdminGroup(intstance, grp).get_attr_vals_utf8('member')) == 3000 for grp, instance in [('Contractors', topo_m4.ms['supplier3']), ('Managers', topo_m4.ms['supplier1'])]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') # Deleting for user in nsAdminGroups(topo_m4.ms['supplier1'], automem_scope, rdn=None).list(): user.delete() for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) # re-adding for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', 
automem_scope, '5995', '5693', 'OnDeputation') for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for grp, instance in [('Contractors', topo_m4.ms['supplier3']), ('Managers', topo_m4.ms['supplier1']), ('Interns', topo_m4.ms['supplier2']), ('Visitors', topo_m4.ms['supplier4'])]: assert len(nsAdminGroup( instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 3000 for instance, grp in [(topo_m4.ms['supplier2'], default_group1), (topo_m4.ms['supplier3'], default_group2)]: assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) def test_bulk_users_to_non_automemscope(topo_m4, _create_entries): """ Adding bulk users to non-automem_scope and then running modrdn operation to change the ou to automem_scope :id: c532dc0c-be57-11e9-bcca-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Running modrdn operation to change the ou to automem_scope 2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1'] 3. Run AutomemberRebuildMembershipTask 4. Check the same created in rest suppliers :expected results: 1. Pass 2. Pass 3. Pass 4. 
Pass """ automem_scope = "cn=EmployeesNew,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) nsContainers(topo_m4.ms['supplier1'], DEFAULT_SUFFIX).create(properties={'cn': 'ChangeThisCN'}) Group(topo_m4.ms['supplier1'], f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace('autoMemberScope', automem_scope) for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: instance.restart() # Adding BulkUsers for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', f'cn=ChangeThisCN,{DEFAULT_SUFFIX}', '5995', '5693', 'Supervisor') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier2'], default_group1), (topo_m4.ms['supplier1'], "cn=Managers,{}".format(grp_container))]: assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') # Deleting BulkUsers "User_Name" Suffix "Nof_Users" topo_m4.ms['supplier3'].rename_s(f"CN=ChangeThisCN,{DEFAULT_SUFFIX}", f'cn=EmployeesNew', newsuperior=DEFAULT_SUFFIX, delold=1) for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(properties={ 'basedn': automem_scope, 'filter': "objectClass=posixAccount" }) for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), (topo_m4.ms['supplier2'], 'Interns'), 
(topo_m4.ms['supplier3'], 'Contractors'), (topo_m4.ms['supplier4'], 'Visitors')]: assert len(nsAdminGroup( instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 3000 for grp, instance in [(default_group1, topo_m4.ms['supplier2']), (default_group2, topo_m4.ms['supplier3'])]: assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') finally: delete_users_and_wait(topo_m4, automem_scope) nsContainer(topo_m4.ms['supplier1'], "CN=EmployeesNew,{}".format(DEFAULT_SUFFIX)).delete() def test_automemscope_and_running_modrdn(topo_m4, _create_entries): """ Adding bulk users to non-automem_scope and running modrdn operation with new superior to automem_scope :id: bf60f958-be57-11e9-945d-8c16451d917b :setup: Instance with 4 suppliers :steps: 1. Running modrdn operation to change the ou to automem_scope 2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1'] 3. Run AutomemberRebuildMembershipTask 4. Check the same created in rest suppliers :expected results: 1. Pass 2. Pass 3. Pass 4. 
Pass """ user_rdn = "long09usr" automem_scope1 = "ou=Employees,{}".format(DEFAULT_SUFFIX) automem_scope2 = "cn=NewEmployees,{}".format(DEFAULT_SUFFIX) grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) OrganizationalUnits(topo_m4.ms['supplier1'], DEFAULT_SUFFIX).create(properties={'ou': 'NewEmployees'}) Group(topo_m4.ms['supplier1'], f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace('autoMemberScope', automem_scope2) for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: Config(instance).replace('nsslapd-errorlog-level', '73728') instance.restart() # Adding bulk users for number in range(3000): create_entry(topo_m4, f'automemusrs{number}', automem_scope1, '3994', '5695', 'OnDeputation') try: for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for grp, instance in [(default_group2, topo_m4.ms['supplier3']), ("cn=Managers,{}".format(grp_container), topo_m4.ms['supplier1']), ("cn=Contractors,{}".format(grp_container), topo_m4.ms['supplier3'])]: assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') count = 0 for user in nsAdminGroups(topo_m4.ms['supplier3'], automem_scope1, rdn=None).list(): topo_m4.ms['supplier1'].rename_s(user.dn, f'cn=New{user_rdn}{count}', newsuperior=automem_scope2, delold=1) count += 1 for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(properties={ 'basedn': automem_scope2, 'filter': "objectClass=posixAccount" }) for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], 
topo_m4.ms['supplier4']]: ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], supplier, timeout=30000) for instance, grp in [(topo_m4.ms['supplier3'], default_group2), (topo_m4.ms['supplier3'], default_group1)]: assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), (topo_m4.ms['supplier3'], 'Contractors'), (topo_m4.ms['supplier2'], 'Interns'), (topo_m4.ms['supplier4'], 'Visitors')]: assert not nsAdminGroup( instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') finally: for scope in [automem_scope1, automem_scope2]: delete_users_and_wait(topo_m4, scope) if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/longduration/db_protect_long_test.py000066400000000000000000000306471421664411400303300ustar00rootroot00000000000000#nunn --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ Will Verify which tasks (Import/Export/Backup/Restore/Reindex (Offline/Online)) may run at the same time """ import os import logging import pytest import time import enum import shutil import json from threading import Thread, get_ident as get_tid from enum import auto as EnumAuto from lib389.topologies import topology_st as topo from lib389.dbgen import dbgen_users from lib389.backend import Backend from lib389.properties import ( TASK_WAIT ) #pytestmark = pytest.mark.tier1 NBUSERS=15000 # Should have enough user so that jobs spends at least a few seconds BASE_SUFFIX="dc=i4585,dc=test" # result reference file got from version 1.4.2.12 JSONREFNAME = os.path.join(os.path.dirname(__file__), '../data/longduration/db_protect_long_test_reference_1.4.2.12.json') #Results OK="OK" KO="KO" BUSY="KO" # So far, no diffrence between failure and failure due to busy # data associated with both suffixes (i.e DN, bakend name, ldif files, and backup directory ) _suffix1_info={ 'index': 1 } _suffix2_info={ 'index': 2 } # Threads result _result = {} # Threads _threads = {} #Mode OFFLINE="OFFLINE" ONLINE="ONLINE" DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) """ create suffix bakend, generate ldif, populate the bakend, get a backup and initialize suffix_info Note: suffix_info['index'] must be set when calling the function """ def _init_suffix(topo, suffix_info): index = suffix_info['index'] # Init suffix_info values suffix = f'dc=suffix{index},' + BASE_SUFFIX suffix_info['suffix'] = suffix ldif_dir = topo.standalone.get_ldif_dir() bak_dir = topo.standalone.get_bak_dir() suffix_info['name'] = f'suffix{index}' suffix_info['rbak'] = bak_dir + f'/r_i4585.bak' # For archive2db suffix_info['wbak'] = bak_dir + f'/w_i4585.bak' # For db2archive suffix_info['rldif'] = ldif_dir + 
f'/r_suffix{index}.ldif' # For ldif2db suffix_info['wldif'] = ldif_dir + f'/w_suffix{index}.ldif' # For db2ldif # create suffix backend be = Backend(topo.standalone) be.create(properties={'cn': suffix_info['name'], 'nsslapd-suffix': suffix}) # Generate rldif ldif file, populate backend, and generate rbak archive dbgen_users(topo.standalone, NBUSERS, suffix_info['rldif'], suffix) # Populate the backend result = _run_ldif2db(topo, ONLINE, suffix_info) assert( result == 0 ) # Generate archive (only second suffix is created) if index == 2: shutil.rmtree(suffix_info['rbak'], ignore_errors=True) result = _job_db2archive(topo, ONLINE, suffix_info['rbak']) assert( result == 0 ) """ determine json file name """ def _get_json_filename(topo): return f"{topo.standalone.ds_paths.prefix}/var/log/dirsrv/test_db_protect.json" """ Compare two results pairs Note: In the Success + Failure case, do not care about the order because of the threads race """ def is_same_result(res1, res2): if res1 == res2: return True if res1 == "OK + KO" and res2 == "KO + OK": return True if res2 == "OK + KO" and res1 == "KO + OK": return True return False """ Run a job within a dedicated thread """ def _worker(idx, job, topo, mode): log.info(f"Thread {idx} id: {get_tid()} started {mode} job {job.__name__}") rc0 = None rc = None try: rc = job(topo, mode) rc0 = rc if mode == ONLINE: if rc == 0: rc = OK else: rc = KO else: if rc: rc = OK else: rc = KO except Exception as err: log.info(f"Thread {idx} ended {mode} job {job.__name__} with exception {err}") log.info(err, exc_info=True) rc = KO _result[idx] = rc log.info(f"Thread {idx} ended {mode} job {job.__name__} with result {rc} (was {rc0})") """ Create a new thread to run a job """ def _start_work(*args): idx = args[0] _threads[idx] = Thread(target=_worker, args=args) log.info(f"created Thread {idx} id: {_threads[idx].ident}") _result[idx] = None _threads[idx].start() """ Wait until thread worker has finished then return the result """ def 
_wait4work(idx): _threads[idx].join() log.info(f"completed wait on thread {idx} id: {_threads[idx].ident} result is {_result[idx]}") return _result[idx] """ Tests all pairs of jobs and check that we got the expected result (first job is running in mode1 (ONLINE/OFFLINE)mode) (second job is running in mode2 (ONLINE/OFFLINE)mode) """ def _check_all_job_pairs(topo, state, mode1, mode2, result): """ Checks all couple of jobs with mode1 online/offline for first job and mode2 for second job """ for idx1, job1 in enumerate(job_list): for idx2, job2 in enumerate(job_list): log.info(f"Testing {mode1} {job1} + {mode2} {job2}") _start_work("job1", job1, topo, mode1) # Wait enough to insure job1 is started time.sleep(0.5) _start_work("job2", job2, topo, mode2) res1 = _wait4work("job1") res2 = _wait4work("job2") key = f"Instance {state} {mode1} {job1.__name__} + {mode2} {job2.__name__}" val = f"{res1} + {res2}" result[key] = val log.info(f"{key} ==> {val}") """ ********* JOBS DEFINITION ********** """ def _run_ldif2db(topo, mode, suffix_info): if mode == OFFLINE: return topo.standalone.ldif2db(suffix_info['name'], None, None, None, suffix_info['rldif']) else: return topo.standalone.tasks.importLDIF(benamebase=suffix_info['name'], input_file=suffix_info['rldif'], args={TASK_WAIT: True}) def _job_ldif2dbSuffix1(topo, mode): return _run_ldif2db(topo, mode, _suffix1_info) def _job_ldif2dbSuffix2(topo, mode): return _run_ldif2db(topo, mode, _suffix2_info) def _run_db2ldif(topo, mode, suffix_info): if os.path.exists(suffix_info['wldif']): os.remove(suffix_info['wldif']) if mode == OFFLINE: return topo.standalone.db2ldif(suffix_info['name'], None, None, False, False, suffix_info['wldif']) else: return topo.standalone.tasks.exportLDIF(benamebase=suffix_info['name'], output_file=suffix_info['wldif'], args={TASK_WAIT: True}) def _job_db2ldifSuffix1(topo, mode): return _run_db2ldif(topo, mode, _suffix1_info) def _job_db2ldifSuffix2(topo, mode): return _run_db2ldif(topo, mode, 
_suffix2_info) def _run_db2index(topo, mode, suffix_info): if mode == OFFLINE: return topo.standalone.db2index(bename=suffix_info['name'], attrs=['cn']) else: return topo.standalone.tasks.reindex(topo.standalone, benamebase=suffix_info['name'], attrname='cn', args={TASK_WAIT: True}) def _job_db2indexSuffix1(topo, mode): return _run_db2index(topo, mode, _suffix1_info) def _job_db2indexSuffix2(topo, mode): return _run_db2index(topo, mode, _suffix2_info) def _job_db2archive(topo, mode, backup_dir=None): # backup is quite fast solets do it several time to increase chance of having concurrent task if backup_dir is None: backup_dir = _suffix1_info['wbak'] shutil.rmtree(backup_dir, ignore_errors=True) if mode == OFFLINE: for i in range(3): rc = topo.standalone.db2bak(backup_dir) if not rc: return False return True else: for i in range(3): rc = topo.standalone.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) if (rc != 0): return rc return 0 def _job_archive2db(topo, mode, backup_dir=None): # restore is quite fast solets do it several time to increase chance of having concurrent task if backup_dir is None: backup_dir = _suffix1_info['rbak'] if mode == OFFLINE: for i in range(3): rc = topo.standalone.bak2db(backup_dir) if not rc: return False return True else: for i in range(3): rc = topo.standalone.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) if (rc != 0): return rc return 0 def _job_nothing(topo, mode): if mode == OFFLINE: return True return 0 """ ********* END OF JOBS DEFINITION ********** """ # job_list must be defined after the job get defined job_list = [ _job_nothing, _job_db2ldifSuffix1, _job_db2ldifSuffix2, _job_ldif2dbSuffix1, _job_ldif2dbSuffix2, _job_db2indexSuffix1, _job_db2indexSuffix2, _job_db2archive, _job_archive2db ] """ Beware this test is very long (several hours) it checks the results when two task (like import/export/reindex/backup/archive are run at the same time) and store the result in a json file the compare with a 
reference """ def test_db_protect(topo): """ Add an index, then import via cn=tasks :id: 462bc550-87d6-11eb-9310-482ae39447e5 :setup: Standalone Instance :steps: 1. Initialize suffixes 2. Stop server instance 3. Compute results for all couples of jobs in OFFLINE,OFFLINE mode 4. Start server instance 5. Compute results for all couples of jobs in OFFLINE,OFFLINE mode 6. Compute results for all couples of jobs in ONLINE,OFFLINE mode 7. Compute results for all couples of jobs in OFFLINE,ONLINE mode 8. Compute results for all couples of jobs in ONLINE,ONLINE mode 9. Store results in log file and json file 10. Read json reference file 11. Compute the difference between result and reference 12. Logs the differences 13. Assert if differences is not empty :expected results: 1. Operation successful 2. Operation successful 3. Operation successful 4. Operation successful 5. Operation successful 6. Operation successful 7. Operation successful 8. Operation successful 9. Operation successful 10. Operation successful 11. Operation successful 12. Operation successful 13. 
Operation successful """ # Step 1: Initialize suffixes _init_suffix(topo, _suffix1_info) _init_suffix(topo, _suffix2_info) result={} # Step 2: Stop server instance topo.standalone.stop() log.info("Server instance is now stopped.") # Step 3: Compute results for all couples of jobs in OFFLINE,OFFLINE mode _check_all_job_pairs(topo, OFFLINE, OFFLINE, OFFLINE, result) # Step 4: Start server instance topo.standalone.start() log.info("Server instance is now started.") # Step 5: Compute results for all couples of jobs in OFFLINE,OFFLINE mode _check_all_job_pairs(topo, ONLINE, OFFLINE, OFFLINE, result) # Step 6: Compute results for all couples of jobs in ONLINE,OFFLINE mode _check_all_job_pairs(topo, ONLINE, ONLINE, OFFLINE, result) # Step 7: Compute results for all couples of jobs in OFFLINE,ONLINE mode _check_all_job_pairs(topo, ONLINE, OFFLINE, ONLINE, result) # Step 8: Compute results for all couples of jobs in ONLINE,ONLINE mode _check_all_job_pairs(topo, ONLINE, ONLINE, ONLINE, result) # Step 9: Logs the results and store the json file for key,val in result.items(): log.info(f"{key} ==> {val}") with open(_get_json_filename(topo), "w") as jfile: json.dump(result, jfile) # Step 10: read json reference file with open(JSONREFNAME, "r") as jfile: ref = json.load(jfile) # Step 11: Compute the differences differences={} for key, value in result.items(): if key in ref: if not is_same_result(value, ref[key]): differences[key] = ( value, ref[key] ) else: differences[key] = ( value, None ) for key, value in ref.items(): if not key in result: differences[key] = ( None, value ) # Step 12: Log the differences log.info(f"difference between result an 1.4.2.12 reference are:") log.info(f" key: (result, reference)") for key, value in differences.items(): log.info(f"{key}: {value}") # Step 13: assert if there are differences assert not differences 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/perf/000077500000000000000000000000001421664411400217705ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/perf/create_data.py000077500000000000000000000226721421664411400246120ustar00rootroot00000000000000#!/usr/bin/python2 from __future__ import ( print_function, division ) import sys import math class RHDSData(object): def __init__( self, stream=sys.stdout, users=10000, groups=100, grps_puser=20, nest_level=10, ngrps_puser=10, domain="redhat.com", basedn="dc=example,dc=com", ): self.users = users self.groups = groups self.basedn = basedn self.domain = domain self.stream = stream self.grps_puser = grps_puser self.nest_level = nest_level self.ngrps_puser = ngrps_puser self.user_defaults = { 'objectClass': [ 'person', 'top', 'inetorgperson', 'organizationalperson', 'inetuser', 'posixaccount'], 'uidNumber': ['-1'], 'gidNumber': ['-1'], } self.group_defaults = { 'objectClass': [ 'top', 'inetuser', 'posixgroup', 'groupofnames'], 'gidNumber': [-1], } def put_entry(self, entry): """ Abstract method, implementation depends on if we want just print LDIF, or update LDAP directly """ raise NotImplementedError() def gen_user(self, uid): user = dict(self.user_defaults) user['dn'] = 'uid={uid},ou=people,{suffix}'.format( uid=uid, suffix=self.basedn, ) user['uid'] = [uid] user['displayName'] = ['{} {}'.format(uid, uid)] user['sn'] = [uid] user['homeDirectory'] = ['/other-home/{}'.format(uid)] user['mail'] = ['{uid}@{domain}'.format( uid=uid, domain=self.domain)] user['givenName'] = [uid] user['cn'] = ['{} {}'.format(uid, uid)] return user def username_generator(self, start, stop, step=1): for i in range(start, stop, step): yield 'user%s' % i def gen_group(self, name, members=(), group_members=()): group = dict(self.group_defaults) group['dn'] = 'cn={name},ou=groups,{suffix}'.format( name=name, suffix=self.basedn, ) group['cn'] = [name] group['member'] = ['uid={uid},ou=people,{suffix}'.format( uid=uid, 
suffix=self.basedn, ) for uid in members] group['member'].extend( ['cn={name},ou=groups,{suffix}'.format( name=name, suffix=self.basedn, ) for name in group_members]) return group def groupname_generator(self, start, stop, step=1): for i in range(start, stop, step): yield 'group%s' % i def gen_users_and_groups(self): self.__gen_entries_with_groups( self.users, self.groups, self.grps_puser, self.ngrps_puser, self.nest_level, self.username_generator, self.gen_user, self.groupname_generator, self.gen_group ) def __gen_entries_with_groups( self, num_of_entries, num_of_groups, groups_per_entry, nested_groups_per_entry, max_nesting_level, gen_entry_name_f, gen_entry_f, gen_group_name_f, gen_group_f ): assert num_of_groups % groups_per_entry == 0 assert num_of_groups >= groups_per_entry assert groups_per_entry > nested_groups_per_entry assert max_nesting_level > 0 assert nested_groups_per_entry > 0 assert ( groups_per_entry - nested_groups_per_entry > int(math.ceil(nested_groups_per_entry / float(max_nesting_level))) ), ( "At least {} groups is required to generate proper amount of " "nested groups".format( nested_groups_per_entry + int(math.ceil( nested_groups_per_entry / float(max_nesting_level)) ) ) ) for uid in gen_entry_name_f(0, num_of_entries): self.put_entry(gen_entry_f(uid)) # create N groups per entry, of them are nested # User/Host (max nesting level = 2) # | # +--- G1 --- G2 (nested) --- G3 (nested, max level) # | # +--- G5 --- G6 (nested) # | # ...... 
# | # +--- GN # how many members should be added to groups (set of groups_per_entry # have the same members) entries_per_group = num_of_entries // (num_of_groups // groups_per_entry) # generate groups and put users there for i in range(num_of_groups // groups_per_entry): uids = list(gen_entry_name_f( i * entries_per_group, (i + 1) * entries_per_group )) # per user last_grp_name = None nest_lvl = 0 nested_groups_added = 0 for group_name in gen_group_name_f( i * groups_per_entry, (i + 1) * groups_per_entry, ): # create nested groups first if nested_groups_added < nested_groups_per_entry: if nest_lvl == 0: # the top group self.put_entry( gen_group_f( group_name, members=uids ) ) nest_lvl += 1 nested_groups_added += 1 elif nest_lvl == max_nesting_level: # the last level group this group is not nested self.put_entry( gen_group_f( group_name, group_members=[last_grp_name], ) ) nest_lvl = 0 else: # mid level group self.put_entry( gen_group_f( group_name, group_members=[last_grp_name] ) ) nested_groups_added += 1 nest_lvl += 1 last_grp_name = group_name else: # rest of groups have direct membership if nest_lvl != 0: # assign the last nested group if exists self.put_entry( gen_group_f( group_name, members=uids, group_members=[last_grp_name], ) ) nest_lvl = 0 else: self.put_entry( gen_group_f( group_name, members=uids ) ) def __generate_entries_with_users_groups( self, num_of_entries_direct_members, num_of_entries_indirect_members, entries_per_user, entries_per_group, gen_entry_name_f, gen_entry_f, ): assert num_of_entries_direct_members % entries_per_user == 0 assert num_of_entries_indirect_members % entries_per_group == 0 num_of_entries = num_of_entries_direct_members + num_of_entries_indirect_members # direct members users_per_entry = self.users // (num_of_entries_direct_members // entries_per_user) start_user = 0 stop_user = users_per_entry for name in gen_entry_name_f(0, num_of_entries_direct_members): self.put_entry( gen_entry_f( name, 
user_members=self.username_generator(start_user, stop_user), ) ) start_user = stop_user % self.users stop_user = start_user + users_per_entry stop_user = stop_user if stop_user < self.users else self.users groups_per_entry = self.groups // (num_of_entries_indirect_members // entries_per_group) # indirect members start_group = 0 stop_group = groups_per_entry for name in gen_entry_name_f(num_of_entries_direct_members, num_of_entries): self.put_entry( gen_entry_f( name, usergroup_members=self.groupname_generator(start_group, stop_group), ) ) start_group = stop_group % self.groups stop_group = start_group + groups_per_entry stop_group = stop_group if stop_group < self.groups else self.groups def do_magic(self): self.gen_users_and_groups() class RHDSDataLDIF(RHDSData): def put_entry(self, entry): print(file=self.stream) print("dn:", entry['dn'], file=self.stream) for k, values in entry.items(): if k == 'dn': continue for v in values: print("{}: {}".format(k, v), file=self.stream) print(file=self.stream) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/perf/memberof_test.py000077500000000000000000000466301421664411400252110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import subprocess from lib389 import Entry from lib389.tasks import Tasks from lib389.dseldif import DSEldif from create_data import RHDSDataLDIF from lib389.properties import TASK_WAIT from lib389.utils import ldap, os, time, logging, ds_is_older from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD, PLUGIN_MEMBER_OF, \ PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER, DN_CONFIG_LDBM, HOST_STANDALONE, PORT_STANDALONE from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier3 MEMOF_PLUGIN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') MAN_ENTRY_PLUGIN = ('cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config') AUTO_MEM_PLUGIN = ('cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config') DOMAIN = 'redhat.com' LDAP_MOD = '/usr/bin/ldapmodify' FILTER = 'objectClass=*' USER_FILTER = '(|(uid=user*)(cn=group*))' MEMBEROF_ATTR = 'memberOf' DN_ATTR = 'dn:' logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def memberof_setup(topo, request): """Configure required plugins and restart the server""" log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server') topo.standalone.simple_bind_s(DN_DM, PASSWORD) try: topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) except ldap.LDAPError as e: log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF)) raise e try: topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) except ldap.LDAPError as e: log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) raise e log.info('Change config values for db-locks and dbcachesize to import large ldif files') if ds_is_older('1.3.6'): topo.standalone.stop(timeout=10) dse_ldif = DSEldif(topo.standalone) try: dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000') dse_ldif.replace(DN_CONFIG_LDBM, 
'nsslapd-dbcachesize', '10000000') except: log.error('Failed to replace cn=config values of db-locks and dbcachesize') raise topo.standalone.start(timeout=10) else: try: topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '100000')]) topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', '10000000')]) except ldap.LDAPError as e: log.error( 'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e.message['desc'])) raise e topo.standalone.restart(timeout=10) def fin(): log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) topo.standalone.simple_bind_s(DN_DM, PASSWORD) try: topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY) topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER) except ldap.LDAPError as e: log.error('Failed to disable plugins, {}'.format(e.message['desc'])) assert False topo.standalone.restart(timeout=10) request.addfinalizer(fin) def _create_base_ldif(topo, import_base=False): """Create base ldif file to clean entries from suffix""" log.info('Add base entry for online import') ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, '/perf.ldif') log.info('LDIF FILE is this: {}'.format(ldif_file)) base_ldif = """dn: dc=example,dc=com objectclass: top objectclass: domain dc: example dn: ou=people,dc=example,dc=com objectclass: top objectclass: organizationalUnit ou: people dn: ou=groups,dc=example,dc=com objectclass: top objectclass: organizationalUnit ou: groups """ with open(ldif_file, "w") as fd: fd.write(base_ldif) if import_base: log.info('Adding base entry to suffix to remove users/groups and leave only the OUs') try: topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) except ValueError as e: 
log.error('Online import failed' + e.message('desc')) assert False else: log.info('Return LDIF file') return ldif_file def _run_fixup_memberof(topo): """Run fixup memberOf task and measure the time taken""" log.info('Running fixup memberOf task and measuring the time taken') start = time.time() try: topo.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) except ValueError as e: log.error('Running fixup MemberOf task failed' + e.message('desc')) assert False end = time.time() cmd_time = int(end - start) return cmd_time def _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, is_import=False): """Create LDIF files for given nof users, groups and nested group levels""" log.info('Checking if the operation is Import or Ldapadd') if is_import: log.info('Import: Create base entry before adding users and groups') exp_entries = nof_users + nof_groups data_ldif = _create_base_ldif(topo, False) log.info('Create data LDIF file by appending users, groups and nested groups') with open(data_ldif, 'a') as file1: data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) data.do_magic() start = time.time() log.info('Run importLDIF task to add entries to Server') try: topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=data_ldif, args={TASK_WAIT: True}) except ValueError as e: log.error('Online import failed' + e.message('desc')) assert False end = time.time() time_import = int(end - start) log.info('Check if number of entries created matches the expected entries') users_groups = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, USER_FILTER, [DN_ATTR]) act_entries = str(users_groups).count(DN_ATTR) log.info('Expected entries: {}, Actual entries: {}'.format(exp_entries, act_entries)) assert act_entries == exp_entries return time_import else: log.info('Ldapadd: Create data LDIF file with users, groups and nested groups') ldif_dir 
= topo.standalone.get_ldif_dir() data_ldif = os.path.join(ldif_dir, '/perf_add.ldif') with open(data_ldif, 'w') as file1: data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) data.do_magic() start = time.time() log.info('Run LDAPMODIFY to add entries to Server') try: subprocess.check_output( [LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', HOST_STANDALONE, '-p', str(PORT_STANDALONE), '-af', data_ldif]) except subprocess.CalledProcessError as e: log.error('LDAPMODIFY failed to add entries, error:{:s}'.format(str(e))) raise e end = time.time() cmd_time = int(end - start) log.info('Time taken to complete LDAPADD: {} secs'.format(cmd_time)) return cmd_time def _sync_memberof_attrs(topo, exp_memberof): """Check if expected entries are created or attributes are synced""" log.info('_sync_memberof_attrs: Check if expected memberOf attributes are synced/created') loop = 0 start = time.time() entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) act_memberof = str(entries).count(MEMBEROF_ATTR) end = time.time() cmd_time = int(end - start) log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, act_memberof, cmd_time)) while act_memberof != exp_memberof: loop = loop + 1 time.sleep(30) start = time.time() entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) act_memberof = str(entries).count(MEMBEROF_ATTR) end = time.time() cmd_time = cmd_time + int(end - start) log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, act_memberof, cmd_time)) # Worst case scenario, exit the test after 10hrs of wait if loop > 1200: log.error('Either syncing memberOf attrs takes too long or some issue with the test itself') assert False sync_time = 1 + loop * 30 log.info('Expected memberOf attrs: {}, Actual memberOf 
attrs: {}'.format(exp_memberof, act_memberof)) assert act_memberof == exp_memberof return sync_time @pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) def test_nestgrps_import(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): """Import large users and nested groups with N depth and measure the time taken :id: 169a09f2-2c2d-4e42-8b90-a0bd1034f278 :feature: MemberOf Plugin :setup: Standalone instance, memberOf plugin enabled :steps: 1. Create LDIF file for given nof_users and nof_groups 2. Import entries to server 3. Check if entries are created 4. Run fixupMemberOf task to create memberOf attributes 5. Check if memberOf attributes are synced for all users and groups 6. Compare the actual no of memberOf attributes to the expected 7. Measure the time taken to sync memberOf attributes :expectedresults: MemberOf attributes should be synced """ exp_memberof = (nof_users * grps_user) + ( (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) log.info('Import LDIF file and measure the time taken') import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) log.info('Run fixup memberOf task and measure the time taken to complete the task') fixup_time = _run_fixup_memberof(topo) log.info('Check the total number of memberOf entries created for users and groups') sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = import_time + fixup_time + sync_memberof log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, fixup_time, total_time)) @pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", [(20000, 100, 20, 10, 5), (50000, 200, 50, 10, 10), 
(100000, 100, 20, 10, 10)]) def test_nestgrps_add(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): """Import large users and nested groups with n depth and measure the time taken :id: 6eda75c6-5ae0-4b17-b610-d217d7ec7542 :feature: MemberOf Plugin :setup: Standalone instance, memberOf plugin enabled :steps: 1. Create LDIF file for given nof_users and nof_groups 2. Add entries using LDAPADD 3. Check if entries are created 4. Check if memberOf attributes are synced for all users and groups 5. Compare the actual no of memberOf attributes to the expected 6. Measure the time taken to sync memberOf attributes :expectedresults: MemberOf attributes should be created and synced """ exp_memberof = (nof_users * grps_user) + ( (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) log.info('Creating base_ldif file and importing it to wipe out all users and groups') _create_base_ldif(topo, True) log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) log.info('Run LDAPADD to add entries to Server') add_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, False) log.info('Check the total number of memberOf entries created for users and groups') sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = add_time + sync_memberof log.info('Time for ldapadd-{}secs, total time for memberOf sync: {}secs'.format(add_time, total_time)) @pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) def test_mod_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): """Import bulk entries, modify nested groups at N depth and measure the time taken :id: 4bf8e753-6ded-4177-8225-aaf6aef4d131 :feature: MemberOf Plugin :setup: Standalone instance, memberOf plugin enabled :steps: 1. 
Import bulk entries with nested group and create memberOf attributes 2. Modify nested groups by adding new members at each nested level 3. Check new memberOf attributes created for users and groups 4. Compare the actual memberOf attributes with the expected 5. Measure the time taken to sync memberOf attributes :expectedresults: MemberOf attributes should be modified and synced """ exp_memberof = (nof_users * grps_user) + ( (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) log.info('Create nested ldif file, import it and measure the time taken') import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) log.info('Run fixup memberOf task and measure the time to complete the task') fixup_time = _run_fixup_memberof(topo) sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = import_time + fixup_time + sync_memberof log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, fixup_time, total_time)) log.info('Add {} users to existing nested groups at all depth level'.format(nof_groups)) log.info('Add one user to each groups at different nest levels') start = time.time() for usr in range(nof_groups): usrrdn = 'newcliusr{}'.format(usr) userdn = 'uid={},ou=people,{}'.format(usrrdn, SUFFIX) groupdn = 'cn=group{},ou=groups,{}'.format(usr, SUFFIX) try: topo.standalone.add_s(Entry((userdn, { 'objectclass': 'top person inetUser inetOrgperson'.split(), 'cn': usrrdn, 'sn': usrrdn, 'userpassword': 'Secret123'}))) except ldap.LDAPError as e: log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) raise try: topo.standalone.modify_s(groupdn, [(ldap.MOD_ADD, 'member', userdn)]) except ldap.LDAPError as e: log.error('Error-{}: Failed to add user to group'.format(e.message['desc'])) assert False end = time.time() cmd_time = int(end - start) exp_memberof = (nof_users * grps_user) + nof_groups + ( (nof_groups // 
grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1))) log.info('Check the total number of memberOf entries created for users and groups') sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = cmd_time + sync_memberof log.info('Time taken add new members to existing nested groups + memberOf sync: {} secs'.format(total_time)) @pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) def test_del_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): """Import bulk entries, delete nested groups at N depth and measure the time taken :id: d3d82ac5-d968-4cd6-a268-d380fc9fd51b :feature: MemberOf Plugin :setup: Standalone instance, memberOf plugin enabled :steps: 1. Import bulk users and groups with nested level N. 2. Run fixup memberOf task to create memberOf attributes 3. Delete nested groups at nested level N 4. Check memberOf attributes deleted for users and groups 5. Compare the actual memberOf attributes with the expected 6. 
Measure the time taken to sync memberOf attributes :expectedresults: MemberOf attributes should be deleted and synced """ exp_memberof = (nof_users * grps_user) + ( (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) log.info('Create nested ldif file, import it and measure the time taken') import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) log.info('Run fixup memberOf task and measure the time to complete the task') fixup_time = _run_fixup_memberof(topo) sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = import_time + fixup_time + sync_memberof log.info('Time taken to complete add users + memberOf sync: {} secs'.format(total_time)) log.info('Delete {} groups from nested groups at depth level-{}'.format(nof_depth, nof_depth)) start = time.time() for nos in range(nof_depth, nof_groups, grps_user): groupdn = 'cn=group{},ou=groups,{}'.format(nos, SUFFIX) try: topo.standalone.delete_s(groupdn) except ldap.LDAPError as e: log.error('Error-{}: Failed to delete group'.format(e.message['desc'])) assert False end = time.time() cmd_time = int(end - start) exp_memberof = exp_memberof - (nof_users + (nof_depth * (nof_groups // grps_user))) log.info('Check memberOf attributes after deleting groups at depth-{}'.format(nof_depth)) sync_memberof = _sync_memberof_attrs(topo, exp_memberof) total_time = cmd_time + sync_memberof log.info('Time taken to delete and sync memberOf attributes: {}secs'.format(total_time)) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s {}".format(CURRENT_FILE)) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/perf/search_performance_test.py000066400000000000000000000077241421664411400272410ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # Copyright (C) 2019 William Brown # All rights reserved. 
# # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # # Performance tests look different to others, they require some extra # environmental settings. import ldap import os from lib389 import DirSrv from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st as topology from lib389.idm.domain import Domain from lib389.idm.group import Groups from lib389.idm.user import nsUserAccounts from lib389.backend import Backends from lib389.ldclt import Ldclt import time # We want to write a CSV such as: # category,1 thread,4 thread,8 thread,16 thread # testcategory,500,800,1000,2000 # testcategory2,500,800,1000,2000 TEST_MARKER = 'configured: search_performance_test.py' # GROUP_MAX = 4000 # USER_MAX = 6000 GROUP_MAX = 4000 USER_MAX = 6000 TARGET_HOST = os.environ.get('PERF_TARGET_HOST', 'localhost') TARGET_PORT = os.environ.get('PERF_TARGET_PORT', '389') def assert_data_present(inst): # Do we have the backend marker? d = Domain(inst, DEFAULT_SUFFIX) try: desc = d.get_attr_val_utf8('description') if desc == TEST_MARKER: return except: # Just reset everything. pass # Reset the backends bes = Backends(inst) try: be = bes.get(DEFAULT_SUFFIX) be.delete() except: pass be = bes.create(properties={ 'nsslapd-suffix': DEFAULT_SUFFIX, 'cn': 'userRoot', }) be.create_sample_entries('001004002') # Load our data # We can't use dbgen as that relies on local access :( # Add 40,000 groups groups = Groups(inst, DEFAULT_SUFFIX) for i in range(1,GROUP_MAX): rdn = 'group_{0:07d}'.format(i) groups.create(properties={ 'cn': rdn, }) # Add 60,000 users users = nsUserAccounts(inst, DEFAULT_SUFFIX) for i in range(1,USER_MAX): rdn = 'user_{0:07d}'.format(i) users.create(properties={ 'uid': rdn, 'cn': rdn, 'displayName': rdn, 'uidNumber' : '%s' % i, 'gidNumber' : '%s' % i, 'homeDirectory' : '/home/%s' % rdn, 'userPassword': rdn, }) # Add the marker d.replace('description', TEST_MARKER) # Done! 
# Single uid # 1000 uid # 4000 uid # 5000 uid # 10,000 uid # & of single uid # & of two 1000 uid sets # & of two 4000 uid sets # & of two 5000 uid sets # & of two 10,000 uid sets # | of single uid # | of two 1000 uid sets # | of two 4000 uid sets # | of two 5000 uid sets # | of two 10,000 uid sets # & of user and group # | of user and group def _do_search_performance(inst, thread_count): # Configure thread count # Restart print("Configuring with %s threads ..." % thread_count) time.sleep(1) inst.config.set('nsslapd-threadnumber', str(thread_count)) inst.restart() ld = Ldclt(inst) out = ld.search_loadtest(DEFAULT_SUFFIX, "(uid=user_XXXXXXX)", min=1, max=USER_MAX) return out # Need a check here def test_user_search_performance(): inst = DirSrv(verbose=True) inst.remote_simple_allocate( f"ldaps://{TARGET_HOST}", password="password" ) # Need a better way to set this. inst.host = TARGET_HOST inst.port = TARGET_PORT inst.open(reqcert=ldap.OPT_X_TLS_NEVER) assert_data_present(inst) r1 = _do_search_performance(inst, 1) # r2 = _do_search_performance(inst, 4) # r3 = _do_search_performance(inst, 6) # r4 = _do_search_performance(inst, 8) # r5 = _do_search_performance(inst, 12) r6 = _do_search_performance(inst, 16) # print("category,t1,t4,t6,t8,t12,t16") # print("search,%s,%s,%s,%s,%s,%s" % (r1, r2, r3, r4, r5, r6)) def test_group_search_performance(): pass ## TODO # Tweak cache levels # turbo mode # ldclt threads = 2x server? # add perf logs to each test 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/000077500000000000000000000000001421664411400223575ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/README000066400000000000000000000016241421664411400232420ustar00rootroot00000000000000README for "Stress" Tests Reliablity Tests ============================== A generic high load, long running tests reliab7_5_test.py ------------------------------ This script is a light-weight version of the legacy TET stress test called "Reliabilty 15". 
This test consists of two MMR Suppliers, and a 5000 entry database. The test starts off with two threads doing unindexed searchesi(1 for each supplier). These do not exit untl the entire test completes. Then while the unindexed searches are going on, the test performs a set of adds, mods, deletes, and modrdns on each supplier at the same time. It performs this set of operations 1000 times. The main goal of this script is to test stablilty, replication convergence, and memory growth/fragmentation. Known issue: the server can deadlock in the libdb4 code while performing modrdns(under investigation via https://fedorahosted.org/389/ticket/48166) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/__init__.py000066400000000000000000000000301421664411400244610ustar00rootroot00000000000000# -*- coding: utf-8 -*- 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/cos/000077500000000000000000000000001421664411400231435ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/cos/cos_scale_template_test.py000066400000000000000000000114641421664411400304100ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.topologies import topology_st from lib389.plugins import ClassOfServicePlugin from lib389.cos import CosIndirectDefinitions, CosTemplates, CosTemplate from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import DEFAULT_SUFFIX import time pytestmark = pytest.mark.tier3 # Given this should complete is about 0.005, this is generous. # For the final test with 20 templates, about 0.02 is an acceptable time. 
THRESHOLD = 0.05 class OUCosTemplate(CosTemplate): def __init__(self, instance, dn=None): """Create a OU specific cos template to replicate a specific user setup. This template provides ou attrs onto the target entry. :param instance: A dirsrv instance :type instance: DirSrv :param dn: The dn of the template :type dn: str """ super(OUCosTemplate, self).__init__(instance, dn) self._rdn_attribute = 'ou' self._must_attributes = ['ou'] self._create_objectclasses = [ 'top', 'cosTemplate', 'organizationalUnit', ] class OUCosTemplates(CosTemplates): def __init__(self, instance, basedn, rdn=None): """Create an OU specific cos templates to replicate a specific use setup. This costemplates object allows access to the OUCosTemplate types. :param instance: A dirsrv instance :type instance: DirSrv :param basedn: The basedn of the templates :type basedn: str :param rdn: The rdn of the templates :type rdn: str """ super(OUCosTemplates, self).__init__(instance, basedn, rdn) self._objectclasses = [ 'cosTemplate', 'organizationalUnit', ] self._filterattrs = ['ou'] self._childobject = OUCosTemplate def test_indirect_template_scale(topology_st): """Test that cos templates can be added at a reasonable scale :id: 7cbcdf22-1f9c-4222-9e76-685fe374fc20 :steps: 1. Enable COS plugin 2. Create the test user 3. Add an indirect cos template 4. Add a cos template 5. Add the user to the cos template and assert it works. 6. Add 25,000 templates to the database 7. Search the user. It should not exceed THRESHOLD. :expected results: 1. It is enabled. 2. It is created. 3. Is is created. 4. It is created. 5. It is valid. 6. They are created. 7. It is fast. """ cos_plugin = ClassOfServicePlugin(topology_st.standalone) cos_plugin.enable() topology_st.standalone.restart() # Now create, the indirect specifier, and a user to template onto. 
users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create(properties=TEST_USER_PROPERTIES) cos_inds = CosIndirectDefinitions(topology_st.standalone, DEFAULT_SUFFIX) cos_ind = cos_inds.create(properties={ 'cn' : 'cosIndirectDef', 'cosIndirectSpecifier': 'seeAlso', 'cosAttribute': [ 'ou merge-schemes', 'description merge-schemes', 'postalCode merge-schemes', ], }) ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_temp = ous.create(properties={'ou': 'templates'}) cos_temps = OUCosTemplates(topology_st.standalone, ou_temp.dn) cos_temp_u = cos_temps.create(properties={ 'ou' : 'ou_temp_u', 'description' : 'desc_temp_u', 'postalCode': '0' }) # Edit the user to add the seeAlso ... user.set('seeAlso', cos_temp_u.dn) # Now create 25,0000 templates, they *don't* need to apply to the user though! for i in range(1, 25001): cos_temp_u = cos_temps.create(properties={ 'ou' : 'ou_temp_%s' % i, 'description' : 'desc_temp_%s' % i, 'postalCode': '%s' % i }) if i % 500 == 0: start_time = time.monotonic() u_search = users.get('testuser') attrs = u_search.get_attr_vals_utf8('postalCode') end_time = time.monotonic() diff_time = end_time - start_time assert diff_time < THRESHOLD if i == 10000: # Now add our user to this template also. 
user.add('seeAlso', cos_temp_u.dn) start_time = time.monotonic() attrs_after = u_search.get_attr_vals_utf8('postalCode') end_time = time.monotonic() diff_time = end_time - start_time assert(set(attrs) < set(attrs_after)) assert diff_time < THRESHOLD 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/reliabilty/000077500000000000000000000000001421664411400245175ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/reliabilty/__init__.py000066400000000000000000000000001421664411400266160ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py000066400000000000000000000502321421664411400300420ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import sys import time import ldap import logging import pytest import threading import random from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.idm.directorymanager import DirectoryManager pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s' + ' - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) log = logging.getLogger(__name__) log.addHandler(handler) installation1_prefix = None NUM_USERS = 5000 MAX_PASSES = 1000 CHECK_CONVERGENCE = True ENABLE_VALGRIND = False RUNNING = True DEBUGGING = os.getenv('DEBUGGING', default=False) class TopologyReplication(object): def __init__(self, supplier1, supplier2): supplier1.open() self.supplier1 = supplier1 supplier2.open() self.supplier2 = supplier2 @pytest.fixture(scope="module") def topology(request): global installation1_prefix if installation1_prefix: 
args_instance[SER_DEPLOYED_DIR] = installation1_prefix # Creating supplier 1... supplier1 = DirSrv(verbose=DEBUGGING) args_instance[SER_HOST] = HOST_SUPPLIER_1 args_instance[SER_PORT] = PORT_SUPPLIER_1 args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_1 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier1.allocate(args_supplier) instance_supplier1 = supplier1.exists() if instance_supplier1: supplier1.delete() supplier1.create() supplier1.open() supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_1) # Creating supplier 2... supplier2 = DirSrv(verbose=DEBUGGING) args_instance[SER_HOST] = HOST_SUPPLIER_2 args_instance[SER_PORT] = PORT_SUPPLIER_2 args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_2 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier2.allocate(args_supplier) instance_supplier2 = supplier2.exists() if instance_supplier2: supplier2.delete() supplier2.create() supplier2.open() supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_2) # # Create all the agreements # # Creating agreement from supplier 1 to supplier 2 properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m1_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m2_agmt) # Creating agreement from supplier 2 to supplier 1 properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m2_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m1_agmt) # Allow the replicas to get situated with the new agreements... time.sleep(5) # # Import tests entries into supplier1 before we initialize supplier2 # ldif_dir = supplier1.get_ldif_dir() import_ldif = ldif_dir + '/rel7.5-entries.ldif' # First generate an ldif try: ldif = open(import_ldif, 'w') except IOError as e: log.fatal('Failed to create test ldif, error: %s - %s' % (e.errno, e.strerror)) assert False # Create the root node ldif.write('dn: ' + DEFAULT_SUFFIX + '\n') ldif.write('objectclass: top\n') ldif.write('objectclass: domain\n') ldif.write('dc: example\n') ldif.write('\n') # Create the entries idx = 0 while idx < NUM_USERS: count = str(idx) ldif.write('dn: uid=supplier1_entry' + count + ',' + DEFAULT_SUFFIX + '\n') ldif.write('objectclass: top\n') ldif.write('objectclass: person\n') ldif.write('objectclass: inetorgperson\n') ldif.write('objectclass: organizationalperson\n') ldif.write('uid: supplier1_entry' + count + '\n') ldif.write('cn: supplier1 entry' + count + '\n') ldif.write('givenname: supplier1 ' + count + '\n') ldif.write('sn: entry ' + count + '\n') ldif.write('userpassword: supplier1_entry' + count + '\n') ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') ldif.write('\n') ldif.write('dn: uid=supplier2_entry' + count + ',' + DEFAULT_SUFFIX + '\n') ldif.write('objectclass: top\n') ldif.write('objectclass: person\n') ldif.write('objectclass: inetorgperson\n') ldif.write('objectclass: organizationalperson\n') ldif.write('uid: supplier2_entry' + count + '\n') ldif.write('cn: 
supplier2 entry' + count + '\n') ldif.write('givenname: supplier2 ' + count + '\n') ldif.write('sn: entry ' + count + '\n') ldif.write('userpassword: supplier2_entry' + count + '\n') ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') ldif.write('\n') idx += 1 ldif.close() # Now import it try: supplier1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, args={TASK_WAIT: True}) except ValueError: log.fatal('test_reliab_7.5: Online import failed') assert False # # Initialize all the agreements # supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) supplier1.waitForReplInit(m1_m2_agmt) # Check replication is working... if supplier1.testReplication(DEFAULT_SUFFIX, supplier2): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False # Clear out the tmp dir supplier1.clearTmpDir(__file__) # Delete each instance in the end def fin(): supplier1.delete() supplier2.delete() if ENABLE_VALGRIND: sbin_dir = get_sbin_dir(prefix=supplier1.prefix) valgrind_disable(sbin_dir) request.addfinalizer(fin) return TopologyReplication(supplier1, supplier2) class AddDelUsers(threading.Thread): def __init__(self, inst, supplierid): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.id = supplierid def run(self): # Add 5000 entries idx = 0 RDN = 'uid=add_del_supplier_' + self.id + '-' conn = DirectoryManager(self.inst).bind() while idx < NUM_USERS: USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX try: conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'user' + str(idx), 'cn': 'g' * random.randint(1, 500) }))) except ldap.LDAPError as e: log.fatal('Add users to supplier ' + self.id + ' failed (' + USER_DN + ') error: ' + e.message['desc']) idx += 1 conn.close() # Delete 5000 entries conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX try: conn.delete_s(USER_DN) except ldap.LDAPError as e: 
log.fatal('Failed to delete (' + USER_DN + ') on supplier ' + self.id + ': error ' + e.message['desc']) idx += 1 conn.close() class ModUsers(threading.Thread): # Do mods and modrdns def __init__(self, inst, supplierid): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.id = supplierid def run(self): # Mod existing entries conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: USER_DN = ('uid=supplier' + self.id + '_entry' + str(idx) + ',' + DEFAULT_SUFFIX) try: conn.modify(USER_DN, [(ldap.MOD_REPLACE, 'givenname', 'new givenname supplier1-' + str(idx))]) except ldap.LDAPError as e: log.fatal('Failed to modify (' + USER_DN + ') on supplier ' + self.id + ': error ' + e.message['desc']) idx += 1 conn.close() # Modrdn existing entries conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: USER_DN = ('uid=supplier' + self.id + '_entry' + str(idx) + ',' + DEFAULT_SUFFIX) NEW_RDN = 'cn=supplier' + self.id + '_entry' + str(idx) try: conn.rename_s(USER_DN, NEW_RDN, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn (' + USER_DN + ') on supplier ' + self.id + ': error ' + e.message['desc']) idx += 1 conn.close() # Undo modrdn to we can rerun this test conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: USER_DN = ('cn=supplier' + self.id + '_entry' + str(idx) + ',' + DEFAULT_SUFFIX) NEW_RDN = 'uid=supplier' + self.id + '_entry' + str(idx) try: conn.rename_s(USER_DN, NEW_RDN, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn (' + USER_DN + ') on supplier ' + self.id + ': error ' + e.message['desc']) idx += 1 conn.close() class DoSearches(threading.Thread): # Search a supplier def __init__(self, inst, supplierid): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.id = supplierid def run(self): # Equality conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: search_filter = ('(|(uid=supplier' + self.id + '_entry' + str(idx) + 
')(cn=supplier' + self.id + '_entry' + str(idx) + '))') try: conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) except ldap.LDAPError as e: log.fatal('Search Users: Search failed (%s): %s' % (search_filter, e.message['desc'])) conn.close() return idx += 1 conn.close() # Substring conn = DirectoryManager(self.inst).bind() idx = 0 while idx < NUM_USERS: search_filter = ('(|(uid=supplier' + self.id + '_entry' + str(idx) + '*)(cn=supplier' + self.id + '_entry' + str(idx) + '*))') try: conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) except ldap.LDAPError as e: log.fatal('Search Users: Search failed (%s): %s' % (search_filter, e.message['desc'])) conn.close() return idx += 1 conn.close() class DoFullSearches(threading.Thread): # Search a supplier def __init__(self, inst): threading.Thread.__init__(self) self.daemon = True self.inst = inst def run(self): global RUNNING conn = DirectoryManager(self.inst).bind() while RUNNING: time.sleep(2) try: conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') except ldap.LDAPError as e: log.fatal('Full Search Users: Search failed (%s): %s' % ('objectclass=*', e.message['desc'])) conn.close() assert False conn.close() def test_reliab7_5_init(topology): ''' Reduce entry cache - to increase the cache churn Then process "reliability 15" type tests ''' BACKEND_DN = 'cn=userroot,cn=ldbm database,cn=plugins,cn=config' # Update supplier 1 try: topology.supplier1.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', '512000'), (ldap.MOD_REPLACE, 'nsslapd-cachesize', '500')]) except ldap.LDAPError as e: log.fatal('Failed to set cache settings: error ' + e.message['desc']) assert False # Update supplier 2 try: topology.supplier2.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', '512000'), (ldap.MOD_REPLACE, 'nsslapd-cachesize', '500')]) except ldap.LDAPError as e: log.fatal('Failed to set cache settings: error ' + e.message['desc']) assert False # Restart the suppliers to pick 
up the new cache settings topology.supplier1.stop(timeout=10) topology.supplier2.stop(timeout=10) # This is the time to enable valgrind (if enabled) if ENABLE_VALGRIND: sbin_dir = get_sbin_dir(prefix=topology.supplier1.prefix) valgrind_enable(sbin_dir) topology.supplier1.start(timeout=30) topology.supplier2.start(timeout=30) def test_reliab7_5_run(topology): ''' Starting issuing adds, deletes, mods, modrdns, and searches ''' global RUNNING count = 1 RUNNING = True # Start some searches to run through the entire stress test fullSearch1 = DoFullSearches(topology.supplier1) fullSearch1.start() fullSearch2 = DoFullSearches(topology.supplier2) fullSearch2.start() while count <= MAX_PASSES: log.info('################## Reliabilty 7.5 Pass: %d' % count) # Supplier 1 add_del_users1 = AddDelUsers(topology.supplier1, '1') add_del_users1.start() mod_users1 = ModUsers(topology.supplier1, '1') mod_users1.start() search1 = DoSearches(topology.supplier1, '1') search1.start() # Supplier 2 add_del_users2 = AddDelUsers(topology.supplier2, '2') add_del_users2.start() mod_users2 = ModUsers(topology.supplier2, '2') mod_users2.start() search2 = DoSearches(topology.supplier2, '2') search2.start() # Search the suppliers search3 = DoSearches(topology.supplier1, '1') search3.start() search4 = DoSearches(topology.supplier2, '2') search4.start() # Wait for threads to finish log.info('################## Waiting for threads to finish...') add_del_users1.join() mod_users1.join() add_del_users2.join() mod_users2.join() log.info('################## Update threads finished.') search1.join() search2.join() search3.join() search4.join() log.info('################## All threads finished.') # Allow some time for replication to catch up before firing # off the next round of updates time.sleep(5) count += 1 # # Wait for replication to converge # if CHECK_CONVERGENCE: # Add an entry to each supplier, and wait for it to replicate SUPPLIER1_DN = 'uid=rel7.5-supplier1,' + DEFAULT_SUFFIX SUPPLIER2_DN = 
'uid=rel7.5-supplier2,' + DEFAULT_SUFFIX # Supplier 1 try: topology.supplier1.add_s(Entry((SUPPLIER1_DN, {'objectclass': ['top', 'extensibleObject'], 'sn': '1', 'cn': 'user 1', 'uid': 'rel7.5-supplier1', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('Failed to add replication test entry ' + SUPPLIER1_DN + ': error ' + e.message['desc']) assert False log.info('################## Waiting for supplier 2 to converge...') while True: entry = None try: entry = topology.supplier2.search_s(SUPPLIER1_DN, ldap.SCOPE_BASE, 'objectclass=*') except ldap.NO_SUCH_OBJECT: pass except ldap.LDAPError as e: log.fatal('Search Users: Search failed (%s): %s' % (SUPPLIER1_DN, e.message['desc'])) assert False if entry: break time.sleep(5) log.info('################## Supplier 2 converged.') # Supplier 2 try: topology.supplier2.add_s( Entry((SUPPLIER2_DN, {'objectclass': ['top', 'extensibleObject'], 'sn': '1', 'cn': 'user 1', 'uid': 'rel7.5-supplier2', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('Failed to add replication test entry ' + SUPPLIER1_DN + ': error ' + e.message['desc']) assert False log.info('################## Waiting for supplier 1 to converge...') while True: entry = None try: entry = topology.supplier1.search_s(SUPPLIER2_DN, ldap.SCOPE_BASE, 'objectclass=*') except ldap.NO_SUCH_OBJECT: pass except ldap.LDAPError as e: log.fatal('Search Users: Search failed (%s): %s' % (SUPPLIER2_DN, e.message['desc'])) assert False if entry: break time.sleep(5) log.info('################## Supplier 1 converged.') # Stop the full searches RUNNING = False fullSearch1.join() fullSearch2.join() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py000066400000000000000000000145201421664411400304050ustar00rootroot00000000000000import os import sys import time import ldap import 
logging import pytest import signal import threading from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.idm.directorymanager import DirectoryManager from lib389.idm.user import UserAccounts from lib389.topologies import topology_st pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) MAX_CONNS = 10000000 MAX_THREADS = 20 STOP = False HOSTNAME = DirSrvTools.getLocalhost() PORT = 389 NUNC_STANS = False def signalHandler(signal, frame): """ handle control-C cleanly """ global STOP STOP = True sys.exit(0) def init(inst): """Set the idle timeout, and add sample entries """ inst.config.set('nsslapd-idletimeout', '5') if NUNC_STANS: inst.config.set('nsslapd-enable-nunc-stans', 'on') inst.restart() users = UserAccounts(inst, DEFAULT_SUFFIX) for idx in range(0, 9): user = users.create_test_user(uid=str(idx), gid=str(idx)) user.reset_password('password') class BindOnlyConn(threading.Thread): """This class opens and closes connections """ def __init__(self, inst): """Initialize the thread class with the server instance info""" threading.Thread.__init__(self) self.daemon = True self.inst = inst def run(self): """Keep opening and closing connections""" idx = 0 err_count = 0 global STOP while idx < MAX_CONNS and not STOP: try: conn = DirectoryManager(self.inst).bind(connOnly=True) conn.unbind_s() time.sleep(.2) err_count = 0 except ldap.LDAPError as e: err_count += 1 if err_count > 3: log.error('BindOnlyConn exiting thread: %s' % (str(e))) return time.sleep(.4) idx += 1 class IdleConn(threading.Thread): """This class opens and closes connections """ def __init__(self, inst): """Initialize the thread class with the server instance info""" threading.Thread.__init__(self) self.daemon = True self.inst = inst def run(self): """Assume idleTimeout is set to less than 10 seconds """ idx = 0 err_count = 0 
global STOP while idx < (MAX_CONNS / 10) and not STOP: try: conn = self.inst.clone() conn.simple_bind_s('uid=test_user_0,dc=example,dc=com', 'password') conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, 'uid=*') time.sleep(10) conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, 'cn=*') conn.unbind_s() time.sleep(.2) err_count = 0 except ldap.LDAPError as e: err_count += 1 if err_count > 3: log.error('IdleConn exiting thread: %s' % (str(e))) return time.sleep(.4) idx += 1 class LongConn(threading.Thread): """This class opens and closes connections to a specified server """ def __init__(self, inst): """Initialize the thread class with the server instance info""" threading.Thread.__init__(self) self.daemon = True self.inst = inst def run(self): """Assume idleTimeout is set to less than 10 seconds """ idx = 0 err_count = 0 global STOP while idx < MAX_CONNS and not STOP: try: conn = self.inst.clone() conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, 'objectclass=*') conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, 'uid=mark') conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, 'cn=*') conn.search_s('', ldap.SCOPE_BASE, 'objectclass=*') conn.unbind_s() time.sleep(.2) err_count = 0 except ldap.LDAPError as e: err_count += 1 if err_count > 3: log.error('LongConn search exiting thread: %s' % (str(e))) return time.sleep(.4) idx += 1 def test_connection_load(topology_st): """Send the server a variety of connections using many threads: - Open, Bind, Close - Open, Bind, Search, wait to trigger idletimeout, Search, Close - Open, Bind, Search, Search, Search, Close """ # setup the control-C signal handler signal.signal(signal.SIGINT, signalHandler) # Set the config and add sample entries log.info('Initializing setup...') init(topology_st.standalone) # # Bind/Unbind Conn Threads # log.info('Launching Bind-Only Connection threads...') threads = [] idx = 0 while idx < MAX_THREADS: threads.append(BindOnlyConn(topology_st.standalone)) idx += 1 for thread in 
threads: thread.start() time.sleep(0.1) # # Idle Conn Threads # log.info('Launching Idle Connection threads...') idx = 0 idle_threads = [] while idx < MAX_THREADS: idle_threads.append(IdleConn(topology_st.standalone)) idx += 1 for thread in idle_threads: thread.start() time.sleep(0.1) # # Long Conn Threads # log.info('Launching Long Connection threads...') idx = 0 long_threads = [] while idx < MAX_THREADS: long_threads.append(LongConn(topology_st.standalone)) idx += 1 for thread in long_threads: thread.start() time.sleep(0.1) # # Now wait for all the threads to complete # log.info('Waiting for threads to finish...') while threading.active_count() > 0: time.sleep(1) log.info('Done') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/replication/000077500000000000000000000000001421664411400246705ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py000066400000000000000000001231211421664411400306070ustar00rootroot00000000000000import os import sys import time import datetime import ldap import logging import pytest import threading from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.repltools import ReplTools pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) DEBUGGING = False ADD_DEL_COUNT = 5000 MAX_LOOPS = 5 TEST_CONVERGE_LATENCY = True CONVERGENCE_TIMEOUT = '60' supplier_list = [] hub_list = [] con_list = [] TEST_START = time.time() LAST_DN_IDX = ADD_DEL_COUNT - 1 LAST_DN_M1 = 'DEL dn="uid=supplier_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M2 = 'DEL dn="uid=supplier_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M3 = 'DEL dn="uid=supplier_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M4 = 'DEL 
dn="uid=supplier_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) class TopologyReplication(object): """The Replication Topology Class""" def __init__(self, supplier1, supplier2, supplier3, supplier4, hub1, hub2, consumer1, consumer2, consumer3, consumer4): """Init""" supplier1.open() self.supplier1 = supplier1 supplier2.open() self.supplier2 = supplier2 supplier3.open() self.supplier3 = supplier3 supplier4.open() self.supplier4 = supplier4 hub1.open() self.hub1 = hub1 hub2.open() self.hub2 = hub2 consumer1.open() self.consumer1 = consumer1 consumer2.open() self.consumer2 = consumer2 consumer3.open() self.consumer3 = consumer3 consumer4.open() self.consumer4 = consumer4 supplier_list.append(supplier1.serverid) supplier_list.append(supplier2.serverid) supplier_list.append(supplier3.serverid) supplier_list.append(supplier4.serverid) hub_list.append(hub1.serverid) hub_list.append(hub2.serverid) con_list.append(consumer1.serverid) con_list.append(consumer2.serverid) con_list.append(consumer3.serverid) con_list.append(consumer4.serverid) @pytest.fixture(scope="module") def topology(request): """Create Replication Deployment""" # Creating supplier 1... if DEBUGGING: supplier1 = DirSrv(verbose=True) else: supplier1 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_1 args_instance[SER_PORT] = PORT_SUPPLIER_1 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier1.allocate(args_supplier) instance_supplier1 = supplier1.exists() if instance_supplier1: supplier1.delete() supplier1.create() supplier1.open() supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_1) # Creating supplier 2... 
if DEBUGGING: supplier2 = DirSrv(verbose=True) else: supplier2 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_2 args_instance[SER_PORT] = PORT_SUPPLIER_2 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier2.allocate(args_supplier) instance_supplier2 = supplier2.exists() if instance_supplier2: supplier2.delete() supplier2.create() supplier2.open() supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_2) # Creating supplier 3... if DEBUGGING: supplier3 = DirSrv(verbose=True) else: supplier3 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_3 args_instance[SER_PORT] = PORT_SUPPLIER_3 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_3 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier3.allocate(args_supplier) instance_supplier3 = supplier3.exists() if instance_supplier3: supplier3.delete() supplier3.create() supplier3.open() supplier3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_3) # Creating supplier 4... if DEBUGGING: supplier4 = DirSrv(verbose=True) else: supplier4 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_4 args_instance[SER_PORT] = PORT_SUPPLIER_4 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_4 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier4.allocate(args_supplier) instance_supplier4 = supplier4.exists() if instance_supplier4: supplier4.delete() supplier4.create() supplier4.open() supplier4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_4) # Creating hub 1... 
if DEBUGGING: hub1 = DirSrv(verbose=True) else: hub1 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_HUB_1 args_instance[SER_PORT] = PORT_HUB_1 args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_hub = args_instance.copy() hub1.allocate(args_hub) instance_hub1 = hub1.exists() if instance_hub1: hub1.delete() hub1.create() hub1.open() hub1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, replicaId=REPLICAID_HUB_1) # Creating hub 2... if DEBUGGING: hub2 = DirSrv(verbose=True) else: hub2 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_HUB_2 args_instance[SER_PORT] = PORT_HUB_2 args_instance[SER_SERVERID_PROP] = SERVERID_HUB_2 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_hub = args_instance.copy() hub2.allocate(args_hub) instance_hub2 = hub2.exists() if instance_hub2: hub2.delete() hub2.create() hub2.open() hub2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, replicaId=REPLICAID_HUB_2) # Creating consumer 1... if DEBUGGING: consumer1 = DirSrv(verbose=True) else: consumer1 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_CONSUMER_1 args_instance[SER_PORT] = PORT_CONSUMER_1 args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_consumer = args_instance.copy() consumer1.allocate(args_consumer) instance_consumer1 = consumer1.exists() if instance_consumer1: consumer1.delete() consumer1.create() consumer1.open() consumer1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) # Creating consumer 2... 
if DEBUGGING: consumer2 = DirSrv(verbose=True) else: consumer2 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_CONSUMER_2 args_instance[SER_PORT] = PORT_CONSUMER_2 args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_2 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_consumer = args_instance.copy() consumer2.allocate(args_consumer) instance_consumer2 = consumer2.exists() if instance_consumer2: consumer2.delete() consumer2.create() consumer2.open() consumer2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) # Creating consumer 3... if DEBUGGING: consumer3 = DirSrv(verbose=True) else: consumer3 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_CONSUMER_3 args_instance[SER_PORT] = PORT_CONSUMER_3 args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_3 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_consumer = args_instance.copy() consumer3.allocate(args_consumer) instance_consumer3 = consumer3.exists() if instance_consumer3: consumer3.delete() consumer3.create() consumer3.open() consumer3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) # Creating consumer 4... 
if DEBUGGING: consumer4 = DirSrv(verbose=True) else: consumer4 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_CONSUMER_4 args_instance[SER_PORT] = PORT_CONSUMER_4 args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_4 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_consumer = args_instance.copy() consumer4.allocate(args_consumer) instance_consumer4 = consumer4.exists() if instance_consumer4: consumer4.delete() consumer4.create() consumer4.open() consumer4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) # # Create all the agreements # # Creating agreement from supplier 1 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m1_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m2_agmt) # Creating agreement from supplier 1 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m3_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m1_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m3_agmt) # Creating agreement from supplier 1 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], 
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m4_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m1_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m4_agmt) # Creating agreement from supplier 1 to hub 1 properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_h1_agmt = supplier1.agreement.create(suffix=SUFFIX, host=hub1.host, port=hub1.port, properties=properties) if not m1_h1_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m1_h1_agmt) # Creating agreement from supplier 1 to hub 2 properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_h2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=hub2.host, port=hub2.port, properties=properties) if not m1_h2_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m1_h2_agmt) # Creating agreement from supplier 2 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, 
port=supplier1.port, properties=properties) if not m2_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m1_agmt) # Creating agreement from supplier 2 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m3_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m2_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m3_agmt) # Creating agreement from supplier 2 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m4_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m2_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m4_agmt) # Creating agreement from supplier 2 to hub 1 properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_h1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=hub1.host, port=hub1.port, properties=properties) if not m2_h1_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m2_h1_agmt) # Creating agreement from supplier 2 to 
hub 2 properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_h2_agmt = supplier2.agreement.create(suffix=SUFFIX, host=hub2.host, port=hub2.port, properties=properties) if not m2_h2_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m2_h2_agmt) # Creating agreement from supplier 3 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m3_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m3_m1_agmt) # Creating agreement from supplier 3 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m3_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m3_m2_agmt) # Creating agreement from supplier 3 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m4_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m3_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m3_m4_agmt) # Creating agreement from supplier 3 to hub 1 properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_h1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=hub1.host, port=hub1.port, properties=properties) if not m3_h1_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m3_h1_agmt) # Creating agreement from supplier 3 to hub 2 properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_h2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=hub2.host, port=hub2.port, properties=properties) if not m3_h2_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m3_h2_agmt) # Creating agreement from supplier 4 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m4_m1_agmt: 
log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m1_agmt) # Creating agreement from supplier 4 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m4_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m2_agmt) # Creating agreement from supplier 4 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m3_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m4_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m3_agmt) # Creating agreement from supplier 4 to hub 1 properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_h1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=hub1.host, port=hub1.port, properties=properties) if not m4_h1_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m4_h1_agmt) # Creating agreement from supplier 4 to hub 2 properties = {RA_NAME: 'meTo_' + hub2.host + ':' + 
str(hub2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_h2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=hub2.host, port=hub2.port, properties=properties) if not m4_h2_agmt: log.fatal("Fail to create a supplier -> hub replica agreement") sys.exit(1) log.debug("%s created" % m4_h2_agmt) # Creating agreement from hub 1 to consumer 1 properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host, port=consumer1.port, properties=properties) if not h1_c1_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h1_c1_agmt) # Creating agreement from hub 1 to consumer 2 properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h1_c2_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer2.host, port=consumer2.port, properties=properties) if not h1_c2_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h1_c2_agmt) # Creating agreement from hub 1 to consumer 3 properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} 
h1_c3_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer3.host, port=consumer3.port, properties=properties) if not h1_c3_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h1_c3_agmt) # Creating agreement from hub 1 to consumer 4 properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h1_c4_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer4.host, port=consumer4.port, properties=properties) if not h1_c4_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h1_c4_agmt) # Creating agreement from hub 2 to consumer 1 properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h2_c1_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer1.host, port=consumer1.port, properties=properties) if not h2_c1_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h2_c1_agmt) # Creating agreement from hub 2 to consumer 2 properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h2_c2_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer2.host, port=consumer2.port, properties=properties) if not h2_c2_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % 
h2_c2_agmt) # Creating agreement from hub 2 to consumer 3 properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h2_c3_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer3.host, port=consumer3.port, properties=properties) if not h2_c3_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h2_c3_agmt) # Creating agreement from hub 2 to consumer 4 properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} h2_c4_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer4.host, port=consumer4.port, properties=properties) if not h2_c4_agmt: log.fatal("Fail to create a hub -> consumer replica agreement") sys.exit(1) log.debug("%s created" % h2_c4_agmt) # Allow the replicas to get situated with the new agreements... 
time.sleep(5) # # Initialize all the agreements # supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) supplier1.waitForReplInit(m1_m2_agmt) supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_3, PORT_SUPPLIER_3) supplier1.waitForReplInit(m1_m3_agmt) supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_4, PORT_SUPPLIER_4) supplier1.waitForReplInit(m1_m4_agmt) supplier1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1) supplier1.waitForReplInit(m1_h1_agmt) hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) hub1.waitForReplInit(h1_c1_agmt) hub1.agreement.init(SUFFIX, HOST_CONSUMER_2, PORT_CONSUMER_2) hub1.waitForReplInit(h1_c2_agmt) hub1.agreement.init(SUFFIX, HOST_CONSUMER_3, PORT_CONSUMER_3) hub1.waitForReplInit(h1_c3_agmt) hub1.agreement.init(SUFFIX, HOST_CONSUMER_4, PORT_CONSUMER_4) hub1.waitForReplInit(h1_c4_agmt) supplier1.agreement.init(SUFFIX, HOST_HUB_2, PORT_HUB_2) supplier1.waitForReplInit(m1_h2_agmt) # Check replication is working... if supplier1.testReplication(DEFAULT_SUFFIX, consumer1): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False def fin(): """If we are debugging just stop the instances, otherwise remove them """ if DEBUGGING: supplier1.stop() supplier2.stop() supplier3.stop() supplier4.stop() hub1.stop() hub2.stop() consumer1.stop() consumer2.stop() consumer3.stop() consumer4.stop() else: supplier1.delete() supplier2.delete() supplier3.delete() supplier4.delete() hub1.delete() hub2.delete() consumer1.delete() consumer2.delete() consumer3.delete() consumer4.delete() request.addfinalizer(fin) return TopologyReplication(supplier1, supplier2, supplier3, supplier4, hub1, hub2, consumer1, consumer2, consumer3, consumer4) class AddDelUsers(threading.Thread): """Add's and delets 50000 entries""" def __init__(self, inst): """ Initialize the thread """ threading.Thread.__init__(self) self.daemon = True self.inst = inst self.name = inst.serverid def run(self): """ Start adding users """ idx = 0 
log.info('AddDelUsers (%s) Adding and deleting %d entries...' % (self.name, ADD_DEL_COUNT)) while idx < ADD_DEL_COUNT: RDN_VAL = ('uid=%s-%d' % (self.name, idx)) USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) try: self.inst.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': RDN_VAL}))) except ldap.LDAPError as e: log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % (self.name, USER_DN, str(e))) assert False try: self.inst.delete_s(USER_DN) except ldap.LDAPError as e: log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % (self.name, USER_DN, str(e))) assert False idx += 1 log.info('AddDelUsers (%s) - Finished at: %s' % (self.name, getDateTime())) def measureConvergence(topology): """Find and measure the convergence of entries from each supplier """ replicas = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4, topology.hub1, topology.hub2, topology.consumer1, topology.consumer2, topology.consumer3, topology.consumer4] if ADD_DEL_COUNT > 10: interval = int(ADD_DEL_COUNT / 10) else: interval = 1 for supplier in [('1', topology.supplier1), ('2', topology.supplier2), ('3', topology.supplier3), ('4', topology.supplier4)]: # Start with the first entry entries = ['ADD dn="uid=supplier_%s-0,%s' % (supplier[0], DEFAULT_SUFFIX)] # Add incremental entries to the list idx = interval while idx < ADD_DEL_COUNT: entries.append('ADD dn="uid=supplier_%s-%d,%s' % (supplier[0], idx, DEFAULT_SUFFIX)) idx += interval # Add the last entry to the list (if it was not already added) if idx != (ADD_DEL_COUNT - 1): entries.append('ADD dn="uid=supplier_%s-%d,%s' % (supplier[0], (ADD_DEL_COUNT - 1), DEFAULT_SUFFIX)) ReplTools.replConvReport(DEFAULT_SUFFIX, entries, supplier[1], replicas) def test_MMR_Integrity(topology): """Apply load to 4 suppliers at the same time. Perform adds and deletes. If any updates are missed we will see an error 32 in the access logs or we will have entries left over once the test completes. 
""" loop = 0 ALL_REPLICAS = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4, topology.hub1, topology.hub2, topology.consumer1, topology.consumer2, topology.consumer3, topology.consumer4] if TEST_CONVERGE_LATENCY: try: for inst in ALL_REPLICAS: replica = inst.replicas.get(DEFAULT_SUFFIX) replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) except ldap.LDAPError as e: log.fatal('Failed to set replicas release timeout - error: %s' % (str(e))) assert False if DEBUGGING: # Enable Repl logging, and increase the max logs try: for inst in ALL_REPLICAS: inst.enableReplLogging() inst.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-maxlogsperdir', '5')]) except ldap.LDAPError as e: log.fatal('Failed to set max logs - error: %s' % (str(e))) assert False while loop < MAX_LOOPS: # Remove the current logs so we have a clean set of logs to check. log.info('Pass %d...' % (loop + 1)) log.info("Removing logs...") for inst in ALL_REPLICAS: inst.deleteAllLogs() # Fire off 4 threads to apply the load log.info("Start adding/deleting: " + getDateTime()) startTime = time.time() add_del_m1 = AddDelUsers(topology.supplier1) add_del_m1.start() add_del_m2 = AddDelUsers(topology.supplier2) add_del_m2.start() add_del_m3 = AddDelUsers(topology.supplier3) add_del_m3.start() add_del_m4 = AddDelUsers(topology.supplier4) add_del_m4.start() # Wait for threads to finish sending their updates add_del_m1.join() add_del_m2.join() add_del_m3.join() add_del_m4.join() log.info("Finished adding/deleting entries: " + getDateTime()) # # Loop checking for error 32's, and for convergence to complete # log.info("Waiting for replication to converge...") while True: # First check for error 32's for inst in ALL_REPLICAS: if inst.searchAccessLog(" err=32 "): log.fatal('An add was missed on: ' + inst.serverid) assert False # Next check to see if the last update is in the access log converged = True for inst in ALL_REPLICAS: if not inst.searchAccessLog(LAST_DN_M1) or 
\ not inst.searchAccessLog(LAST_DN_M2) or \ not inst.searchAccessLog(LAST_DN_M3) or \ not inst.searchAccessLog(LAST_DN_M4): converged = False break if converged: elapsed_tm = int(time.time() - startTime) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Replication converged at: ' + getDateTime() + ' - Elapsed Time: ' + convtime) break else: # Check if replication is idle replicas = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4, topology.hub1, topology.hub2] if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): # Replication is idle - wait 30 secs for access log buffer time.sleep(30) # Now check the access log again... converged = True for inst in ALL_REPLICAS: if not inst.searchAccessLog(LAST_DN_M1) or \ not inst.searchAccessLog(LAST_DN_M2) or \ not inst.searchAccessLog(LAST_DN_M3) or \ not inst.searchAccessLog(LAST_DN_M4): converged = False break if converged: elapsed_tm = int(time.time() - startTime) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Replication converged at: ' + getDateTime() + ' - Elapsed Time: ' + convtime) break else: log.fatal('Stopping replication check: ' + getDateTime()) log.fatal('Failure: Replication is complete, but we ' + 'never converged.') assert False # Sleep a bit before the next pass time.sleep(3) # # Finally check the CSN's # log.info("Check the CSN's...") if not ReplTools.checkCSNs(ALL_REPLICAS): assert False log.info("All CSN's present and accounted for.") # # Print the convergence report # log.info('Measuring convergence...') measureConvergence(topology) # # Test complete # log.info('No lingering entries.') log.info('Pass %d complete.' 
% (loop + 1)) elapsed_tm = int(time.time() - TEST_START) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Entire test ran for: ' + convtime) loop += 1 log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py000066400000000000000000000562171421664411400300070ustar00rootroot00000000000000import os import sys import time import datetime import ldap import logging import pytest import threading from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.repltools import ReplTools pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) DEBUGGING = False ADD_DEL_COUNT = 50000 MAX_LOOPS = 2 TEST_CONVERGE_LATENCY = True CONVERGENCE_TIMEOUT = '60' supplier_list = [] hub_list = [] con_list = [] TEST_START = time.time() LAST_DN_IDX = ADD_DEL_COUNT - 1 LAST_DN_M1 = 'DEL dn="uid=supplier_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M2 = 'DEL dn="uid=supplier_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M3 = 'DEL dn="uid=supplier_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) LAST_DN_M4 = 'DEL dn="uid=supplier_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) class TopologyReplication(object): """The Replication Topology Class""" def __init__(self, supplier1, supplier2, supplier3, supplier4): """Init""" supplier1.open() self.supplier1 = supplier1 supplier2.open() self.supplier2 = supplier2 supplier3.open() self.supplier3 = supplier3 supplier4.open() self.supplier4 = supplier4 @pytest.fixture(scope="module") def topology(request): """Create Replication Deployment""" # Creating supplier 1... 
if DEBUGGING: supplier1 = DirSrv(verbose=True) else: supplier1 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_1 args_instance[SER_PORT] = PORT_SUPPLIER_1 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier1.allocate(args_supplier) instance_supplier1 = supplier1.exists() if instance_supplier1: supplier1.delete() supplier1.create() supplier1.open() supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_1) # Creating supplier 2... if DEBUGGING: supplier2 = DirSrv(verbose=True) else: supplier2 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_2 args_instance[SER_PORT] = PORT_SUPPLIER_2 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier2.allocate(args_supplier) instance_supplier2 = supplier2.exists() if instance_supplier2: supplier2.delete() supplier2.create() supplier2.open() supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_2) # Creating supplier 3... if DEBUGGING: supplier3 = DirSrv(verbose=True) else: supplier3 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_3 args_instance[SER_PORT] = PORT_SUPPLIER_3 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_3 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier3.allocate(args_supplier) instance_supplier3 = supplier3.exists() if instance_supplier3: supplier3.delete() supplier3.create() supplier3.open() supplier3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_3) # Creating supplier 4... 
if DEBUGGING: supplier4 = DirSrv(verbose=True) else: supplier4 = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_SUPPLIER_4 args_instance[SER_PORT] = PORT_SUPPLIER_4 args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_4 args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_supplier = args_instance.copy() supplier4.allocate(args_supplier) instance_supplier4 = supplier4.exists() if instance_supplier4: supplier4.delete() supplier4.create() supplier4.open() supplier4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, replicaId=REPLICAID_SUPPLIER_4) # # Create all the agreements # # Creating agreement from supplier 1 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m1_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m2_agmt) # Creating agreement from supplier 1 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m3_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m1_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m3_agmt) # Creating agreement from supplier 1 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], 
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m4_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m1_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m4_agmt) # Creating agreement from supplier 2 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m2_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m1_agmt) # Creating agreement from supplier 2 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m3_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m2_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m3_agmt) # Creating agreement from supplier 2 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m4_agmt = 
supplier2.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m2_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m4_agmt) # Creating agreement from supplier 3 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m3_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m3_m1_agmt) # Creating agreement from supplier 3 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m3_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m3_m2_agmt) # Creating agreement from supplier 3 to supplier 4 properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m3_m4_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier4.host, port=supplier4.port, properties=properties) if not m3_m4_agmt: log.fatal("Fail to create a supplier -> supplier replica 
agreement") sys.exit(1) log.debug("%s created" % m3_m4_agmt) # Creating agreement from supplier 4 to supplier 1 properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m4_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m1_agmt) # Creating agreement from supplier 4 to supplier 2 properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m4_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m2_agmt) # Creating agreement from supplier 4 to supplier 3 properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m4_m3_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier3.host, port=supplier3.port, properties=properties) if not m4_m3_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m4_m3_agmt) # Allow the replicas to get situated with the new agreements... 
time.sleep(5) # # Initialize all the agreements # supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) supplier1.waitForReplInit(m1_m2_agmt) supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_3, PORT_SUPPLIER_3) supplier1.waitForReplInit(m1_m3_agmt) supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_4, PORT_SUPPLIER_4) supplier1.waitForReplInit(m1_m4_agmt) # Check replication is working... if supplier1.testReplication(DEFAULT_SUFFIX, supplier4): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False def fin(): """If we are debugging just stop the instances, otherwise remove them """ if 1 or DEBUGGING: supplier1.stop() supplier2.stop() supplier3.stop() supplier4.stop() else: supplier1.delete() supplier2.delete() supplier3.delete() supplier4.delete() request.addfinalizer(fin) return TopologyReplication(supplier1, supplier2, supplier3, supplier4) class AddDelUsers(threading.Thread): """Add's and delets 50000 entries""" def __init__(self, inst): """ Initialize the thread """ threading.Thread.__init__(self) self.daemon = True self.inst = inst self.name = inst.serverid def run(self): """ Start adding users """ idx = 0 log.info('AddDelUsers (%s) Adding and deleting %d entries...' 
% (self.name, ADD_DEL_COUNT)) while idx < ADD_DEL_COUNT: RDN_VAL = ('uid=%s-%d' % (self.name, idx)) USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) try: self.inst.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': RDN_VAL}))) except ldap.LDAPError as e: log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % (self.name, USER_DN, str(e))) assert False try: self.inst.delete_s(USER_DN) except ldap.LDAPError as e: log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % (self.name, USER_DN, str(e))) assert False idx += 1 log.info('AddDelUsers (%s) - Finished at: %s' % (self.name, getDateTime())) def measureConvergence(topology): """Find and measure the convergence of entries from each supplier """ replicas = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4] if ADD_DEL_COUNT > 10: interval = int(ADD_DEL_COUNT / 10) else: interval = 1 for supplier in [('1', topology.supplier1), ('2', topology.supplier2), ('3', topology.supplier3), ('4', topology.supplier4)]: # Start with the first entry entries = ['ADD dn="uid=supplier_%s-0,%s' % (supplier[0], DEFAULT_SUFFIX)] # Add incremental entries to the list idx = interval while idx < ADD_DEL_COUNT: entries.append('ADD dn="uid=supplier_%s-%d,%s' % (supplier[0], idx, DEFAULT_SUFFIX)) idx += interval # Add the last entry to the list (if it was not already added) if idx != (ADD_DEL_COUNT - 1): entries.append('ADD dn="uid=supplier_%s-%d,%s' % (supplier[0], (ADD_DEL_COUNT - 1), DEFAULT_SUFFIX)) ReplTools.replConvReport(DEFAULT_SUFFIX, entries, supplier[1], replicas) def test_MMR_Integrity(topology): """Apply load to 4 suppliers at the same time. Perform adds and deletes. If any updates are missed we will see an error 32 in the access logs or we will have entries left over once the test completes. 
""" loop = 0 ALL_REPLICAS = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4] if TEST_CONVERGE_LATENCY: try: for inst in ALL_REPLICAS: replica = inst.replicas.get(DEFAULT_SUFFIX) replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) except ldap.LDAPError as e: log.fatal('Failed to set replicas release timeout - error: %s' % (str(e))) assert False if DEBUGGING: # Enable Repl logging, and increase the max logs try: for inst in ALL_REPLICAS: inst.enableReplLogging() inst.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-maxlogsperdir', '5')]) except ldap.LDAPError as e: log.fatal('Failed to set max logs - error: %s' % (str(e))) assert False while loop < MAX_LOOPS: # Remove the current logs so we have a clean set of logs to check. log.info('Pass %d...' % (loop + 1)) log.info("Removing logs...") for inst in ALL_REPLICAS: inst.deleteAllLogs() # Fire off 4 threads to apply the load log.info("Start adding/deleting: " + getDateTime()) startTime = time.time() add_del_m1 = AddDelUsers(topology.supplier1) add_del_m1.start() add_del_m2 = AddDelUsers(topology.supplier2) add_del_m2.start() add_del_m3 = AddDelUsers(topology.supplier3) add_del_m3.start() add_del_m4 = AddDelUsers(topology.supplier4) add_del_m4.start() # Wait for threads to finish sending their updates add_del_m1.join() add_del_m2.join() add_del_m3.join() add_del_m4.join() log.info("Finished adding/deleting entries: " + getDateTime()) # # Loop checking for error 32's, and for convergence to complete # log.info("Waiting for replication to converge...") while True: # First check for error 32's for inst in ALL_REPLICAS: if inst.searchAccessLog(" err=32 "): log.fatal('An add was missed on: ' + inst.serverid) assert False # Next check to see if the last update is in the access log converged = True for inst in ALL_REPLICAS: if not inst.searchAccessLog(LAST_DN_M1) or \ not inst.searchAccessLog(LAST_DN_M2) or \ not inst.searchAccessLog(LAST_DN_M3) or \ not 
inst.searchAccessLog(LAST_DN_M4): converged = False break if converged: elapsed_tm = int(time.time() - startTime) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Replication converged at: ' + getDateTime() + ' - Elapsed Time: ' + convtime) break else: # Check if replication is idle replicas = [topology.supplier1, topology.supplier2, topology.supplier3, topology.supplier4] if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): # Replication is idle - wait 30 secs for access log buffer time.sleep(30) # Now check the access log again... converged = True for inst in ALL_REPLICAS: if not inst.searchAccessLog(LAST_DN_M1) or \ not inst.searchAccessLog(LAST_DN_M2) or \ not inst.searchAccessLog(LAST_DN_M3) or \ not inst.searchAccessLog(LAST_DN_M4): converged = False break if converged: elapsed_tm = int(time.time() - startTime) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Replication converged at: ' + getDateTime() + ' - Elapsed Time: ' + convtime) break else: log.fatal('Stopping replication check: ' + getDateTime()) log.fatal('Failure: Replication is complete, but we ' + 'never converged.') assert False # Sleep a bit before the next pass time.sleep(3) # # Finally check the CSN's # log.info("Check the CSN's...") if not ReplTools.checkCSNs(ALL_REPLICAS): assert False log.info("All CSN's present and accounted for.") # # Print the convergence report # log.info('Measuring convergence...') measureConvergence(topology) # # Test complete # log.info('No lingering entries.') log.info('Pass %d complete.' 
% (loop + 1)) elapsed_tm = int(time.time() - TEST_START) convtime = str(datetime.timedelta(seconds=elapsed_tm)) log.info('Entire test ran for: ' + convtime) loop += 1 log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/search/000077500000000000000000000000001421664411400236245ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/search/__init__.py000066400000000000000000000000001421664411400257230ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/stress/search/simple.py000066400000000000000000000033541421664411400254740ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # from lib389.topologies import topology_st from lib389.dbgen import dbgen_users from lib389.ldclt import Ldclt from lib389.tasks import ImportTask from lib389._constants import DEFAULT_SUFFIX def test_stress_search_simple(topology_st): """Test a simple stress test of searches on the directory server. :id: 3786d01c-ea03-4655-a4f9-450693c75863 :setup: Standalone Instance :steps: 1. Create test users 2. Import them 3. Stress test! :expectedresults: 1. Success 2. Success 3. Results are written to /tmp """ inst = topology_st.standalone inst.config.set("nsslapd-verify-filter-schema", "off") # Bump idllimit to test OR worst cases. 
from lib389.config import LDBMConfig lconfig = LDBMConfig(inst) # lconfig.set("nsslapd-idlistscanlimit", '20000') # lconfig.set("nsslapd-lookthroughlimit", '20000') ldif_dir = inst.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' dbgen_users(inst, 10000, import_ldif, DEFAULT_SUFFIX) r = ImportTask(inst) r.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) r.wait() # Run a small to warm up the server's caches ... l = Ldclt(inst) l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=1) # Now do it for realsies! # l.search_loadtest(DEFAULT_SUFFIX, "(|(mail=XXXX@example.com)(nonexist=foo))", rounds=10) l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=10) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/000077500000000000000000000000001421664411400223505ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/__init__.py000066400000000000000000000000001421664411400244470ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/000077500000000000000000000000001421664411400231075ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/__init__.py000066400000000000000000000001101421664411400252100ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Access Control Instructions (ACI) """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/aci_excl_filter_test.py000066400000000000000000000135031421664411400276360ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import logging import os import pytest from lib389.topologies import topology_st as topo from lib389._mapped_object import DSLdapObject from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.user import UserAccounts from lib389._constants import DEFAULT_SUFFIX from lib389.idm.domain import Domain from lib389.idm.account import Accounts pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def add_anon_aci_access(topo, request): # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass def fin(): suffix.delete() request.addfinalizer(fin) def add_ou_entry(topo, name, myparent): ou_dn = 'ou={},{}'.format(name, myparent) ou = OrganizationalUnit(topo.standalone, dn=ou_dn) assert ou.create(properties={'ou': name}) log.info('Organisation {} created for ou :{} .'.format(name, ou_dn)) def add_user_entry(topo, user, name, pw, myparent): dn = 'ou=%s,%s' % (name, myparent) properties = { 'uid': name, 'cn': 'admin', 'sn': name, 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/{}'.format(name), 'telephonenumber': '+1 222 333-4444', 'userpassword': pw, } assert user.create(properties=properties) log.info('User created for dn :{} .'.format(dn)) return user def test_aci_with_exclude_filter(topo, add_anon_aci_access): """Test an ACI(Access control instruction) which contains an extensible filter. :id: 238da674-81d9-11eb-a965-98fa9ba19b65 :setup: Standalone instance :steps: 1. Bind to a new Standalone instance 2. 
Generate text for the Access Control Instruction(ACI) and add to the standalone instance -Create a test user 'admin' with a marker -> deniedattr = 'telephonenumber' 3. Create 2 top Organizational units (ou) under the same root suffix 4. Create 2 test users for each Organizational unit (ou) above with the same username 'admin' 5. Bind to the Standalone instance as the user 'admin' from the ou created in step 4 above - Search for user(s) ' admin in the subtree that satisfy this criteria: DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, cn_filter, [deniedattr, 'dn'] 6. The search should return 2 entries with the username 'admin' 7. Verify that the users found do not have the --> deniedattr = 'telephonenumber' marker :expectedresults: 1. Bind should be successful 2. Operation to create 2 Orgs (ou) should be successful 3. Operation to create 2 (admin*) users should be successful 4. Operation should be successful. 5. Operation should be successful 6. Should successfully return 2 users that match "admin*" 7. PASS - users found do not have the --> deniedattr = 'telephonenumber' marker """ log.info('Create an OU for them') ous = OrganizationalUnit(topo.standalone, DEFAULT_SUFFIX) log.info('Create an top org users') users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) log.info('Add aci which contains extensible filter.') ouname = 'outest' username = 'admin' passwd = 'Password' deniedattr = 'telephonenumber' log.info('Add aci which contains extensible filter.') aci_text = ('(targetattr = "{}")'.format(deniedattr) + '(target = "ldap:///{}")'.format(DEFAULT_SUFFIX) + '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' + '(userdn = "ldap:///{}??sub?(&(cn={})(ou:dn:={}))");)'.format(DEFAULT_SUFFIX, username, ouname)) suffix = Domain(topo.standalone, DEFAULT_SUFFIX) suffix.add('aci', aci_text) log.info('Adding OU entries ...') for idx in range(0, 2): ou0 = 'OU%d' % idx log.info('Adding "ou" : %s under "dn" : %s...' 
% (ou0, DEFAULT_SUFFIX)) add_ou_entry(topo, ou0, DEFAULT_SUFFIX) parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX) log.info('Adding %s under %s...' % (ouname, parent)) add_ou_entry(topo, ouname, parent) user = UserAccounts(topo.standalone, parent, rdn=None) for idx in range(0, 2): parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX) user = UserAccounts(topo.standalone, parent, rdn=None) username = '{}{}'.format(username, idx) log.info('Adding User: %s under %s...' % (username, parent)) user = add_user_entry(topo, user, username, passwd, parent) log.info('Bind as user %s' % username) binddn_user = user.get(username) conn = binddn_user.bind(passwd) if not conn: log.error(" {} failed to authenticate: ".format(binddn_user)) assert False cn_filter = '(cn=%s)' % username entries = Accounts(conn, DEFAULT_SUFFIX).filter('(cn=admin*)') log.info('Verify 2 Entries returned for cn {}'.format(cn_filter)) assert len(entries) == 2 for entry in entries: assert not entry.get_attr_val_utf8('telephonenumber') log.info("Verified the entries do not contain 'telephonenumber' ") log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/acivattr_test.py000066400000000000000000000257731421664411400263530ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount from lib389.idm.organization import Organization from lib389.idm.organizationalunit import OrganizationalUnit from lib389.cos import CosTemplate, CosClassicDefinition from lib389.topologies import topology_st as topo from lib389.idm.nscontainer import nsContainer from lib389.idm.domain import Domain from lib389.idm.role import FilteredRoles pytestmark = pytest.mark.tier1 DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) SALES_OU = "ou=sales,{}".format(DNBASE) ENG_OU = "ou=eng,{}".format(DNBASE) FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) @pytest.fixture(scope="function") def aci_of_user(request, topo): aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def _add_user(request, topo): org = Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) org.add('aci', '(targetattr="*")(targetfilter="(nsrole=*)")(version 3.0; aci "tester"; ' 'allow(all) userdn="ldap:///cn=enguser1,ou=eng,o=acivattr,{}";)'.format(DEFAULT_SUFFIX)) ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'eng'}) ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'sales'}) roles = FilteredRoles(topo.standalone, DNBASE) roles.create(properties={'cn':'FILTERROLEENGROLE', 
'nsRoleFilter':'cn=eng*'}) roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) nsContainer(topo.standalone, 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( properties={'cn': 'cosTemplates'}) properties = {'employeeType': 'EngType', 'cn':'"cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} CosTemplate(topo.standalone,'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).\ create(properties=properties) properties = {'employeeType': 'SalesType', 'cn': '"cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} CosTemplate(topo.standalone, 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,' 'o=acivattr,{}'.format(DEFAULT_SUFFIX)).create(properties=properties) properties = { 'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), 'cosAttribute': 'employeeType', 'cosSpecifier': 'nsrole', 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} CosClassicDefinition(topo.standalone, 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( properties=properties) properties = { 'uid': 'salesuser1', 'cn': 'salesuser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'salesuser1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'salesmanager1', 'cn': 'salesmanager1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'salesmanager1', 'userPassword': PW_DM, } user = UserAccount(topo.standalone, 
'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'enguser1', 'cn': 'enguser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'enguser1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'engmanager1', 'cn': 'engmanager1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'engmanager1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) def fin(): for DN in [ENG_USER,SALES_UESER,ENG_MANAGER,SALES_MANAGER,FILTERROLESALESROLE,FILTERROLEENGROLE,ENG_OU,SALES_OU, 'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com', 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",' 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX), DNBASE]: UserAccount(topo.standalone, DN).delete() request.addfinalizer(fin) REAL_EQ_ACI = '(targetattr="*")(targetfilter="(cn=engmanager1)") (version 3.0; acl "real-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) REAL_PRES_ACI = '(targetattr="*")(targetfilter="(cn=*)") (version 3.0; acl "real-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) REAL_SUB_ACI = '(targetattr="*")(targetfilter="(cn=eng*)") (version 3.0; acl "real-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) ROLE_EQ_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleengrole,o=sun.com)") (version 3.0; acl "role-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) 
ROLE_PRES_ACI = '(targetattr="*")(targetfilter="(nsrole=*)") (version 3.0; acl "role-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) ROLE_SUB_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleeng*)") (version 3.0; acl "role-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) COS_EQ_ACI = '(targetattr="*")(targetfilter="(employeetype=engtype)") (version 3.0; acl "cos-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) COS_PRES_ACI = '(targetattr="*")(targetfilter="(employeetype=*)") (version 3.0; acl "cos-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) COS_SUB_ACI = '(targetattr="*")(targetfilter="(employeetype=eng*)") (version 3.0; acl "cos-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) LDAPURL_ACI = '(targetattr="*")(version 3.0; acl "url"; allow (all) userdn="ldap:///o=acivattr,dc=example,dc=com??sub?(nsrole=*eng*)";)' @pytest.mark.parametrize("user,entry,aci", [ (ENG_USER, ENG_MANAGER, REAL_EQ_ACI), (ENG_USER, ENG_MANAGER, REAL_PRES_ACI), (ENG_USER, ENG_MANAGER, REAL_SUB_ACI), (ENG_USER, ENG_MANAGER, ROLE_PRES_ACI), (ENG_USER, ENG_MANAGER, ROLE_SUB_ACI), (ENG_USER, ENG_MANAGER, COS_EQ_ACI), (ENG_USER, ENG_MANAGER, COS_PRES_ACI), (ENG_USER, ENG_MANAGER, COS_SUB_ACI), (ENG_USER, ENG_MANAGER, LDAPURL_ACI), ], ids=[ "(ENG_USER, ENG_MANAGER, REAL_EQ_ACI)", "(ENG_USER, ENG_MANAGER, REAL_PRES_ACI)", "(ENG_USER, ENG_MANAGER, REAL_SUB_ACI)", "(ENG_USER, ENG_MANAGER, ROLE_PRES_ACI)", '(ENG_USER, ENG_MANAGER, ROLE_SUB_ACI)', '(ENG_USER, ENG_MANAGER, COS_EQ_ACI)', '(ENG_USER, ENG_MANAGER, COS_PRES_ACI)', '(ENG_USER, ENG_MANAGER, COS_SUB_ACI)', '(ENG_USER, ENG_MANAGER, LDAPURL_ACI)', ]) def test_positive(topo, _add_user, aci_of_user, user, entry, aci): """Positive testing of ACLs :id: ba6d5e9c-786b-11e8-860d-8c16451d917b :parametrized: yes :setup: server :steps: 1. Add test entry 2. Add ACI 3. ACI role should be followed :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # set aci Domain(topo.standalone, DNBASE).set("aci", aci) # create connection conn = UserAccount(topo.standalone, user).bind(PW_DM) # according to the aci , user will be able to change description UserAccount(conn, entry).replace("description", "Fred") assert UserAccount(conn, entry).present('description') @pytest.mark.parametrize("user,entry,aci", [ (ENG_USER, SALES_MANAGER, REAL_EQ_ACI), (ENG_USER, SALES_OU, REAL_PRES_ACI), (ENG_USER, SALES_MANAGER, REAL_SUB_ACI), (ENG_USER, SALES_MANAGER, ROLE_EQ_ACI), (ENG_USER, SALES_OU, ROLE_PRES_ACI), (ENG_USER, SALES_MANAGER, ROLE_SUB_ACI), (ENG_USER, SALES_MANAGER, COS_EQ_ACI), (ENG_USER, SALES_OU, COS_PRES_ACI), (ENG_USER, SALES_MANAGER, COS_SUB_ACI), (SALES_UESER, SALES_MANAGER, LDAPURL_ACI), (ENG_USER, ENG_MANAGER, ROLE_EQ_ACI), ], ids=[ "(ENG_USER, SALES_MANAGER, REAL_EQ_ACI)", "(ENG_USER, SALES_OU, REAL_PRES_ACI)", "(ENG_USER, SALES_MANAGER, REAL_SUB_ACI)", "(ENG_USER, SALES_MANAGER, ROLE_EQ_ACI)", "(ENG_USER, SALES_MANAGER, ROLE_PRES_ACI)", '(ENG_USER, SALES_MANAGER, ROLE_SUB_ACI)', '(ENG_USER, SALES_MANAGER, COS_EQ_ACI)', '(ENG_USER, SALES_MANAGER, COS_PRES_ACI)', '(ENG_USER, SALES_MANAGER, COS_SUB_ACI)', '(SALES_UESER, SALES_MANAGER, LDAPURL_ACI)', '(ENG_USER, ENG_MANAGER, ROLE_EQ_ACI)' ]) def test_negative(topo, _add_user, aci_of_user, user, entry, aci): """Negative testing of ACLs :id: c4c887c2-786b-11e8-a328-8c16451d917b :parametrized: yes :setup: server :steps: 1. Add test entry 2. Add ACI 3. ACI role should be followed :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should not succeed """ # set aci Domain(topo.standalone, DNBASE).set("aci", aci) # create connection conn = UserAccount(topo.standalone, user).bind(PW_DM) # according to the aci , user will not be able to change description with pytest.raises(ldap.INSUFFICIENT_ACCESS): UserAccount(conn, entry).replace("description", "Fred") if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/acl_deny_test.py000066400000000000000000000206631421664411400263050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest import os import ldap import time from lib389._constants import * from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) BIND_DN2 = 'uid=tuser,ou=People,dc=example,dc=com' BIND_RDN2 = 'tuser' BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' BIND_RDN = 'tuser1' SRCH_FILTER = "uid=tuser1" SRCH_FILTER2 = "uid=tuser" aci_list_A = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)', '(targetattr = "*") (version 3.0;acl "allow tuser";allow (all)(userdn = "ldap:///uid=tuser5,ou=People,dc=example,dc=com");)', '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)', '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)'] aci_list_B = 
['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)', '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)', '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)'] @pytest.fixture(scope="module") def aci_setup(topo): topo.standalone.log.info("Add {}".format(BIND_DN)) user = UserAccount(topo.standalone, BIND_DN) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'sn': BIND_RDN, 'cn': BIND_RDN, 'uid': BIND_RDN, 'inetUserStatus': '1', 'objectclass': 'extensibleObject', 'userpassword': PASSWORD}) user.create(properties=user_props, basedn=SUFFIX) topo.standalone.log.info("Add {}".format(BIND_DN2)) user2 = UserAccount(topo.standalone, BIND_DN2) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'sn': BIND_RDN2, 'cn': BIND_RDN2, 'uid': BIND_RDN2, 'userpassword': PASSWORD}) user2.create(properties=user_props, basedn=SUFFIX) def test_multi_deny_aci(topo, aci_setup): """Test that mutliple deny rules work, and that they the cache properly stores the result :id: 294c366d-850e-459e-b5a0-3cc828ec3aca :setup: Standalone Instance :steps: 1. Add aci_list_A aci's and verify two searches on the same connection behave the same 2. Add aci_list_B aci's and verify search fails as expected :expectedresults: 1. Both searches do not return any entries 2. Seaches do not return any entries """ if DEBUGGING: # Maybe add aci logging? 
pass suffix = Domain(topo.standalone, DEFAULT_SUFFIX) for run in range(2): topo.standalone.log.info("Pass " + str(run + 1)) # Test ACI List A topo.standalone.log.info("Testing two searches behave the same...") topo.standalone.simple_bind_s(DN_DM, PASSWORD) suffix.set('aci', aci_list_A, ldap.MOD_REPLACE) time.sleep(1) topo.standalone.simple_bind_s(BIND_DN, PASSWORD) entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user") assert False # Bind a different user who has rights topo.standalone.simple_bind_s(BIND_DN2, PASSWORD) entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user (2)") assert False if run > 0: # Second pass topo.standalone.restart() # Reset ACI's and do the second test topo.standalone.log.info("Testing search does not return any entries...") topo.standalone.simple_bind_s(DN_DM, PASSWORD) suffix.set('aci', aci_list_B, ldap.MOD_REPLACE) time.sleep(1) topo.standalone.simple_bind_s(BIND_DN, PASSWORD) entries = 
topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") assert False if run > 0: # Second pass topo.standalone.restart() # Bind as different user who has rights topo.standalone.simple_bind_s(BIND_DN2, PASSWORD) entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as good user (2)") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") assert False # back to user 1 topo.standalone.simple_bind_s(BIND_DN, PASSWORD) entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as user1") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) if entries is None or len(entries) == 0: topo.standalone.log.fatal("Failed to get entry as user1 (2)") assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") 
assert False entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) if entries and entries[0]: topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") assert False topo.standalone.log.info("Test PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/acl_test.py000066400000000000000000001317431421664411400252700ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from ldap.controls.simple import GetEffectiveRightsControl from lib389.tasks import * from lib389.utils import * from lib389.schema import Schema from lib389.idm.domain import Domain from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalrole import OrganizationalRole, OrganizationalRoles from lib389.topologies import topology_m2 from lib389._constants import SUFFIX, DN_DM, DEFAULT_SUFFIX, PASSWORD pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX STAGING_CN = "staged user" PRODUCTION_CN = "accounts" EXCEPT_CN = "excepts" STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) BIND_RDN = "bind_entry" BIND_DN = "uid=%s,%s" % (BIND_RDN, SUFFIX) BIND_PW = "password" NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" 
SRC_ENTRY_CN = "tuser" EXT_RDN = "01" DST_ENTRY_CN = SRC_ENTRY_CN + EXT_RDN SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX) DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX) def add_attr(topology_m2, attr_name): """Adds attribute to the schema""" ATTR_VALUE = """(NAME '%s' \ DESC 'Attribute filteri-Multi-Valued' \ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27)""" % attr_name schema = Schema(topology_m2.ms["supplier1"]) schema.add('attributeTypes', ATTR_VALUE) @pytest.fixture(params=["lang-ja", "binary", "phonetic"]) def aci_with_attr_subtype(request, topology_m2): """Adds and deletes an ACI in the DEFAULT_SUFFIX""" TARGET_ATTR = 'protectedOperation' USER_ATTR = 'allowedToPerform' SUBTYPE = request.param suffix = Domain(topology_m2.ms["supplier1"], DEFAULT_SUFFIX) log.info("========Executing test with '%s' subtype========" % SUBTYPE) log.info(" Add a target attribute") add_attr(topology_m2, TARGET_ATTR) log.info(" Add a user attribute") add_attr(topology_m2, USER_ATTR) ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE) ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) ' ACI_SUBJECT = 'userattr = "%s;%s#GROUPDN";)' % (USER_ATTR, SUBTYPE) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT log.info("Add an ACI with attribute subtype") suffix.add('aci', ACI_BODY) def fin(): log.info("Finally, delete an ACI with the '%s' subtype" % SUBTYPE) suffix.remove('aci', ACI_BODY) request.addfinalizer(fin) return ACI_BODY def test_aci_attr_subtype_targetattr(topology_m2, aci_with_attr_subtype): """Checks, that ACIs allow attribute subtypes in the targetattr keyword :id: a99ccda0-5d0b-4d41-99cc-c5e207b3b687 :parametrized: yes :setup: MMR with two suppliers, Define two attributes in the schema - targetattr and userattr, Add an ACI with attribute subtypes - "lang-ja", "binary", "phonetic" one by one :steps: 1. Search for the added attribute during setup one by one for each subtypes "lang-ja", "binary", "phonetic" :expectedresults: 1. 
Attributes should be found successfully one by one for each subtypes "lang-ja", "binary", "phonetic" """ log.info("Search for the added attribute") try: entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, '(objectclass=*)', ['aci']) entry = str(entries[0]) assert aci_with_attr_subtype in entry log.info("The added attribute was found") except ldap.LDAPError as e: log.fatal('Search failed, error: ' + e.message['desc']) assert False def _bind_manager(topology_m2): topology_m2.ms["supplier1"].log.info("Bind as %s " % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) def _bind_normal(topology_m2): # bind as bind_entry topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) def _moddn_aci_deny_tree(topology_m2, mod_type=None, target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): """It denies the access moddn_to in cn=except,cn=accounts,SUFFIX""" assert mod_type is not None ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) if target_to: ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT # topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) topology_m2.ms["supplier1"].log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) prod_except = OrganizationalRole(topology_m2.ms["supplier1"], PROD_EXCEPT_DN) prod_except.set('aci', ACI_BODY, mod_type) def _write_aci_staging(topology_m2, mod_type=None): assert mod_type is not None ACI_TARGET = "(targetattr= \"uid\")(target=\"ldap:///uid=*,%s\")" % STAGING_DN ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = 
Domain(topology_m2.ms["supplier1"], SUFFIX) suffix.set('aci', ACI_BODY, mod_type) def _write_aci_production(topology_m2, mod_type=None): assert mod_type is not None ACI_TARGET = "(targetattr= \"uid\")(target=\"ldap:///uid=*,%s\")" % PRODUCTION_DN ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topology_m2.ms["supplier1"], SUFFIX) suffix.set('aci', ACI_BODY, mod_type) def _moddn_aci_staging_to_production(topology_m2, mod_type=None, target_from=STAGING_DN, target_to=PRODUCTION_DN): assert mod_type is not None ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) if target_to: ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topology_m2.ms["supplier1"], SUFFIX) suffix.set('aci', ACI_BODY, mod_type) _write_aci_staging(topology_m2, mod_type=mod_type) def _moddn_aci_from_production_to_staging(topology_m2, mod_type=None): assert mod_type is not None ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % ( PRODUCTION_DN, STAGING_DN) ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topology_m2.ms["supplier1"], SUFFIX) suffix.set('aci', ACI_BODY, mod_type) _write_aci_production(topology_m2, mod_type=mod_type) @pytest.fixture(scope="module") def moddn_setup(topology_m2): """Creates - a staging DIT - a production DIT - add accounts in staging DIT - enable ACL logging (commented for performance reason) """ m1 = topology_m2.ms["supplier1"] o_roles = 
OrganizationalRoles(m1, SUFFIX) m1.log.info("\n\n######## INITIALIZATION ########\n") # entry used to bind with m1.log.info("Add {}".format(BIND_DN)) user = UserAccount(m1, BIND_DN) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'sn': BIND_RDN, 'cn': BIND_RDN, 'uid': BIND_RDN, 'userpassword': BIND_PW}) user.create(properties=user_props, basedn=SUFFIX) # Add anonymous read aci ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"*\")" % (SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = " userdn = \"ldap:///anyone\";)" ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(m1, SUFFIX) suffix.add('aci', ACI_BODY) # DIT for staging m1.log.info("Add {}".format(STAGING_DN)) o_roles.create(properties={'cn': STAGING_CN, 'description': "staging DIT"}) # DIT for production m1.log.info("Add {}".format(PRODUCTION_DN)) o_roles.create(properties={'cn': PRODUCTION_CN, 'description': "production DIT"}) # DIT for production/except m1.log.info("Add {}".format(PROD_EXCEPT_DN)) o_roles_prod = OrganizationalRoles(m1, PRODUCTION_DN) o_roles_prod.create(properties={'cn': EXCEPT_CN, 'description': "production except DIT"}) # enable acl error logging # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] # m1.modify_s(DN_CONFIG, mod) # topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # add dummy entries in the staging DIT staging_users = UserAccounts(m1, SUFFIX, rdn="cn={}".format(STAGING_CN)) user_props = TEST_USER_PROPERTIES.copy() for cpt in range(MAX_ACCOUNTS): name = "{}{}".format(NEW_ACCOUNT, cpt) user_props.update({'sn': name, 'cn': name, 'uid': name}) staging_users.create(properties=user_props) def test_mode_default_add_deny(topology_m2, moddn_setup): """Tests that the ADD operation fails (no ADD aci on production) :id: 301d41d3-b8d8-44c5-8eb9-c2d2816b5a4f :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. 
Add an entry in production :expectedresults: 1. It should fail due to INSUFFICIENT_ACCESS """ topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n") _bind_normal(topology_m2) # # First try to add an entry in production => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to add %s" % PRODUCTION_DN) name = "%s%d" % (NEW_ACCOUNT, 0) topology_m2.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PRODUCTION_DN), { 'objectclass': "top person".split(), 'sn': name, 'cn': name, 'uid': name}))) assert 0 # this is an error, we should not be allowed to add an entry in production except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) def test_mode_default_delete_deny(topology_m2, moddn_setup): """Tests that the DEL operation fails (no 'delete' aci on production) :id: 5dcb2213-3875-489a-8cb5-ace057120ad6 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Delete an entry in staging :expectedresults: 1. 
It should fail due to INSUFFICIENT_ACCESS """ topology_m2.ms["supplier1"].log.info("\n\n######## DELETE (should fail) ########\n") _bind_normal(topology_m2) # # Second try to delete an entry in staging => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to delete %s" % STAGING_DN) name = "%s%d" % (NEW_ACCOUNT, 0) topology_m2.ms["supplier1"].delete_s("uid=%s,%s" % (name, STAGING_DN)) assert 0 # this is an error, we should not be allowed to add an entry in production except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) @pytest.mark.parametrize("index,tfrom,tto,failure", [(0, STAGING_DN, PRODUCTION_DN, False), (1, STAGING_DN, PRODUCTION_DN, False), (2, STAGING_DN, BAD_PRODUCTION_PATTERN, True), (3, STAGING_PATTERN, PRODUCTION_DN, False), (4, BAD_STAGING_PATTERN, PRODUCTION_DN, True), (5, STAGING_PATTERN, PRODUCTION_PATTERN, False), (6, None, PRODUCTION_PATTERN, False), (7, STAGING_PATTERN, None, False), (8, None, None, False)]) def test_moddn_staging_prod(topology_m2, moddn_setup, index, tfrom, tto, failure): """This test case MOVE entry NEW_ACCOUNT0 from staging to prod target_to/target_from: equality filter :id: cbafdd68-64d6-431f-9f22-6fbf9ed23ca0 :parametrized: yes :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Try to modify DN with moddn for each value of STAGING_DN -> PRODUCTION_DN 2. Try to modify DN with moddn for each value of STAGING_DN -> PRODUCTION_DN with appropriate ACI :expectedresults: 1. It should fail due to INSUFFICIENT_ACCESS 2. 
It should pass due to appropriate ACI """ topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index) _bind_normal(topology_m2) old_rdn = "uid=%s%s" % (NEW_ACCOUNT, index) old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN # # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # successful MOD with the ACI topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=tfrom, target_to=tto) _bind_normal(topology_m2) try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) if failure: assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # successful MOD with the both ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=tfrom, target_to=tto) _bind_normal(topology_m2) def test_moddn_staging_prod_9(topology_m2, moddn_setup): """Test with nsslapd-moddn-aci set to off so that MODDN requires an 'add' aci. :id: 222dd7e8-7ff1-40b8-ad26-6f8e42fbfcd9 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. 
Try to modify DN with moddn STAGING_DN -> PRODUCTION_DN 2. Add the moddn aci that will not be evaluated because of the config flag 3. Try to do modDN 4. Remove the moddn aci 5. Add the 'add' right to the production DN 6. Try to modify DN with moddn with 'add' right 7. Enable the moddn right 8. Try to rename without the appropriate ACI 9. Add the 'add' right to the production DN 10. Try to rename without the appropriate ACI 11. Remove the moddn aci :expectedresults: 1. It should fail due to INSUFFICIENT_ACCESS 2. It should pass 3. It should fail due to INSUFFICIENT_ACCESS 4. It should pass 5. It should pass 6. It should pass 7. It should pass 8. It should fail due to INSUFFICIENT_ACCESS 9. It should pass 10. It should fail due to INSUFFICIENT_ACCESS 11. It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (9) ########\n") _bind_normal(topology_m2) old_rdn = "uid=%s9" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN prod = OrganizationalRole(topology_m2.ms["supplier1"], PRODUCTION_DN) # # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) ############# # Now do tests with no support of moddn aci ############# topology_m2.ms["supplier1"].log.info("Disable the moddn right") _bind_manager(topology_m2) topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') # Add the moddn aci that will not be evaluated because of the config flag 
topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) # It will fail because it will test the ADD right try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # remove the moddn aci _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) # # add the 'add' right to the production DN # Then do a successful moddn # ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_ALLOW + ACI_SUBJECT _bind_manager(topology_m2) prod.add('aci', ACI_BODY) _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD) _bind_normal(topology_m2) topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) _bind_manager(topology_m2) prod.remove('aci', ACI_BODY) _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE) _bind_normal(topology_m2) ############# # Now do tests with support of moddn aci ############# topology_m2.ms["supplier1"].log.info("Enable the moddn right") _bind_manager(topology_m2) topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'on') topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod 
(10) ########\n") _bind_normal(topology_m2) old_rdn = "uid=%s10" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN # # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # # add the 'add' right to the production DN # Then do a failing moddn # ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_ALLOW + ACI_SUBJECT _bind_manager(topology_m2) prod.add('aci', ACI_BODY) _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD) _bind_normal(topology_m2) try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) _bind_manager(topology_m2) prod.remove('aci', ACI_BODY) _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE) _bind_normal(topology_m2) # Add the moddn aci that will be evaluated because of the config flag topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, 
target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) # remove the moddn aci _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) def test_moddn_prod_staging(topology_m2, moddn_setup): """This test checks that we can move ACCOUNT11 from staging to prod but not move back ACCOUNT11 from prod to staging :id: 2b061e92-483f-4399-9f56-8d1c1898b043 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Try to rename without the appropriate ACI 2. Try to MOD with the ACI from stage to production 3. Try to move back the entry to staging from production :expectedresults: 1. It should fail due to INSUFFICIENT_ACCESS 2. It should pass 3. 
It should fail due to INSUFFICIENT_ACCESS """ topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (11) ########\n") _bind_normal(topology_m2) old_rdn = "uid=%s11" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN # # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # successful MOD with the ACI topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) # Now check we can not move back the entry to staging old_rdn = "uid=%s11" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN) new_rdn = old_rdn new_superior = STAGING_DN # add the write right because we want to check the moddn _bind_manager(topology_m2) _write_aci_production(topology_m2, mod_type=ldap.MOD_ADD) _bind_normal(topology_m2) try: topology_m2.ms["supplier1"].log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it 
fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) _bind_manager(topology_m2) _write_aci_production(topology_m2, mod_type=ldap.MOD_DELETE) _bind_normal(topology_m2) # successful MOD with the both ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) def test_check_repl_M2_to_M1(topology_m2, moddn_setup): """Checks that replication is still working M2->M1, using ACCOUNT12 :id: 08ac131d-34b7-443f-aacd-23025bbd7de1 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Add an entry in M2 2. Search entry on M1 :expectedresults: 1. It should pass 2. It should pass """ topology_m2.ms["supplier1"].log.info("Bind as %s (M2)" % DN_DM) topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) rdn = "uid=%s12" % NEW_ACCOUNT dn = "%s,%s" % (rdn, STAGING_DN) new_account = UserAccount(topology_m2.ms["supplier2"], dn) # First wait for the ACCOUNT19 entry being replicated on M2 loop = 0 while loop <= 10: try: ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert loop <= 10 attribute = 'description' tested_value = b'Hello world' topology_m2.ms["supplier1"].log.info("Update (M2) %s (%s)" % (dn, attribute)) new_account.add(attribute, tested_value) loop = 0 while loop <= 10: ent = topology_m2.ms["supplier1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ent is not None if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): break time.sleep(1) loop += 1 assert loop < 10 topology_m2.ms["supplier1"].log.info("Update %s (%s) replicated on M1" % (dn, attribute)) def test_moddn_staging_prod_except(topology_m2, moddn_setup): """This test case MOVE entry NEW_ACCOUNT13 from 
staging to prod but fails to move entry NEW_ACCOUNT14 from staging to prod_except :id: 02d34f4c-8574-428d-b43f-31227426392c :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Try to move entry staging -> Prod without the appropriate ACI 2. Do MOD with the appropriate ACI 3. Try to move an entry under Prod/Except from stage 4. Try to do MOD with appropriate ACI :expectedresults: 1. It should fail due to INSUFFICIENT_ACCESS 2. It should pass 3. It should fail due to INSUFFICIENT_ACCESS 4. It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (13) ########\n") _bind_normal(topology_m2) old_rdn = "uid=%s13" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN # # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS # try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # successful MOD with the ACI topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_ADD) _bind_normal(topology_m2) topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) # # Now try to move an entry under except # 
topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n") old_rdn = "uid=%s14" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PROD_EXCEPT_DN try: topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) assert 0 except AssertionError: topology_m2.ms["supplier1"].log.info( "Exception (not really expected exception but that is fine as it fails to rename)") except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # successful MOD with the both ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_DELETE) _bind_normal(topology_m2) def test_mode_default_ger_no_moddn(topology_m2, moddn_setup): """mode moddn_aci : Check Get Effective Rights Controls for entries :id: f4785d73-3b14-49c0-b981-d6ff96fa3496 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Search for GER controls on M1 2. Check 'n' is not in the entryLevelRights :expectedresults: 1. It should pass 2. 
It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci : GER no moddn ########\n") request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) # ger={} value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' not in value def test_mode_default_ger_with_moddn(topology_m2, moddn_setup): """This test case adds the moddn aci and check ger contains 'n' :id: a752a461-432d-483a-89c0-dfb34045a969 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Add moddn ACI on M2 2. Search for GER controls on M1 3. Check entryLevelRights value for entries 4. Check 'n' is in the entryLevelRights :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. 
It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci: GER with moddn ########\n") # successful MOD with the ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) # ger={} value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' in value # successful MOD with the both ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) def test_mode_legacy_ger_no_moddn1(topology_m2, moddn_setup): """This test checks mode legacy : GER no moddn :id: e783e05b-d0d0-4fd4-9572-258a81b7bd24 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Disable ACI checks - set nsslapd-moddn-aci: off 2. Search for GER controls on M1 3. Check entryLevelRights value for entries 4. Check 'n' is not in the entryLevelRights :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. 
It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") _bind_manager(topology_m2) topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy 1: GER no moddn ########\n") request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) # ger={} value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' not in value def test_mode_legacy_ger_no_moddn2(topology_m2, moddn_setup): """This test checks mode legacy : GER no moddn :id: af87e024-1744-4f1d-a2d3-ea2687e2351d :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Disable ACI checks - set nsslapd-moddn-aci: off 2. Add moddn ACI on M1 3. Search for GER controls on M1 4. Check entryLevelRights value for entries 5. Check 'n' is not in the entryLevelRights :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. It should be pass 5. 
It should pass """ topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") _bind_manager(topology_m2) topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy 2: GER no moddn ########\n") # successful MOD with the ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) # ger={} value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' not in value # successful MOD with the both ACI _bind_manager(topology_m2) _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology_m2) def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup): """This test checks mode legacy : GER with moddn :id: 37c1e537-1b5d-4fab-b62a-50cd8c5b3493 :setup: MMR with two suppliers, M1 - staging DIT M2 - production DIT add test accounts in staging DIT :steps: 1. Disable ACI checks - set nsslapd-moddn-aci: off 2. Add moddn ACI on M1 3. Search for GER controls on M1 4. Check entryLevelRights value for entries 5. Check 'n' is in the entryLevelRights 6. Try MOD with the both ACI :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. It should pass 5. It should pass 6. 
It should pass """ suffix = Domain(topology_m2.ms["supplier1"], SUFFIX) topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") _bind_manager(topology_m2) topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy : GER with moddn ########\n") # being allowed to read/write the RDN attribute use to allow the RDN ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"uid\")" % (PRODUCTION_DN) ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT # successful MOD with the ACI _bind_manager(topology_m2) suffix.add('aci', ACI_BODY) _bind_normal(topology_m2) request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) # ger={} value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' in value # successful MOD with the both ACI _bind_manager(topology_m2) suffix.remove('aci', ACI_BODY) # _bind_normal(topology_m2) @pytest.fixture(scope="module") def rdn_write_setup(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######## Add entry tuser ########\n") user = UserAccount(topology_m2.ms["supplier1"], SRC_ENTRY_DN) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'sn': SRC_ENTRY_CN, 'cn': SRC_ENTRY_CN, 'userpassword': BIND_PW}) user.create(properties=user_props, basedn=SUFFIX) def test_rdn_write_get_ger(topology_m2, rdn_write_setup): """This test checks GER rights for anonymous :id: 
d5d85f87-b53d-4f50-8fa6-a9e55c75419b :setup: MMR with two suppliers, Add entry tuser :steps: 1. Search for GER controls on M1 2. Check entryLevelRights value for entries 3. Check 'n' is not in the entryLevelRights :expectedresults: 1. It should pass 2. It should be pass 3. It should pass """ ANONYMOUS_DN = "" topology_m2.ms["supplier1"].log.info("\n\n######## GER rights for anonymous ########\n") request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn:" + ANONYMOUS_DN)) msg_id = topology_m2.ms["supplier1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) value = '' for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) for value in attrs['entryLevelRights']: topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) assert b'n' not in value def test_rdn_write_modrdn_anonymous(topology_m2, rdn_write_setup): """Tests anonymous user for modrdn :id: fc07be23-3341-44ab-a53c-c68c5f9569c7 :setup: MMR with two suppliers, Add entry tuser :steps: 1. Bind as anonymous user 2. Try to perform MODRDN operation (SRC_ENTRY_DN -> DST_ENTRY_CN) 3. Try to search DST_ENTRY_CN :expectedresults: 1. It should pass 2. It should fails with INSUFFICIENT_ACCESS 3. 
It should fails with NO_SUCH_OBJECT """ ANONYMOUS_DN = "" topology_m2.ms["supplier1"].close() topology_m2.ms["supplier1"].binddn = ANONYMOUS_DN topology_m2.ms["supplier1"].open() msg_id = topology_m2.ms["supplier1"].search_ext("", ldap.SCOPE_BASE, "objectclass=*") rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) for attr in attrs: topology_m2.ms["supplier1"].log.info("######## %r: %r" % (attr, attrs[attr])) try: topology_m2.ms["supplier1"].rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) isinstance(e, ldap.INSUFFICIENT_ACCESS) try: topology_m2.ms["supplier1"].getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*") assert False except Exception as e: topology_m2.ms["supplier1"].log.info("The entry was not renamed (expected)") isinstance(e, ldap.NO_SUCH_OBJECT) _bind_manager(topology_m2) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/conftest.py000066400000000000000000000101111421664411400253000ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ This is the config file for keywords test scripts. """ import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain @pytest.fixture(scope="function") def aci_of_user(request, topo): """ Removes and Restores ACIs after the test. 
""" aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') def finofaci(): """ Removes and Restores ACIs after the test. """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.remove_all('aci') for aci in aci_list: domain.add("aci", aci) request.addfinalizer(finofaci) @pytest.fixture(scope="module") def add_user(request, topo): """ This function will create user for the test and in the end entries will be deleted . """ ous_origin = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou_origin = ous_origin.create(properties={'ou': 'Keywords'}) ous_next = OrganizationalUnits(topo.standalone, ou_origin.dn) for ou in ['Authmethod', 'Dayofweek', 'DNS', 'IP', 'Timeofday']: ous_next.create(properties={'ou': ou}) users_day_of_week = UserAccounts(topo.standalone, f"ou=Dayofweek,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) for user in ['EVERYDAY_KEY', 'TODAY_KEY', 'NODAY_KEY']: users_day_of_week.create(properties={ 'uid': user, 'cn': user, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + user, 'userPassword': PW_DM }) users_ip = UserAccounts(topo.standalone, f"ou=IP,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) for user in ['FULLIP_KEY', 'NETSCAPEIP_KEY', 'NOIP_KEY']: users_ip.create(properties={ 'uid': user, 'cn': user, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + user, 'userPassword': PW_DM }) users_timeof_day = UserAccounts(topo.standalone, f"ou=Timeofday,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) for user in ['FULLWORKER_KEY', 'DAYWORKER_KEY', 'NOWORKER_KEY', 'NIGHTWORKER_KEY']: users_timeof_day.create(properties={ 'uid': user, 'cn': user, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + user, 'userPassword': PW_DM }) users_authmethod = UserAccounts(topo.standalone, f"ou=Authmethod,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) for user in ['NONE_1_KEY', 'NONE_2_KEY', 'SIMPLE_1_KEY']: users_authmethod.create(properties={ 'uid': user, 'cn': user, 'sn': 
'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + user, 'userPassword': PW_DM }) users_dns = UserAccounts(topo.standalone, f"ou=DNS,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) for user in ['FULLDNS_KEY', 'SUNDNS_KEY', 'NODNS_KEY', 'NETSCAPEDNS_KEY']: users_dns.create(properties={ 'uid': user, 'cn': user, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + user, 'userPassword': PW_DM }) def fin(): """ Deletes entries after the test. """ for user in users_day_of_week.list() + users_ip.list() + users_timeof_day.list() + \ users_authmethod.list() + users_dns.list(): user.delete() for ou in sorted(ous_next.list(), key=lambda x: len(x.dn), reverse=True): ou.delete() request.addfinalizer(fin) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/default_aci_allows_self_write_test.py000066400000000000000000000075041421664411400325720ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import pytest from lib389.idm.user import nsUserAccounts, UserAccounts from lib389.topologies import topology_st as topology from lib389.paths import Paths from lib389.utils import ds_is_older from lib389._constants import * default_paths = Paths() pytestmark = pytest.mark.tier1 USER_PASSWORD = "some test password" NEW_USER_PASSWORD = "some new password" @pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") def test_acl_default_allow_self_write_nsuser(topology): """ Testing nsusers can self write and self read. This it a sanity test so that our default entries have their aci's checked. :id: 4f0fb01a-36a6-430c-a2ee-ebeb036bd951 :setup: Standalone instance :steps: 1. Testing comparison of two different users. :expectedresults: 1. 
Should fail to compare """ topology.standalone.enable_tls() nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) # Create a user as dm. user = nsusers.create(properties={ 'uid': 'test_nsuser', 'cn': 'test_nsuser', 'displayName': 'testNsuser', 'legalName': 'testNsuser', 'uidNumber': '1001', 'gidNumber': '1001', 'homeDirectory': '/home/testnsuser', 'userPassword': USER_PASSWORD, }) # Create a new con and bind as the user. user_conn = user.bind(USER_PASSWORD) user_nsusers = nsUserAccounts(user_conn, DEFAULT_SUFFIX) self_ent = user_nsusers.get(dn=user.dn) # Can we self read x,y,z check = self_ent.get_attrs_vals_utf8([ 'uid', 'cn', 'displayName', 'legalName', 'uidNumber', 'gidNumber', 'homeDirectory', ]) for k in check.values(): # Could we read the values? assert(isinstance(k, list)) assert(len(k) > 0) # Can we self change a,b,c self_ent.ensure_attr_state({ 'legalName': ['testNsuser_update'], 'displayName': ['testNsuser_update'], 'nsSshPublicKey': ['testkey'], }) # self change pw self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) @pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") def test_acl_default_allow_self_write_user(topology): """ Testing users can self write and self read. This it a sanity test so that our default entries have their aci's checked. :id: 4c52321b-f473-4c32-a1d5-489b138cd199 :setup: Standalone instance :steps: 1. Testing comparison of two different users. :expectedresults: 1. Should fail to compare """ topology.standalone.enable_tls() users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) # Create a user as dm. user = users.create(properties={ 'uid': 'test_user', 'cn': 'test_user', 'sn': 'User', 'uidNumber': '1002', 'gidNumber': '1002', 'homeDirectory': '/home/testuser', 'userPassword': USER_PASSWORD, }) # Create a new con and bind as the user. 
user_conn = user.bind(USER_PASSWORD) user_users = UserAccounts(user_conn, DEFAULT_SUFFIX) self_ent = user_users.get(dn=user.dn) # Can we self read x,y,z check = self_ent.get_attrs_vals_utf8([ 'uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', ]) for (a, k) in check.items(): print(a) # Could we read the values? assert(isinstance(k, list)) assert(len(k) > 0) # Self change pw self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/deladd_test.py000066400000000000000000000357461421664411400257540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ Importing necessary Modules. """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain import ldap pytestmark = pytest.mark.tier1 USER_WITH_ACI_DELADD = 'uid=test_user_1000,ou=People,dc=example,dc=com' USER_DELADD = 'uid=test_user_1,ou=Accounting,dc=example,dc=com' @pytest.fixture(scope="function") def _aci_of_user(request, topo): """ Removes and Restores ACIs after the test. """ aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): """ Removes and Restores ACIs after the test. """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.remove_all('aci') for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def _add_user(request, topo): """ This function will create user for the test and in the end entries will be deleted . 
""" users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.create_test_user() user.set("userPassword", PW_DM) ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ous.create(properties={'ou':'Accounting'}) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') for i in range(1, 3): user = users.create_test_user(uid=i, gid=i) user.set("userPassword", PW_DM) def fin(): """ Deletes entries after the test. """ users1 = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) for dn_dn in users1.list(): dn_dn.delete() groups = Groups(topo.standalone, DEFAULT_SUFFIX) for dn_dn in groups.list(): dn_dn.delete() ou_ou = OrganizationalUnit(topo.standalone, f'ou=Accounting,{DEFAULT_SUFFIX}') ou_ou.delete() request.addfinalizer(fin) def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): """Test allow delete access to groupdn :id: 7cf15992-68ad-11e8-85af-54e1ad30572c :setup: topo.standalone :steps: 1. Add test entry 2. Add ACI that allows groupdn to delete 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Delete operation should succeed 4. 
Delete operation for ACI should succeed """ # Create Group and add member groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) group.add_member(USER_WITH_ACI_DELADD) # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete) ' aci_subject = f'groupdn="ldap:///{group.dn}";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform delete operation for i in [USER_DELADD, USER_WITH_ACI_DELADD]: UserAccount(conn, i).delete() def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): """Test to allow add access to anyone :id: 5ca31cc4-68e0-11e8-8666-8c16451d917b :setup: topo.standalone :steps: 1. Add test entry 2. Add ACI that allows groupdn to add 3. Add something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Add operation should succeed 4. Delete operation for ACI should succeed """ # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (add) ' aci_subject = f'userdn="ldap:///anyone";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform add operation users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=Accounting') user = users.create_test_user(gid=3, uid=3) assert user.exists() users = UserAccounts(conn, DEFAULT_SUFFIX) user = users.create_test_user(gid=3, uid=3) assert user.exists() def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): """Test to allow delete access to anyone :id: f5447c7e-68e1-11e8-84c4-8c16451d917b :setup: server :steps: 1. Add test entry 2. 
Add ACI that allows groupdn to delete some userdn 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should succeed 4. Delete operation for ACI should succeed """ # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (delete) ' aci_subject = f'userdn="ldap:///anyone";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform delete operation UserAccount(conn, USER_DELADD).delete() def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): """Test to Allow delete access to != userdn :id: 00637f6e-68e3-11e8-92a3-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI that allows userdn not to delete some userdn 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should not succeed 4. Delete operation for ACI should succeed """ # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for %s"; allow (delete) ' % USER_DELADD aci_subject = f'userdn!="ldap:///{USER_WITH_ACI_DELADD}";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform delete operation user = UserAccount(conn, USER_DELADD) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.delete() def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): """Test to Allow delete access to != groupdn :id: f58fc8b0-68e5-11e8-9313-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI that allows groupdn not to delete some userdn 3. Delete something using test USER_DELADD belong to test group 4. 
Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should not succeed 4. Delete operation for ACI should succeed """ # Create group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) group.add_member(USER_WITH_ACI_DELADD) # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete)' aci_subject = f'groupdn!="ldap:///{group.dn}";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) user = UserAccount(conn, USER_DELADD) # Perform delete operation with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.delete() def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): """Test to Allow add privilege to parent :id: 9f099845-9dbc-412f-bdb9-19a5ea729694 :setup: server :steps: 1. Add test entry 2. Add ACI that Allow add privilege to parent 3. Add something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should succeed 4. 
Delete operation for ACI should succeed """ # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add) ' aci_subject = f'userdn="ldap:///parent";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform Allow add privilege to parent users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') user = users.create_test_user(gid=1, uid=1) assert user.exists() # Delete created user UserAccounts(topo.standalone, DEFAULT_SUFFIX).get('test_user_1').delete() def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): """Test to Allow delete access to parent :id: 2dd7f624-68e7-11e8-8591-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI that Allow delete privilege to parent 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should succeed 4. 
Delete operation for ACI should succeed """ # set aci aci_target = f'(targetattr="*")' aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add,delete) ' aci_subject = f'userdn="ldap:///parent";)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Create a user with parent 'uid=test_user_1000, ou=people, {}'.format(DEFAULT_SUFFIX) users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') new_user = users.create_test_user(gid=1, uid=1) assert new_user.exists() # Perform Allow delete access to parent new_user.delete() def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user, request): """Test to Allow delete access to dynamic group :id: 14ffa452-68ed-11e8-a60d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI that Allow delete privilege to dynamic group 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should succeed 4. 
Delete operation for ACI should succeed """ # Create dynamic group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) group.add("objectclass", "groupOfURLs") group.add("memberURL", f"ldap:///dc=example,dc=com??sub?(&(objectclass=person)(uid=test_user_1000))") # Set ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "{request.node.name}"; ' f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform Allow delete access to dynamic group UserAccount(conn, USER_DELADD).delete() def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user, request): """Test to Allow delete access to dynamic group :id: 010a4f20-752a-4173-b763-f520c7a85b82 :setup: server :steps: 1. Add test entry 2. Add ACI that Allow delete privilege to dynamic group 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should succeed 4. 
Delete operation for ACI should succeed """ # Create dynamic group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) group.add("objectclass", "groupOfURLs") group.add("memberURL", f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') # Set ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' f'(targetattr="uid")(version 3.0; acl "{request.node.name}"; ' f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Perform Allow delete access to dynamic group UserAccount(conn, USER_DELADD).delete() def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user, request): """Test to Allow delete access to != dynamic group :id: 9ecb139d-bca8-428e-9044-fd89db5a3d14 :setup: server :steps: 1. Add test entry 2. Add ACI that delete access to != dynamic group 3. Delete something using test USER_DELADD 4. Remove ACI :expectedresults: 1. Entry should be added 2. ACI should be added 3. Operation should not succeed 4. 
Delete operation for ACI should succeed """ # Create dynamic group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) group.add("objectclass", "groupOfURLs") group.add("memberURL", f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') # Set ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' f'(targetattr="*")(version 3.0; acl "{request.node.name}"; ' f'allow (delete) (groupdn != "ldap:///{group.dn}"); )') # create connection with USER_WITH_ACI_DELADD conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) user = UserAccount(conn, USER_DELADD) # Perform Allow delete access to != dynamic group with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.delete() if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py000066400000000000000000000111201421664411400304370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) CONTAINER_1_OU = 'test_ou_1' CONTAINER_2_OU = 'test_ou_2' CONTAINER_1 = f'ou={CONTAINER_1_OU},dc=example,dc=com' CONTAINER_2 = f'ou={CONTAINER_2_OU},dc=example,dc=com' USER_CN = 'test_user' USER_PWD = 'Secret123' USER = f'cn={USER_CN},{CONTAINER_1}' @pytest.fixture(scope="module") def env_setup(topology_st): """Adds two containers, one user and two ACI rules""" log.info("Add a container: %s" % CONTAINER_1) topology_st.standalone.add_s(Entry((CONTAINER_1, {'objectclass': ['top','organizationalunit'], 'ou': CONTAINER_1_OU, }))) log.info("Add a container: %s" % CONTAINER_2) topology_st.standalone.add_s(Entry((CONTAINER_2, {'objectclass': ['top', 'organizationalunit'], 'ou': CONTAINER_2_OU, }))) log.info("Add a user: %s" % USER) topology_st.standalone.add_s(Entry((USER, {'objectclass': 'top person'.split(), 'cn': USER_CN, 'sn': USER_CN, 'userpassword': USER_PWD }))) ACI_TARGET = '(targetattr="*")' ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, CONTAINER_1)) topology_st.standalone.modify_s(CONTAINER_1, mod) log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, CONTAINER_2)) topology_st.standalone.modify_s(CONTAINER_2, mod) @pytest.mark.ds47553 def test_enhanced_aci_modrnd(topology_st, env_setup): """Tests, that MODRDN operation is allowed, if user has ACI right '(all)' under superior entries, but doesn't have '(modrdn)' :id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9 :setup: Standalone instance :steps: 1. Create two containers 2. Create a user within "ou=test_ou_1,dc=example,dc=com" 3. 
Add an aci with a rule "cn=test_user is allowed all" within these containers 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to the "ou=test_ou_2,dc=example,dc=com" 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) :expectedresults: 1. Two containers should be created 2. User should be added successfully 3. This should pass 4. This should pass 5. User should not be found under container ou=test_ou_1,dc=example,dc=com 6. User should be found under container ou=test_ou_2,dc=example,dc=com """ log.info("Bind as %s" % USER) topology_st.standalone.simple_bind_s(USER, USER_PWD) log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, CONTAINER_2)) topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN, newsuperior=CONTAINER_2, delold=1) log.info("Check there is no user in %s" % CONTAINER_1) entries = topology_st.standalone.search_s(CONTAINER_1, ldap.SCOPE_ONELEVEL, 'cn=%s' % USER_CN) assert not entries log.info("Check there is our user in %s" % CONTAINER_2) entries = topology_st.standalone.search_s(CONTAINER_2, ldap.SCOPE_ONELEVEL, 'cn=%s' % USER_CN) assert entries if __name__ == '__main__': # Run isolated # -s for DEBUG mode # -v for additional verbose CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py000066400000000000000000000501221421664411400301450ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.group import UniqueGroup, UniqueGroups from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPDNATTRSCRATCHENTRY_GLOBAL = "uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) CHILD1_GLOBAL = "uid=CHILD1_GLOBAL,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): aci_list = Domain(topo.standalone, 
DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="module") def test_user(request, topo): for demo in ['Product Development', 'Accounting', 'nestedgroup']: OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo}) uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: uas.create(properties={ 'uid': demo1, 'cn': demo1, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'userPassword': PW_DM }) # Add anonymous access aci ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) suffix.add('aci', ANON_ACI) uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') for demo1 in ['c1', 'CHILD1_GLOBAL']: uas.create(properties={ 'uid': demo1, 'cn': demo1, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'userPassword': PW_DM }) grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup') for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL), ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL), ('GROUPF_GLOBAL', GROUPG_GLOBAL), ('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]: grp.create(properties={'cn': i[0], 'ou': 'groups', 'uniquemember': i[1] }) def test_undefined_in_group_eval_five(topo, 
test_user, aci_of_user): """ Aci will not allow access as Group dn is not allowed so members will not allowed access. :id: 11451a96-7841-11e8-9f79-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fulfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) # This aci should NOT allow access user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace("description", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_six(topo, test_user, aci_of_user): """ Aci will not allow access as tested user is not a member of allowed Group dn :id: 1904572e-7841-11e8-a9d8-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) # test UNDEFINED in group user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace("description", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_seven(topo, test_user, aci_of_user): """ Aci will not allow access as tested user is not a member of allowed Group dn :id: 206b43c4-7841-11e8-b3ed-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) # test UNDEFINED in group user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace("description", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_eight(topo, test_user, aci_of_user): """ Aci will not allow access as Group dn is not allowed so members will not allowed access. :id: 26ca7456-7841-11e8-801e-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) # test UNDEFINED in group user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace("description", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_nine(topo, test_user, aci_of_user): """ Aci will not allow access as Group dn is not allowed so members will not allowed access. :id: 38c7fbb0-7841-11e8-90aa-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) # test UNDEFINED in group user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace("sn", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_ten(topo, test_user, aci_of_user): """ Test the userattr keyword to ensure that it evaluates correctly. :id: 46c0fb72-7841-11e8-af1d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # Test the userattr keyword user.add("sn", "Fred") assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) def test_undefined_in_group_eval_eleven(topo, test_user, aci_of_user): """ Aci will not allow access as description is there with the user entry which is not allowed in ACI :id: 4cfa28e2-7841-11e8-8117-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # Test that not(UNDEFINED(attrval1)) user1 = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user1.add("sn", "Fred1") assert user.get_attr_val_utf8('cn') user.remove("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) def test_undefined_in_group_eval_twelve(topo, test_user, aci_of_user): """ Test with the parent keyord that Yields TRUE as description is present in tested entry :id: 54f471ec-7841-11e8-8910-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. 
add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # Test with the parent keyord UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL).add("sn", "Fred") assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) def test_undefined_in_group_eval_fourteen(topo, test_user, aci_of_user): """ Test with parent keyword that Yields FALSE as description is not present in tested entry :id: 5c527218-7841-11e8-8909-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) # Test with parent keyword user1 = UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user1.add("sn", "Fred") assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) def test_undefined_in_group_eval_fifteen(topo, test_user, aci_of_user): """ Here do the same tests for userattr with the parent keyword. :id: 6381c070-7841-11e8-a6b6-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') UserAccount(topo.standalone, NESTEDGROUP_OU_GLOBAL).add("description", DEEPUSER_GLOBAL) # Here do the same tests for userattr with the parent keyword. conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL).add("description", DEEPUSER_GLOBAL) def test_undefined_in_group_eval_sixteen(topo, test_user, aci_of_user): """ Test with parent keyword with not key :id: 69852688-7841-11e8-8db1-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed 4. Operation should succeed 5. Operation should succeed """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') domain.add("description", DEEPUSER_GLOBAL) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # Test with parent keyword with not key user = UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("description",DEEPUSER_GLOBAL) def test_undefined_in_group_eval_seventeen(topo, test_user, aci_of_user): """ Test with the parent keyord that Yields TRUE as description is present in tested entry :id: 7054d1c0-7841-11e8-8177-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) # Test with the parent keyord user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) UserAccount(conn, CHILD1_GLOBAL).add("description", DEEPUSER_GLOBAL) user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) def test_undefined_in_group_eval_eighteen(topo, test_user, aci_of_user): """ Test with parent keyword with not key :id: 768b9ab0-7841-11e8-87c3-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) # Test with parent keyword with not key user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) user = UserAccount(conn, CHILD1_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("description", DEEPUSER_GLOBAL) if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/globalgroup_test.py000066400000000000000000000440421421664411400270410ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.group import UniqueGroup, UniqueGroups from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 ACLGROUP_OU_GLOBAL = "ou=ACLGroup,{}".format(DEFAULT_SUFFIX) NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) TESTING_OU_GLOBAL = "ou=Product Testing,{}".format(DEFAULT_SUFFIX) DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPUSER1_GLOBAL = "uid=DEEPUSER1_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPDNATTRSCRATCHENTRY_GLOBAL = 
"uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) BIG_GLOBAL = "cn=BIG_GLOBAL Group,{}".format(DEFAULT_SUFFIX) ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="module") def test_user(request, topo): for demo in ['Product Development', 'Accounting', 'Product Testing', 'nestedgroup', 'ACLGroup']: OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo}) user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') for demo1 in ['Ted Morris', 'David Miller']: user.create(properties= { 'uid': demo1, 'cn': demo1, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'userPassword': PW_DM }) # Add anonymous access aci ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % 
(DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) suffix.add('aci', ANON_ACI) uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER1_GLOBAL', 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: uas.create(properties={ 'uid': demo1, 'cn': demo1, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'userPassword': PW_DM }) uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') for demo1 in ['c1', 'CHILD1_GLOBAL']: uas.create(properties={ 'uid': demo1, 'cn': demo1, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'userPassword': PW_DM }) grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup') for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL), ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL), ('GROUPF_GLOBAL', GROUPG_GLOBAL), ('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]: grp.create(properties={'cn': i[0], 'ou': 'groups', 'uniquemember': i[1] }) grp = UniqueGroup(topo.standalone, 'cn=BIG_GLOBAL Group,{}'.format(DEFAULT_SUFFIX)) grp.create(properties={'cn': 'BIG_GLOBAL Group', 'ou': 'groups', 'uniquemember': ["uid=Ted Morris,ou=Accounting,{}".format(DEFAULT_SUFFIX), "uid=David Miller,ou=Accounting,{}".format(DEFAULT_SUFFIX),] }) def test_caching_changes(topo, aci_of_user, test_user): """ Add user and then test deny :id: 26ed2dc2-783f-11e8-b1a5-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. 
test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="roomnumber")(version 3.0; acl "ACLGroup"; deny ( read, search ) userdn = "ldap:///all" ;)') user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user() user.set('roomnumber', '3445') conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # targetattr="roomnumber" will be denied access user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com') with pytest.raises(AssertionError): assert user.get_attr_val_utf8('roomNumber') UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete() def test_deny_group_member_all_rights_to_user(topo, aci_of_user, test_user): """ Try deleting user while no access :id: 0da68a4c-7840-11e8-98c2-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. delete test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL)) conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) # group BIG_GLOBAL will have no access user = UserAccount(conn, DEEPUSER3_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.delete() def test_deny_group_member_all_rights_to_group_members(topo, aci_of_user, test_user): """ Deny group member all rights :id: 2d4ff70c-7840-11e8-8472-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. 
test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL)) UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user() conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) # group BIG_GLOBAL no access user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com') with pytest.raises(IndexError): user.get_attr_val_utf8('uid') UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete() def test_deeply_nested_groups_aci_denial(topo, test_user, aci_of_user): """ Test deeply nested groups (1) This aci will not allow search or modify to a user too deep to be detected. :id: 3d98229c-7840-11e8-9f55-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # ALLGROUPS_GLOBAL have all access assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.delete() def test_deeply_nested_groups_aci_denial_two(topo, test_user, aci_of_user): """ Test deeply nested groups (2) This aci will allow search and modify :id: 4ef6348e-7840-11e8-a70c-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # GROUPE_GLOBAL have all access user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) user.add("sn", "Fred") user.remove("sn", "Fred") def test_deeply_nested_groups_aci_allow(topo, test_user, aci_of_user): """ Test deeply nested groups (3) This aci will allow search and modify :id: 8d338210-7840-11e8-8584-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ['(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL), '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL)]) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # test deeply nested groups user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) user.add("sn", "Fred") user.remove("sn", "Fred") def test_deeply_nested_groups_aci_allow_two(topo, test_user, aci_of_user): """ This aci will not allow search or modify to a user too deep to be detected. :id: 8d3459c4-7840-11e8-8ed8-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # This aci should not allow search or modify to a user too deep to be detected. user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("sn", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval(topo, test_user, aci_of_user): """ This aci will not allow access . :id: f1605e16-7840-11e8-b954-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn != "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) # This aci should NOT allow access user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("sn", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' def test_undefined_in_group_eval_two(topo, test_user, aci_of_user): """ This aci will allow access :id: fcfbcce2-7840-11e8-ba77-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) # This aci should allow access user.add("sn", "Fred") assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' user.remove("sn", "Fred") def test_undefined_in_group_eval_three(topo, test_user, aci_of_user): """ This aci will allow access :id: 04943dcc-7841-11e8-8c46-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) user = Domain(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) # test UNDEFINED in group user.add("sn", "Fred") assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' user.remove("sn", "Fred") def test_undefined_in_group_eval_four(topo, test_user, aci_of_user): """ This aci will not allow access :id: 0b03d10e-7841-11e8-9341-8c16451d917b :setup: server :steps: 1. Add test entry 2. Take a count of users using DN_DM 3. Add test user 4. add aci 5. test should fullfil the aci rules :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) conn = UserAccount(topo.standalone, DEEPUSER1_GLOBAL).bind(PW_DM) # test UNDEFINED in group user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("sn", "Fred") assert user.get_attr_val_utf8('uid') == 'scratchEntry' if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/keywords_part2_test.py000066400000000000000000000355261421664411400275120ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ This test script will test wrong/correct key value with ACIs. 
""" import ldap import os import pytest import socket import time from datetime import datetime from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.domain import Domain from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.user import UserAccount pytestmark = pytest.mark.tier1 KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX) DAYOFWEEK_OU_KEY = "ou=Dayofweek,{}".format(KEYWORDS_OU_KEY) IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY) TIMEOFDAY_OU_KEY = "ou=Timeofday,{}".format(KEYWORDS_OU_KEY) EVERYDAY_KEY = "uid=EVERYDAY_KEY,{}".format(DAYOFWEEK_OU_KEY) TODAY_KEY = "uid=TODAY_KEY,{}".format(DAYOFWEEK_OU_KEY) NODAY_KEY = "uid=NODAY_KEY,{}".format(DAYOFWEEK_OU_KEY) FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY) NETSCAPEIP_KEY = "uid=NETSCAPEIP_KEY,{}".format(IP_OU_KEY) NOIP_KEY = "uid=NOIP_KEY,{}".format(IP_OU_KEY) FULLWORKER_KEY = "uid=FULLWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) DAYWORKER_KEY = "uid=DAYWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) NIGHTWORKER_KEY = "uid=NIGHTWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) NOWORKER_KEY = "uid=NOWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): """ User can access the data when connecting from certain network only as per the ACI. :id: 4ec38296-7ac5-11e8-9816-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Turn access log buffering off to make less time consuming topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') # Find the ip from ds logs , as we need to know the exact ip used by ds to run the instances. 
# Wait till Access Log is generated topo.standalone.restart() old_hostname = socket.gethostname() socket.sethostname('localhost') hostname = socket.gethostname() IP = socket.gethostbyname(hostname) # Add ACI domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci "IP aci"; ' f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and (ip = "127.0.0.1" or ip = "::1" or ip = "{IP}") ;)') # create a new connection for the test new_uri = topo.standalone.ldapuri.replace(old_hostname, hostname) topo.standalone.ldapuri = new_uri conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM) # Perform Operation topo.standalone.config.set('nsslapd-errorlog-level', '128') org = OrganizationalUnit(conn, IP_OU_KEY) topo.standalone.host = hostname org.replace("seeAlso", "cn=1") # remove the aci domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci ' f'"IP aci"; allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ' f'(ip = "127.0.0.1" or ip = "::1" or ip = "{IP}") ;)') # Now add aci with new ip domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")(version 3.0; aci "IP aci"; ' f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "100.1.1.1" ;)') # After changing the ip user cant access data with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_connection_from_an_unauthorized_network(topo, add_user, aci_of_user): """ User cannot access the data when connectin from an unauthorized network as per the ACI. :id: 52d1ecce-7ac5-11e8-9ad9-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ old_hostname = socket.gethostname() socket.sethostname('localhost') hostname = socket.gethostname() # Add ACI domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "IP aci"; ' f'allow(all) userdn = "ldap:///{NETSCAPEIP_KEY}" ' f'and (ip != "127.0.0.1" and ip != "::1") ;)') # create a new connection for the test new_uri = topo.standalone.ldapuri.replace(old_hostname, hostname) topo.standalone.ldapuri = new_uri conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM) # Perform Operation topo.standalone.config.set('nsslapd-errorlog-level', '128') org = OrganizationalUnit(conn, IP_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") # Remove the ACI domain.ensure_removed('aci', domain.get_attr_vals('aci')[-1]) # Add new ACI domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")' f'(version 3.0; aci "IP aci"; allow(all) ' f'userdn = "ldap:///{NETSCAPEIP_KEY}" and (ip = "127.0.0.1" or ip = "::1") ;)') time.sleep(1) # now user can access data org.replace("seeAlso", "cn=1") def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user): """ User NoIP cannot assess the data as per the ACI. :id: 570bc7f6-7ac5-11e8-88c1-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target ="ldap:///{IP_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "IP aci"; allow(all) ' f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)') # Create a new connection for this test. 
conn = UserAccount(topo.standalone, NOIP_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, IP_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_user_can_access_the_data_at_any_time(topo, add_user, aci_of_user): """ User can access the data at any time as per the ACI. :id: 5b4da91a-7ac5-11e8-bbda-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' f'allow(all) userdn ="ldap:///{FULLWORKER_KEY}" and ' f'(timeofday >= "0000" and timeofday <= "2359") ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, FULLWORKER_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) org.replace("seeAlso", "cn=1") def test_user_can_access_the_data_only_in_the_morning(topo, add_user, aci_of_user): """ User can access the data only in the morning as per the ACI. :id: 5f7d380c-7ac5-11e8-8124-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' f'allow(all) userdn = "ldap:///{DAYWORKER_KEY}" ' f'and timeofday < "1200" ;)') # Create a new connection for this test. 
conn = UserAccount(topo.standalone, DAYWORKER_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) if datetime.now().hour >= 12: with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") else: org.replace("seeAlso", "cn=1") def test_user_can_access_the_data_only_in_the_afternoon(topo, add_user, aci_of_user): """ User can access the data only in the afternoon as per the ACI. :id: 63eb5b1c-7ac5-11e8-bd46-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' f'allow(all) userdn = "ldap:///{NIGHTWORKER_KEY}" ' f'and timeofday > \'1200\' ;)') # create a new connection for the test conn = UserAccount(topo.standalone, NIGHTWORKER_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) if datetime.now().hour < 12: with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") else: org.replace("seeAlso", "cn=1") def test_timeofday_keyword(topo, add_user, aci_of_user): """ User NOWORKER_KEY can access the data as per the ACI after removing ACI it cant. :id: 681dd58e-7ac5-11e8-bed1-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ now = time.strftime("%c") now_1 = "".join(now.split()[3].split(":"))[:4] # Add ACI domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Timeofday aci"; ' f'allow(all) userdn = "ldap:///{NOWORKER_KEY}" ' f'and timeofday = \'{now_1}\' ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NOWORKER_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) org.replace("seeAlso", "cn=1") # Remove ACI aci = domain.get_attr_vals_utf8('aci')[-1] domain.ensure_removed('aci', aci) assert aci not in domain.get_attr_vals_utf8('aci') # after removing the ACI user cannot access the data time.sleep(1) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_dayofweek_keyword_test_everyday_can_access(topo, add_user, aci_of_user): """ User can access the data EVERYDAY_KEY as per the ACI. :id: 6c5922ca-7ac5-11e8-8f01-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' f'allow(all) userdn = "ldap:///{EVERYDAY_KEY}" and ' f'dayofweek = "Sun, Mon, Tue, Wed, Thu, Fri, Sat" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, EVERYDAY_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) org.replace("seeAlso", "cn=1") def test_dayofweek_keyword_today_can_access(topo, add_user, aci_of_user): """ User can access the data one day per week as per the ACI. :id: 7131dc88-7ac5-11e8-acc2-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. 
User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ today_1 = time.strftime("%c").split()[0] # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' f'and dayofweek = \'{today_1}\' ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, TODAY_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) org.replace("seeAlso", "cn=1") def test_user_cannot_access_the_data_at_all(topo, add_user, aci_of_user): """ User cannot access the data at all as per the ACI. :id: 75cdac5e-7ac5-11e8-968a-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; ' f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' f'and dayofweek = "$NEW_DATE" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NODAY_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/keywords_test.py000066400000000000000000000404161421664411400263740ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- """ This test script will test wrong/correct key value with ACIs. """ import os import socket import pytest from lib389.idm.account import Anonymous from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.domain import Domain from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.user import UserAccount import ldap pytestmark = pytest.mark.tier1 KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX) DNS_OU_KEY = "ou=DNS,{}".format(KEYWORDS_OU_KEY) IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY) FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY) AUTHMETHOD_OU_KEY = "ou=Authmethod,{}".format(KEYWORDS_OU_KEY) SIMPLE_1_KEY = "uid=SIMPLE_1_KEY,{}".format(AUTHMETHOD_OU_KEY) FULLDNS_KEY = "uid=FULLDNS_KEY,{}".format(DNS_OU_KEY) SUNDNS_KEY = "uid=SUNDNS_KEY,{}".format(DNS_OU_KEY) NODNS_KEY = "uid=NODNS_KEY,{}".format(DNS_OU_KEY) NETSCAPEDNS_KEY = "uid=NETSCAPEDNS_KEY,{}".format(DNS_OU_KEY) NONE_1_KEY = "uid=NONE_1_KEY,{}".format(AUTHMETHOD_OU_KEY) NONE_2_KEY = "uid=NONE_2_KEY,{}".format(AUTHMETHOD_OU_KEY) NONE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ f'allow(all) userdn = "ldap:///{NONE_1_KEY}" and authmethod = "none" ;)' SIMPLE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \ f'allow(all) userdn = "ldap:///{SIMPLE_1_KEY}" and authmethod = "simple" ;)' def _add_aci(topo, name): """ This function will add ACI to DEFAULT_SUFFIX """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", name) def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_of_user): """User binds with a password and can access the data as per the ACI. :id: f6c4b6f0-7ac4-11e8-a517-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Add ACI _add_aci(topo, NONE_ACI_KEY) # Create a new connection for this test. conn = UserAccount(topo.standalone, NONE_1_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_user, aci_of_user): """User binds with a BAD password and cannot access the data . :id: 0397744e-7ac5-11e8-bfb1-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # User binds with a bad password and cannot access the data with pytest.raises(ldap.UNWILLING_TO_PERFORM): UserAccount(topo.standalone, NONE_1_KEY).bind("") def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): """Anonymous user cannot access the data :id: 0821a55c-7ac5-11e8-b214-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI _add_aci(topo, NONE_ACI_KEY) # Create a new connection for this test. conn = Anonymous(topo.standalone).bind() # Perform Operation org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user): """User has a password. He is authenticated but has no rigth on the data. :id: 11be7ebe-7ac5-11e8-b754-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI _add_aci(topo, NONE_ACI_KEY) # Create a new connection for this test. 
conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user): """The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI _add_aci(topo, SIMPLE_ACI_KEY) # Create a new connection for this test. conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") def test_users_binds_with_a_password_and_can_access_the_data( topo, add_user, aci_of_user): """User binds with a password and can access the data as per the ACI. :id: 1bd01cb4-7ac5-11e8-a2f1-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI _add_aci(topo, SIMPLE_ACI_KEY) # Create a new connection for this test. conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_user, aci_of_user): """User binds without any password and cannot access the data :id: 205777fa-7ac5-11e8-ba2f-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Add ACI _add_aci(topo, SIMPLE_ACI_KEY) # Create a new connection for this test. conn = Anonymous(topo.standalone).bind() # Perform Operation org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_user_can_access_the_data_when_connecting_from_any_machine( topo, add_user, aci_of_user ): """User can access the data when connecting from any machine as per the ACI. :id: 28cbc008-7ac5-11e8-934e-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX)\ .add("aci", f'(target ="ldap:///{DNS_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{FULLDNS_KEY}" and dns = "*" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, FULLDNS_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( topo, add_user, aci_of_user ): """User can access the data when connecting from internal ICNC network only as per the ACI. :id: 2cac2136-7ac5-11e8-8328-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ dns_name = socket.getfqdn() # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", [f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' f'(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{SUNDNS_KEY}" and ' f'(dns = "*redhat.com" or dns = "{dns_name}");)']) # Create a new connection for this test. conn = UserAccount(topo.standalone, SUNDNS_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_user_can_access_the_data_when_connecting_from_some_network_only( topo, add_user, aci_of_user ): """User can access the data when connecting from some network only as per the ACI. :id: 3098512a-7ac5-11e8-af85-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ dns_name = socket.getfqdn() # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX)\ .add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' f'and dns = "{dns_name}" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_from_an_unauthorized_network(topo, add_user, aci_of_user): """User cannot access the data when connecting from an unauthorized network as per the ACI. :id: 34cf9726-7ac5-11e8-bc12-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{NETSCAPEDNS_KEY}" and dns != "red.iplanet.com" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( topo, add_user, aci_of_user): """User cannot access the data when connecting from an unauthorized network as per the ACI. :id: 396bdd44-7ac5-11e8-8014-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' f'and dnsalias != "www.redhat.com" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) # Perform Operation OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user, aci_of_user): """User cannot access the data if not from a certain domain as per the ACI. :id: 3d658972-7ac5-11e8-930f-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' f'(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{NODNS_KEY}" ' f'and dns = "RAP.rock.SALSA.house.COM" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): """Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. :id: 41b467be-7ac5-11e8-89a3-8c16451d917b :customerscenario: True :setup: Standalone Server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ # Add ACI Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' f'(version 3.0; aci "DNS aci"; allow(all) ' f'userdn = "ldap:///{NODNS_KEY}" and ' f'dnsalias = "RAP.rock.SALSA.house.COM" ;)') # Create a new connection for this test. conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) # Perform Operation org = OrganizationalUnit(conn, DNS_OU_KEY) with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.ds50378 @pytest.mark.bz1710848 @pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr): """User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses :id: 461e761e-7ac5-11e8-9ae4-8c16451d917b :customerscenario: True :parametrized: yes :setup: Standalone Server :steps: 1. Add ACI that has both IPv4 and IPv6 2. Connect from one of the IPs allowed in ACI 3. 
Modify an attribute :expectedresults: 1. ACI should be added 2. Conection should be successful 3. Operation should be successful """ # Add ACI that contains both IPv4 and IPv6 Domain(topo.standalone, DEFAULT_SUFFIX).\ add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr="*") ' f'(version 3.0; aci "IP aci"; allow(all) ' f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)') # Create a new connection for this test. conn = UserAccount(topo.standalone, FULLIP_KEY).bind(PW_DM, uri=f'ldap://{ip_addr}:{topo.standalone.port}') # Perform Operation OrganizationalUnit(conn, IP_OU_KEY).replace("seeAlso", "cn=1") if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/misc_test.py000066400000000000000000000443711421664411400254640ustar00rootroot00000000000000""" # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 RED Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- """ import ldap import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM from lib389.idm.user import UserAccount, UserAccounts from lib389._mapped_object import DSLdapObject from lib389.idm.account import Accounts, Anonymous from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits from lib389.idm.group import Group, Groups from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain from lib389.plugins import ACLPlugin pytestmark = pytest.mark.tier1 PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) DYNGROUP = "cn=DYNGROUP,{}".format(PEOPLE) CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): """ :param request: :param topo: """ # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = suffix.get_attr_vals('aci') def finofaci(): """ Removes and Restores ACIs after the test. """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.remove_all('aci') for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def clean(request, topo): """ :param request: :param topo: """ ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) try: for i in ['Product Development', 'Accounting']: ous.create(properties={'ou': i}) except ldap.ALREADY_EXISTS as eoor_eoor: topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) def fin(): """ Deletes entries after the test. 
""" for scope_scope in [CONTAINER_1_DELADD, CONTAINER_2_DELADD, PEOPLE]: try: DSLdapObject(topo.standalone, scope_scope).delete() except ldap.ALREADY_EXISTS as eoor_eoor: topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) request.addfinalizer(fin) def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): """Misc Test 2 accept aci in addition to acl :id: 8e9408fa-7db8-11e8-adaa-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=product development') user = uas.create_test_user() for i in [('mail', 'anujborah@okok.com'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: user.set(i[0], i[1]) aci_target = '(targetattr="givenname")' aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') aci_subject = 'userdn="ldap:///anyone";)' Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) conn = Anonymous(topo.standalone).bind() # aci will block targetattr=givenname to anyone user = UserAccount(conn, user.dn) with pytest.raises(AssertionError): assert user.get_attr_val_utf8('givenname') == 'Anuj' # aci will allow targetattr=uid to anyone assert user.get_attr_val_utf8('uid') == 'test_user_1000' for i in uas.list(): i.delete() @pytest.mark.bz334451 def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): """bug 334451 : more then 40 acl will crash slapd superseded by Bug 772778 - acl cache overflown problem with > 200 acis :id: 93a44c60-7db8-11e8-9439-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') user = uas.create_test_user() aci_target = '(target ="ldap:///{}")(targetattr!="userPassword")'.format(CONTAINER_1_DELADD) # more_then_40_acl_will not crash_slapd for i in range(40): aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) aci_subject = 'userdn="ldap:///anyone";)' aci_body = aci_target + aci_allow + aci_subject Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_body) conn = Anonymous(topo.standalone).bind() assert UserAccount(conn, user.dn).get_attr_val_utf8('uid') == 'test_user_1000' for i in uas.list(): i.delete() @pytest.mark.bz345643 def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): """bug 345643 Misc Test 4 search access should not include read access :id: 98ab173e-7db8-11e8-a309-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') Domain(topo.standalone, DEFAULT_SUFFIX)\ .replace("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr != "userPassword")' '(version 3.0;acl "anonymous access";allow (search)' '(userdn = "ldap:///anyone");)', f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' 'acl "allow self write";allow(write) ' 'userdn = "ldap:///self";)', f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' 'acl "Allow all admin group"; allow(all) groupdn = "ldap:///cn=Directory ' 'Administrators, {}";)']) conn = Anonymous(topo.standalone).bind() # search_access_should_not_include_read_access suffix = Domain(conn, DEFAULT_SUFFIX) with pytest.raises(Exception): assert suffix.present('aci') def test_only_allow_some_targetattr(topo, clean, aci_of_user): """Misc Test 5 only allow some targetattr (1/2) :id: 9d27f048-7db8-11e8-a71c-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) for i in range(1, 3): user = uas.create_test_user(uid=i, gid=i) user.replace_many(('cn', 'Anuj1'), ('mail', 'annandaBorah@anuj.com')) Domain(topo.standalone, DEFAULT_SUFFIX).\ replace("aci", '(target="ldap:///{}")(targetattr="mail||objectClass")' '(version 3.0; acl "Test";allow (read,search,compare) ' '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) conn = Anonymous(topo.standalone).bind() accounts = Accounts(conn, DEFAULT_SUFFIX) # aci will allow only mail targetattr assert len(accounts.filter('(mail=*)')) == 2 # aci will allow only mail targetattr assert not accounts.filter('(cn=*)', scope=1) # with root no , blockage assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)', scope=1)) == 2 for i in uas.list(): i.delete() def test_only_allow_some_targetattr_two(topo, clean, aci_of_user, request): """Misc Test 6 only allow some targetattr (2/2)" :id: a188239c-7db8-11e8-903e-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) for i in range(5): user = uas.create_test_user(uid=i, gid=i) user.replace_many(('mail', 'anujborah@anujborah.com'), ('cn', 'Anuj'), ('userPassword', PW_DM)) user1 = uas.create_test_user() user1.replace_many(('mail', 'anujborah@anujborah.com'), ('userPassword', PW_DM)) Domain(topo.standalone, DEFAULT_SUFFIX).\ replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' '(targetfilter="cn=Anuj") (version 3.0; acl "{}"; ' 'allow (compare,read,search) ' '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name)) conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) # aci will allow only mail targetattr but only for cn=Anuj account = Accounts(conn, DEFAULT_SUFFIX) assert len(account.filter('(mail=*)', scope=1)) == 5 assert not account.filter('(cn=*)', scope=1) for i in account.filter('(mail=*)'): assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' conn = Anonymous(topo.standalone).bind() # aci will allow only mail targetattr but only for cn=Anuj account = Accounts(conn, DEFAULT_SUFFIX) assert len(account.filter('(mail=*)', scope=1)) == 5 assert not account.filter('(cn=*)', scope=1) for i in account.filter('(mail=*)'): assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' # with root no blockage assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(mail=*)')) == 6 for i in uas.list(): i.delete() @pytest.mark.bz326000 def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): """Non-regression test for BUG 326000: MemberURL needs to be normalized :id: a5d172e6-7db8-11e8-aca7-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) ou_ou.set('aci', '(targetattr="*")' '(version 3.0; acl "tester"; allow(all) ' 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=PEOPLE') groups.create(properties={"cn": "DYNGROUP", "description": "DYNGROUP", 'objectClass': 'groupOfURLS', 'memberURL': "ldap:///ou=PEOPLE,{}??sub?" "(uid=test_user_2)".format(DEFAULT_SUFFIX)}) uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for demo1 in [(1, "Entry to test rights on."), (2, "Member of DYNGROUP")]: user = uas.create_test_user(uid=demo1[0], gid=demo1[0]) user.replace_many(('description', demo1[1]), ('userPassword', PW_DM)) ##with normal aci conn = UserAccount(topo.standalone, uas.list()[1].dn).bind(PW_DM) harry = UserAccount(conn, uas.list()[1].dn) harry.add('sn', 'FRED') ##with abnomal aci dygrp = Group(topo.standalone, DYNGROUP) dygrp.remove('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=test_user_2)".format(DEFAULT_SUFFIX)) dygrp.add('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=tesT_UsEr_2)".format(DEFAULT_SUFFIX)) harry.add('sn', 'Not FRED') for i in uas.list(): i.delete() @pytest.mark.bz624370 def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): """Misc 10, check that greater than 200 ACLs can be created. Bug 624370 :id: ac020252-7db8-11e8-8652-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # greater_than_200_acls_can_be_created uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(200): user = uas.create_test_user(uid=i, gid=i) user.set('aci', '(targetattr = "description")' '(version 3.0;acl "foo{}"; allow (read, search, compare)' '(userdn="ldap:///anyone");)'.format(i)) assert user.\ get_attr_val_utf8('aci') == '(targetattr = "description")' \ '(version 3.0;acl "foo{}"; allow ' \ '(read, search, compare)' \ '(userdn="ldap:///anyone");)'.format(i) for i in uas.list(): i.delete() @pytest.mark.bz624453 def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): """Make sure the server bahaves properly with very long attribute names. Bug 624453. :id: b0d31942-7db8-11e8-a833-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) users.create_test_user() users.list()[0].set('userpassword', PW_DM) user = UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) with pytest.raises(ldap.INVALID_SYNTAX): user.add("aci", "a" * 9000) def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): """Test bind as 201 distinct users :id: c0060532-7db8-11e8-a124-8c16451d917b :setup: Standalone Instance :steps: 1. Add test entries 2. Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config 3. Restart the server 4. Do bind as 201 distinct users :expectedresults: 1. Entries should be added 2. Operation should succeed 3. Operation should succeed 4. 
Operation should succeed """ uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(201): user = uas.create_test_user(uid=i, gid=i) user.set('userPassword', PW_DM) users = uas.list() for user in users: user.bind(PW_DM) ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') topo.standalone.restart() users = uas.list() for user in users: user.bind(PW_DM) def test_info_disclosure(request, topo): """Test that a search returns 32 when base entry does not exist :id: f6dec4c2-65a3-41e4-a4c0-146196863333 :setup: Standalone Instance :steps: 1. Add aci 2. Add test user 3. Bind as user and search for non-existent entry :expectedresults: 1. Success 2. Success 3. Error 32 is returned """ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)" ACI_SUBJECT = "(userdn=\"ldap:///all\");)" ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT # Get current ACi's so we can restore them when we are done suffix = Domain(topo.standalone, DEFAULT_SUFFIX) preserved_acis = suffix.get_attr_vals_utf8('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) try: domain.remove_all('aci') domain.replace_values('aci', preserved_acis) except: pass request.addfinalizer(finofaci) # Remove aci's suffix.remove_all('aci') # Add test user USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) users.create(properties={ 'uid': 'test', 'cn': 'test', 'sn': 'test', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/test', 'userPassword': PW_DM }) # bind as user conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) # Search fo existing base DN test = Domain(conn, DEFAULT_SUFFIX) try: test.get_attr_vals_utf8_l('dc') assert False except IndexError: pass # Search for a non existent bases subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) try: 
subtree.get_attr_vals_utf8_l('objectclass') except IndexError: pass subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) try: subtree.get_attr_vals_utf8_l('objectclass') except IndexError: pass # Try ONE level search instead of BASE try: Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", scope=ldap.SCOPE_ONELEVEL) except IndexError: pass # add aci suffix.add('aci', ACI) # Search for a non existent entry which should raise an exception with pytest.raises(ldap.NO_SUCH_OBJECT): conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) subtree.get_attr_vals_utf8_l('objectclass') with pytest.raises(ldap.NO_SUCH_OBJECT): conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) subtree.get_attr_vals_utf8_l('objectclass') with pytest.raises(ldap.NO_SUCH_OBJECT): conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX Accounts(conn, DN).filter("(objectclass=top)", scope=ldap.SCOPE_ONELEVEL, strict=True) if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/modify_test.py000066400000000000000000000542421421664411400260160ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount from lib389.idm.account import Anonymous from lib389.idm.group import Group, UniqueGroup from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.group import Groups from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) KIRSTENVAUGHAN = "cn=Kirsten Vaughan, ou=Human Resources, {}".format(DEFAULT_SUFFIX) HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def cleanup_tree(request, topo): def fin(): for i in [USER_DELADD, USER_WITH_ACI_DELADD, KIRSTENVAUGHAN, CONTAINER_1_DELADD, CONTAINER_2_DELADD, HUMAN_OU_GLOBAL]: try: UserAccount(topo.standalone, i).delete() except: pass request.addfinalizer(fin) @pytest.fixture(scope="function") def aci_of_user(request, topo): # Add anonymous access aci ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = suffix.get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) def test_allow_write_access_to_targetattr_with_a_single_attribute( topo, aci_of_user, cleanup_tree): """Modify Test 1 Allow write access to targetattr with a single attribute :id: 
620d7b82-7abf-11e8-a4db-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targetattr = "title")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Product Development'}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) # Allow write access to targetattr with a single attribute conn = Anonymous(topo.standalone).bind() ua = UserAccount(conn, USER_DELADD) ua.add("title", "Architect") assert ua.get_attr_val('title') ua.remove("title", "Architect") def test_allow_write_access_to_targetattr_with_multiple_attibutes( topo, aci_of_user, cleanup_tree): """Modify Test 2 Allow write access to targetattr with multiple attibutes :id: 6b9f05c6-7abf-11e8-9ba1-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr = "telephonenumber || roomnumber")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Product Development'}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) # Allow write access to targetattr with multiple attibutes conn = Anonymous(topo.standalone).bind() ua = UserAccount(conn, USER_DELADD) ua.add("telephonenumber", "+1 408 555 1212") assert ua.get_attr_val('telephonenumber') ua.add("roomnumber", "101") assert ua.get_attr_val('roomnumber') def test_allow_write_access_to_userdn_all(topo, aci_of_user, cleanup_tree): """Modify Test 3 Allow write access to userdn 'all' :id: 70c58818-7abf-11e8-afa1-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///all") ;)' Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) # Allow write access to userdn 'all' conn = Anonymous(topo.standalone).bind() with pytest.raises(ldap.INSUFFICIENT_ACCESS): UserAccount(conn, USER_DELADD).add("title", "Architect") conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) UserAccount(conn, USER_DELADD).add("title", "Architect") assert UserAccount(conn, USER_DELADD).get_attr_val('title') def test_allow_write_access_to_userdn_with_wildcards_in_dn( topo, aci_of_user, cleanup_tree): """Modify Test 4 Allow write access to userdn with wildcards in DN :id: 766c2312-7abf-11e8-b57d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///cn=*, ou=Product Development,{}") ;)'.format(DEFAULT_SUFFIX) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Product Development'}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) # Allow write access to userdn with wildcards in DN ua = UserAccount(conn, USER_DELADD) ua.add("title", "Architect") assert ua.get_attr_val('title') def test_allow_write_access_to_userdn_with_multiple_dns(topo, aci_of_user, cleanup_tree): """Modify Test 5 Allow write access to userdn with multiple DNs :id: 7aae760a-7abf-11e8-bc3a-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///{} || ldap:///{}") ;)'.format(USER_DELADD, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting', 'Human Resources']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) # Allow write access to userdn with multiple DNs ua = UserAccount(conn, KIRSTENVAUGHAN) ua.add("title", "Architect") conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Allow write access to userdn with multiple DNs ua = UserAccount(conn, USER_DELADD) ua.add("title", "Architect") assert ua.get_attr_val('title') def test_allow_write_access_to_target_with_wildcards(topo, aci_of_user, cleanup_tree): """Modify Test 6 Allow write access to target with wildcards :id: 825fe884-7abf-11e8-8541-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(target = ldap:///{})(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting', 'Human Resources']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) # Allow write access to target with wildcards ua = UserAccount(conn, KIRSTENVAUGHAN) ua.add("title", "Architect") assert ua.get_attr_val('title') conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Allow write access to target with wildcards ua = UserAccount(conn, USER_DELADD) ua.add("title", "Architect") assert ua.get_attr_val('title') def test_allow_write_access_to_userdnattr(topo, aci_of_user, cleanup_tree, request): """Modify Test 7 Allow write access to userdnattr :id: 86b418f6-7abf-11e8-ae28-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}";allow (write) (userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) UserAccount(topo.standalone, USER_WITH_ACI_DELADD).add('manager', USER_WITH_ACI_DELADD) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Allow write access to userdnattr ua = UserAccount(conn, USER_DELADD) ua.add('uid', 'scoobie') assert ua.get_attr_val('uid') ua.add('uid', 'jvedder') assert ua.get_attr_val('uid') def test_allow_selfwrite_access_to_anyone(topo, aci_of_user, cleanup_tree): """Modify Test 8 Allow selfwrite access to anyone :id: 8b3becf0-7abf-11e8-ac34-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={"cn": "group1", "description": "testgroup"}) ACI_BODY = '(target = ldap:///cn=group1,ou=Groups,{})(targetattr = "member")(version 3.0; acl "ACI NAME"; allow (selfwrite) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Product Development'}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) # Allow selfwrite access to anyone groups = Groups(conn, DEFAULT_SUFFIX) groups.list()[1].add_member(USER_DELADD) def test_uniquemember_should_also_be_the_owner(topo, aci_of_user): """Modify Test 10 groupdnattr = \"ldap:///$BASEDN?owner\" if owner is a group, group's uniquemember should also be the owner :id: 9456b2d4-7abf-11e8-829d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ for i in ['ACLGroupTest']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) ou = OrganizationalUnit(topo.standalone, "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'ACLDevelopment'}) ou.set('aci','(targetattr="*")(version 3.0; acl "groupdnattr acl"; ' 'allow (all)groupdnattr = "ldap:///{}?owner";)'.format(DEFAULT_SUFFIX)) grp = UniqueGroup(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) user_props = ( {'sn': 'Borah', 'cn': 'Anuj', 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], 'userpassword': PW_DM, 'givenname': 'Anuj', 'ou': ['ACLDevelopment', 'People'], 'roomnumber': '123', 'uniquemember': 'cn=mandatory member' } ) grp.create(properties=user_props) grp = UniqueGroup(topo.standalone, "uid=2ishani,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) user_props = ( {'sn': 'Borah', 'cn': '2ishani', 'objectclass': ['top', 'person','organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], 'userpassword': PW_DM, 'givenname': '2ishani', 'ou': ['ACLDevelopment', 'People'], 'roomnumber': '1234', 'uniquemember': 'cn=mandatory member', "owner": "cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX) } ) grp.create(properties=user_props) grp = UniqueGroup(topo.standalone, 'cn=group1,ou=ACLGroupTest,'+DEFAULT_SUFFIX) grp.create(properties={'cn': 'group1', 'ou': 'groups'}) grp.set('uniquemember', ["cn=group2, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX), "cn=group3, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) grp = UniqueGroup(topo.standalone, 'cn=group3,ou=ACLGroupTest,' + DEFAULT_SUFFIX) grp.create(properties={'cn': 'group3', 'ou': 'groups'}) grp.set('uniquemember', ["cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) grp = UniqueGroup(topo.standalone, 'cn=group4,ou=ACLGroupTest,' + DEFAULT_SUFFIX) grp.create(properties={ 'cn': 'group4', 'ou': 'groups'}) grp.set('uniquemember', 
["uid=anuj, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)]) #uniquemember should also be the owner conn = UserAccount(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) ua = UserAccount(conn, "uid=2ishani, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) ua.add('roomnumber', '9999') assert ua.get_attr_val('roomnumber') for DN in ["cn=group4,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), "cn=group3,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), "cn=group1,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), "uid=2ishani,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "uid=anuj,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]: UserAccount(topo.standalone, DN).delete() def test_aci_with_both_allow_and_deny(topo, aci_of_user, cleanup_tree): """Modify Test 12 aci with both allow and deny :id: 9dcfe902-7abf-11e8-86dc-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; deny (read, search)userdn = "ldap:///{}"; allow (all) userdn = "ldap:///{}" ;)'.format(USER_WITH_ACI_DELADD, USER_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) # aci with both allow and deny, testing allow assert UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid') conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci with both allow and deny, testing deny with pytest.raises(IndexError): UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid') def test_allow_owner_to_modify_entry(topo, aci_of_user, cleanup_tree, request): """Modify Test 14 allow userdnattr = owner to modify entry :id: aa302090-7abf-11e8-811a-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ grp = UniqueGroup(topo.standalone, 'cn=intranet,' + DEFAULT_SUFFIX) grp.create(properties={ 'cn': 'intranet', 'ou': 'groups'}) grp.set('owner', USER_WITH_ACI_DELADD) ACI_BODY = '(target ="ldap:///cn=intranet, {}") (targetattr ="*")(targetfilter ="(objectclass=groupOfUniqueNames)") (version 3.0;acl "{}";allow(read, write, delete, search, compare, add) (userdnattr = "owner");)'.format(DEFAULT_SUFFIX, request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) for i in ['Product Development', 'Accounting']: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: properties = { 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM } user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # allow userdnattr = owner to modify entry ua = UserAccount(conn, 'cn=intranet,dc=example,dc=com') ua.set('uniquemember', "cn=Andy Walker, ou=Accounting,dc=example,dc=com") assert ua.get_attr_val('uniquemember') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/modrdn_test.py000066400000000000000000000302741421664411400260110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount from lib389.idm.account import Anonymous from lib389.idm.group import Group, UniqueGroup from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) DYNAMIC_MODRDN = "cn=Test DYNAMIC_MODRDN Group 70, {}".format(DEFAULT_SUFFIX) SAM_DAMMY_MODRDN = "cn=Sam Carter1,ou=Accounting,{}".format(DEFAULT_SUFFIX) TRAC340_MODRDN = "cn=TRAC340_MODRDN,{}".format(DEFAULT_SUFFIX) NEWENTRY9_MODRDN = "cn=NEWENTRY9_MODRDN,{}".format("ou=People,{}".format(DEFAULT_SUFFIX)) OU0_OU_MODRDN = "ou=OU0,{}".format(DEFAULT_SUFFIX) OU2_OU_MODRDN = "ou=OU2,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def _add_user(request, topo): ou = OrganizationalUnit(topo.standalone, 'ou=Product Development,{}'.format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Product Development'}) ou = OrganizationalUnit(topo.standalone, 'ou=Accounting,{}'.format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Accounting'}) groups = Group(topo.standalone, DYNAMIC_MODRDN) group_properties = {"cn": "Test DYNAMIC_MODRDN Group 70", "objectclass": ["top", 'groupofURLs'], 'memberURL': 'ldap:///{}??base?(cn=*)'.format(USER_WITH_ACI_DELADD)} 
groups.create(properties=group_properties) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=Jeff Vedder,ou=Product Development,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'Sam Carter', 'cn': 'Sam Carter', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'SamCarter', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=Sam Carter,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) def fin(): for DN in [USER_DELADD,USER_WITH_ACI_DELADD,DYNAMIC_MODRDN,CONTAINER_2_DELADD,CONTAINER_1_DELADD]: UserAccount(topo.standalone, DN).delete() request.addfinalizer(fin) def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user, request): """Modrdn Test 1 Allow write privilege to anyone :id: 4406f12e-7932-11e8-9dea-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr="*")(version 3.0;acl "{}";allow ' '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) conn = Anonymous(topo.standalone).bind() # Allow write privilege to anyone useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) useraccount.rename("cn=Jeff Vedder") assert 'cn=Jeff Vedder,ou=Accounting,dc=example,dc=com' == useraccount.dn useraccount = UserAccount(conn, "cn=Jeff Vedder,ou=Accounting,dc=example,dc=com") useraccount.rename("cn=Sam Carter") assert 'cn=Sam Carter,ou=Accounting,dc=example,dc=com' == useraccount.dn def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url( topo, _add_user, aci_of_user, request ): """Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL :id: 4c0f8c00-7932-11e8-8398-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, request.node.name, DYNAMIC_MODRDN)) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL useraccount = UserAccount(conn, USER_DELADD) useraccount.rename("cn=Jeffbo Vedder") assert 'cn=Jeffbo Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn useraccount = UserAccount(conn, "cn=Jeffbo Vedder,{}".format(CONTAINER_1_DELADD)) useraccount.rename("cn=Jeff Vedder") assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user, request): """Test for write access to naming atributes Test that check for add writes to the new naming attr :id: 532fc630-7932-11e8-8924-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) #Test for write access to naming atributes useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) with pytest.raises(ldap.INSUFFICIENT_ACCESS): useraccount.rename("uid=Jeffbo Vedder") def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user, request): """Test for write access to naming atributes (2) :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. :expectedresults: 1. 
Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should not succeed """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) properties = { 'uid': 'Sam Carter1', 'cn': 'Sam Carter1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'SamCarter1' } user = UserAccount(topo.standalone, 'cn=Sam Carter1,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set("userPassword", "password") conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # Test for write access to naming atributes useraccount = UserAccount(conn, SAM_DAMMY_MODRDN) with pytest.raises(ldap.INSUFFICIENT_ACCESS): useraccount.rename("uid=Jeffbo Vedder") UserAccount(topo.standalone, SAM_DAMMY_MODRDN).delete() @pytest.mark.bz950351 def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): """RHDS denies MODRDN access if ACI list contains any DENY rule Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour as you cannot rename the entry anymore :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b :setup: server :steps: 1. Add test entry 2. Adding a new ou ou=People to $BASEDN 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. 
Operation should succeed """ properties = { 'uid': 'NEWENTRY9_MODRDN', 'cn': 'NEWENTRY9_MODRDN_People', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'NEWENTRY9_MODRDN' } user = UserAccount(topo.standalone, 'cn=NEWENTRY9_MODRDN,ou=People,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set("userPassword", "password") user.set("telephoneNumber", "989898191") user.set("mail", "anuj@anuj.com") user.set("givenName", "givenName") user.set("uid", "NEWENTRY9_MODRDN") OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('People').add("aci", ['(targetattr = "*") ' '(version 3.0;acl "admin";allow (all)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN), '(targetattr = "mail") (version 3.0;acl "deny_mail";deny (write)(userdn = "ldap:///anyone");)', '(targetattr = "uid") (version 3.0;acl "allow uid";allow (write)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN)]) UserAccount(topo.standalone, NEWENTRY9_MODRDN).replace("userpassword", "Anuj") useraccount = UserAccount(topo.standalone, NEWENTRY9_MODRDN) useraccount.rename("uid=newrdnchnged") assert 'uid=newrdnchnged,ou=People,dc=example,dc=com' == useraccount.dn def test_renaming_target_entry(topo, _add_user, aci_of_user): """Test for renaming target entry :id: 6be1d33a-7932-11e8-9115-8c16451d917b :setup: server :steps: 1. Add test entry 2. Create a test user entry 3. Create a new ou entry with an aci 4. Make sure uid=$MYUID has the access 5. Rename ou=OU0 to ou=OU1 6. Create another ou=OU2 7. Move ou=OU1 under ou=OU2 8. Make sure uid=$MYUID still has the access :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed 4. Operation should succeed 5. Operation should succeed 6. Operation should succeed 7. Operation should succeed 8. 
Operation should succeed """ properties = { 'uid': 'TRAC340_MODRDN', 'cn': 'TRAC340_MODRDN', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'TRAC340_MODRDN' } user = UserAccount(topo.standalone, 'cn=TRAC340_MODRDN,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set("userPassword", "password") ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'OU0'}) ou.set('aci', '(targetattr="*")(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM) assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0') # Test for renaming target entry OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU0').rename("ou=OU1") assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') ou = OrganizationalUnit(topo.standalone, 'ou=OU2,{}'.format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'OU2'}) # Test for renaming target entry OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU1').rename("ou=OU1", newsuperior=OU2_OU_MODRDN) assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py000066400000000000000000000466521421664411400301360ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # from subprocess import Popen import pytest from lib389.paths import Paths from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DN_DM, DEFAULT_SUFFIX, PASSWORD, SERVERID_STANDALONE pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' BOU = 'BOU' BINDOU = 'ou=%s,%s' % (BOU, DEFAULT_SUFFIX) BUID = 'buser123' TUID = 'tuser0' BINDDN = 'uid=%s,%s' % (BUID, BINDOU) BINDPW = BUID TESTDN = 'uid=%s,ou=people,%s' % (TUID, DEFAULT_SUFFIX) TESTPW = TUID BOGUSDN = 'uid=bogus,%s' % DEFAULT_SUFFIX BOGUSDN2 = 'uid=bogus,ou=people,%s' % DEFAULT_SUFFIX BOGUSSUFFIX = 'uid=bogus,ou=people,dc=bogus' GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX def get_ldap_error_msg(e, type): return e.args[0][type] def pattern_accesslog(file, log_pattern): for i in range(5): try: pattern_accesslog.last_pos += 1 except AttributeError: pattern_accesslog.last_pos = 0 found = None file.seek(pattern_accesslog.last_pos) # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: line = file.readline() found = log_pattern.search(line) if ((line == '') or (found)): break pattern_accesslog.last_pos = file.tell() if found: return line else: time.sleep(1) return None def check_op_result(server, op, dn, superior, exists, rc): targetdn = dn if op == 'search': if exists: opstr = 'Searching existing entry' else: opstr = 'Searching non-existing entry' elif op == 'add': if exists: opstr = 'Adding existing entry' else: opstr = 'Adding non-existing entry' elif op == 'modify': if exists: opstr = 'Modifying existing entry' else: opstr = 'Modifying non-existing entry' elif op == 'modrdn': if superior is not None: targetdn = superior if exists: opstr = 'Moving to existing superior' else: opstr = 'Moving to non-existing superior' else: if exists: opstr = 
'Renaming existing entry' else: opstr = 'Renaming non-existing entry' elif op == 'delete': if exists: opstr = 'Deleting existing entry' else: opstr = 'Deleting non-existing entry' if ldap.SUCCESS == rc: expstr = 'be ok' else: expstr = 'fail with %s' % rc.__name__ log.info('%s %s, which should %s.' % (opstr, targetdn, expstr)) time.sleep(1) hit = 0 try: if op == 'search': centry = server.search_s(dn, ldap.SCOPE_BASE, 'objectclass=*') elif op == 'add': server.add_s(Entry((dn, {'objectclass': 'top extensibleObject'.split(), 'cn': 'test entry'}))) elif op == 'modify': server.modify_s(dn, [(ldap.MOD_REPLACE, 'description', b'test')]) elif op == 'modrdn': if superior is not None: server.rename_s(dn, 'uid=new', newsuperior=superior, delold=1) else: server.rename_s(dn, 'uid=new', delold=1) elif op == 'delete': server.delete_s(dn) else: log.fatal('Unknown operation %s' % op) assert False except ldap.LDAPError as e: hit = 1 log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert isinstance(e, rc) if 'matched' in e.args: log.info('Matched is returned: {}'.format(get_ldap_error_msg(e, 'matched'))) if rc != ldap.NO_SUCH_OBJECT: assert False if ldap.SUCCESS == rc: if op == 'search': log.info('Search should return none') assert len(centry) == 0 else: if 0 == hit: log.info('Expected to fail with %s, but passed' % rc.__name__) assert False log.info('PASSED\n') @pytest.mark.bz1347760 def test_repeated_ldap_add(topology_st): """Prevent revealing the entry info to whom has no access rights. :id: 76d278bd-3e51-4579-951a-753e6703b4df :setup: Standalone instance :steps: 1. Disable accesslog logbuffering 2. Bind as "cn=Directory Manager" 3. Add a organisational unit as BOU 4. Add a bind user as uid=buser123,ou=BOU,dc=example,dc=com 5. Add a test user as uid=tuser0,ou=People,dc=example,dc=com 6. Delete aci in dc=example,dc=com 7. Bind as Directory Manager, acquire an access log path and instance dir 8. 
Bind as uid=buser123,ou=BOU,dc=example,dc=com who has no right to read the entry 9. Bind as uid=bogus,ou=people,dc=bogus,bogus who does not exist 10. Bind as uid=buser123,ou=BOU,dc=example,dc=com,bogus with wrong password 11. Adding aci for uid=buser123,ou=BOU,dc=example,dc=com to ou=BOU,dc=example,dc=com. 12. Bind as uid=buser123,ou=BOU,dc=example,dc=com now who has right to read the entry :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. Operation should be successful 7. Operation should be successful 8. Bind operation should be successful with no search result 9. Bind operation should Fail 10. Bind operation should Fail 11. Operation should be successful 12. Bind operation should be successful with search result """ log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.') log.info('Disabling accesslog logbuffering') topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', b'off')]) log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info('Adding ou=%s a bind user belongs to.' % BOU) topology_st.standalone.add_s(Entry((BINDOU, { 'objectclass': 'top organizationalunit'.split(), 'ou': BOU}))) log.info('Adding a bind user.') topology_st.standalone.add_s(Entry((BINDDN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': 'bind user', 'sn': 'user', 'userPassword': BINDPW}))) log.info('Adding a test user.') topology_st.standalone.add_s(Entry((TESTDN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': 'test user', 'sn': 'user', 'userPassword': TESTPW}))) log.info('Deleting aci in %s.' 
% DEFAULT_SUFFIX) topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)]) log.info('While binding as DM, acquire an access log path and instance dir') ds_paths = Paths(serverid=topology_st.standalone.serverid, instance=topology_st.standalone) file_path = ds_paths.access_log inst_dir = ds_paths.inst_dir log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.') log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW)) try: topology_st.standalone.simple_bind_s(BINDDN, BINDPW) except ldap.LDAPError as e: log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert False file_obj = open(file_path, "r") log.info('Access log path: %s' % file_path) log.info( 'Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) log.info('Bind as {%s,%s} who does not exist.' % (BOGUSDN, 'bogus')) try: topology_st.standalone.simple_bind_s(BOGUSDN, 'bogus') except ldap.LDAPError as e: log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert isinstance(e, ldap.INVALID_CREDENTIALS) regex = re.compile('No such entry') cause = pattern_accesslog(file_obj, regex) if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: log.info('Cause found - %s' % cause) time.sleep(1) log.info( 'Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) log.info('Bind as {%s,%s} who does not exist.' % (BOGUSSUFFIX, 'bogus')) with pytest.raises(ldap.INVALID_CREDENTIALS): topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus') regex = re.compile('No suffix for bind') cause = pattern_accesslog(file_obj, regex) if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: log.info('Cause found - %s' % cause) time.sleep(1) log.info( 'Bind case 2-3. 
the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) log.info('Bind as {%s,%s} who does not exist.' % (BINDDN, 'bogus')) try: topology_st.standalone.simple_bind_s(BINDDN, 'bogus') except ldap.LDAPError as e: log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert isinstance(e, ldap.INVALID_CREDENTIALS) regex = re.compile('Invalid credentials') cause = pattern_accesslog(file_obj, regex) if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: log.info('Cause found - %s' % cause) time.sleep(1) log.info('Adding aci for %s to %s.' % (BINDDN, BINDOU)) acival = '(targetattr="*")(version 3.0; acl "%s"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) log.info('aci: %s' % acival) log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) time.sleep(1) log.info('Bind case 3. the bind user has the right to read the entry itself, bind should be successful.') log.info('Bind as {%s,%s} which should be ok.\n' % (BINDDN, BINDPW)) topology_st.standalone.simple_bind_s(BINDDN, BINDPW) log.info('The following operations are against the subtree the bind user %s has no rights.' % BINDDN) # Search exists = True rc = ldap.SUCCESS log.info( 'Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc) check_op_result(topology_st.standalone, 'search', TESTDN, None, exists, rc) exists = False rc = ldap.SUCCESS log.info( 'Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__) check_op_result(topology_st.standalone, 'search', BOGUSDN, None, exists, rc) exists = False rc = ldap.SUCCESS log.info( 'Search case 2-2. 
the search entry does not exist, the search should return no search results with %s' % rc.__name__) check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) # Add exists = True rc = ldap.INSUFFICIENT_ACCESS log.info( 'Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'add', BOGUSDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'add', BOGUSDN2, None, exists, rc) # Modify exists = True rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modify', TESTDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modify', BOGUSDN2, None, exists, rc) # Modrdn exists = True rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 1. 
the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', TESTDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', BOGUSDN2, None, exists, rc) exists = True rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) # Delete exists = True rc = ldap.INSUFFICIENT_ACCESS log.info( 'Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'delete', TESTDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Delete case 2-1. 
the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) exists = False rc = ldap.INSUFFICIENT_ACCESS log.info( 'Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'delete', BOGUSDN2, None, exists, rc) log.info('EXTRA: Check no regressions') log.info('Adding aci for %s to %s.' % (BINDDN, DEFAULT_SUFFIX)) acival = '(targetattr="*")(version 3.0; acl "%s-all"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) time.sleep(1) log.info('Bind as {%s,%s}.' % (BINDDN, BINDPW)) try: topology_st.standalone.simple_bind_s(BINDDN, BINDPW) except ldap.LDAPError as e: log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert False time.sleep(1) exists = False rc = ldap.NO_SUCH_OBJECT log.info('Search case. the search entry does not exist, the search should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) file_obj.close() exists = True rc = ldap.ALREADY_EXISTS log.info('Add case. the adding entry already exists, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) exists = False rc = ldap.NO_SUCH_OBJECT log.info('Modify case. the modifying entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) exists = False rc = ldap.NO_SUCH_OBJECT log.info('Modrdn case 1. 
the renaming entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) exists = False rc = ldap.NO_SUCH_OBJECT log.info('Modrdn case 2. the node moving an entry to does not, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) exists = False rc = ldap.NO_SUCH_OBJECT log.info('Delete case. the deleting entry does not exist, it should fail with %s' % rc.__name__) check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) log.info('Inactivate %s' % BINDDN) if ds_paths.version < '1.3': nsinactivate = '%s/ns-inactivate.pl' % inst_dir cli_cmd = [nsinactivate, '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN] else: dsidm = '%s/dsidm' % ds_paths.sbin_dir cli_cmd = [dsidm, SERVERID_STANDALONE, '-b', DEFAULT_SUFFIX, 'account', 'lock', BINDDN] log.info(cli_cmd) p = Popen(cli_cmd) assert (p.wait() == 0) log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__)) try: topology_st.standalone.simple_bind_s(BINDDN, BUID) except ldap.LDAPError as e: log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert isinstance(e, ldap.UNWILLING_TO_PERFORM) log.info('Bind as {%s,%s} which should fail with %s.' 
% (BINDDN, 'bogus', ldap.UNWILLING_TO_PERFORM.__name__)) try: topology_st.standalone.simple_bind_s(BINDDN, 'bogus') except ldap.LDAPError as e: log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) assert isinstance(e, ldap.UNWILLING_TO_PERFORM) log.info('SUCCESS') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/roledn_test.py000066400000000000000000000221661421664411400260120ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ This script will test different type of roles. """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccounts, UserAccount from lib389.idm.organizationalunit import OrganizationalUnits from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain from lib389.idm.role import NestedRoles, ManagedRoles, FilteredRoles from lib389.idm.account import Anonymous import ldap pytestmark = pytest.mark.tier1 OU_ROLE = f"ou=roledntest,{DEFAULT_SUFFIX}" STEVE_ROLE = f"uid=STEVE_ROLE,{OU_ROLE}" HARRY_ROLE = f"uid=HARRY_ROLE,{OU_ROLE}" MARY_ROLE = f"uid=MARY_ROLE,{OU_ROLE}" ROLE1 = f"cn=ROLE1,{OU_ROLE}" ROLE2 = f"cn=ROLE2,{OU_ROLE}" ROLE3 = f"cn=ROLE3,{OU_ROLE}" ROLE21 = f"cn=ROLE21,{OU_ROLE}" ROLE31 = f"cn=ROLE31,{OU_ROLE}" FILTERROLE = f"cn=FILTERROLE,{OU_ROLE}" JOE_ROLE = f"uid=JOE_ROLE,{OU_ROLE}" NOROLEUSER = f"uid=NOROLEUSER,{OU_ROLE}" SCRACHENTRY = f"uid=SCRACHENTRY,{OU_ROLE}" ALL_ACCESS = f"uid=all access,{OU_ROLE}" NOT_RULE_ACCESS = f"uid=not rule access,{OU_ROLE}" OR_RULE_ACCESS = f"uid=or rule access,{OU_ROLE}" NESTED_ROLE_TESTER = f"uid=nested role tester,{OU_ROLE}" 
@pytest.fixture(scope="function") def _aci_of_user(request, topo): """ Removes and Restores ACIs after the test. """ aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') def finofaci(): """ Removes and Restores ACIs after the test. """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.remove_all('aci') for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="module") def _add_user(request, topo): """ A Function that will create necessary users delete the created user """ ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou_ou = ous.create(properties={'ou': 'roledntest'}) ou_ou.set('aci', [f'(target="ldap:///{NESTED_ROLE_TESTER}")(targetattr="*") ' f'(version 3.0; aci "nested role aci"; allow(all)' f'roledn = "ldap:///{ROLE2}";)', f'(target="ldap:///{OR_RULE_ACCESS}")(targetattr="*")' f'(version 3.0; aci "or role aci"; allow(all) ' f'roledn = "ldap:///{ROLE1} || ldap:///{ROLE21}";)', f'(target="ldap:///{ALL_ACCESS}")(targetattr="*")' f'(version 3.0; aci "anyone role aci"; allow(all) ' f'roledn = "ldap:///anyone";)', f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr="*")' f'(version 3.0; aci "not role aci"; allow(all)' f'roledn != "ldap:///{ROLE1} || ldap:///{ROLE21}";)']) nestedroles = NestedRoles(topo.standalone, OU_ROLE) for i in [('role2', [ROLE1, ROLE21]), ('role3', [ROLE2, ROLE31])]: nestedroles.create(properties={'cn': i[0], 'nsRoleDN': i[1]}) managedroles = ManagedRoles(topo.standalone, OU_ROLE) for i in ['ROLE1', 'ROLE21', 'ROLE31']: managedroles.create(properties={'cn': i}) filterroles = FilteredRoles(topo.standalone, OU_ROLE) filterroles.create(properties={'cn': 'filterRole', 'nsRoleFilter': 'sn=Dr Drake', 'description': 'filter role tester'}) users = UserAccounts(topo.standalone, OU_ROLE, rdn=None) for i in [('STEVE_ROLE', ROLE1, 'Has roles 1, 2 and 3.'), ('HARRY_ROLE', ROLE21, 'Has roles 21, 2 and 3.'), ('MARY_ROLE', ROLE31, 'Has roles 31 and 3.')]: users.create(properties={ 
'uid': i[0], 'cn': i[0], 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i[0], 'userPassword': PW_DM, 'nsRoleDN': i[1], 'Description': i[2] }) for i in [('JOE_ROLE', 'Has filterRole.'), ('NOROLEUSER', 'Has no roles.'), ('SCRACHENTRY', 'Entry to test rights on.'), ('all access', 'Everyone has acccess (incl anon).'), ('not rule access', 'Only accessible to mary.'), ('or rule access', 'Only to steve and harry but nbot mary or anon'), ('nested role tester', 'Only accessible to harry and steve.')]: users.create(properties={ 'uid': i[0], 'cn': i[0], 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i[0], 'userPassword': PW_DM, 'Description': i[1] }) # Setting SN for user JOE UserAccount(topo.standalone, f'uid=JOE_ROLE,ou=roledntest,{DEFAULT_SUFFIX}').set('sn', 'Dr Drake') def fin(): """ It will delete the created users """ for i in users.list() + managedroles.list() + nestedroles.list(): i.delete() request.addfinalizer(fin) @pytest.mark.parametrize("user,entry", [ (STEVE_ROLE, NESTED_ROLE_TESTER), (HARRY_ROLE, NESTED_ROLE_TESTER), (MARY_ROLE, NOT_RULE_ACCESS), (STEVE_ROLE, OR_RULE_ACCESS), (HARRY_ROLE, OR_RULE_ACCESS), (STEVE_ROLE, ALL_ACCESS), (HARRY_ROLE, ALL_ACCESS), (MARY_ROLE, ALL_ACCESS), ], ids=[ "(STEVE_ROLE, NESTED_ROLE_TESTER)", "(HARRY_ROLE, NESTED_ROLE_TESTER)", "(MARY_ROLE, NOT_RULE_ACCESS)", "(STEVE_ROLE, OR_RULE_ACCESS)", "(HARRY_ROLE, OR_RULE_ACCESS)", "(STEVE_ROLE, ALL_ACCESS)", "(HARRY_ROLE, ALL_ACCESS)", "(MARY_ROLE, ALL_ACCESS)", ]) def test_mod_seealso_positive(topo, _add_user, _aci_of_user, user, entry): """ Testing the roledn keyword that allows access control based on the role of the bound user. :id: a33c5d6a-79f4-11e8-8551-8c16451d917b :parametrized: yes :setup: Standalone server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ conn = UserAccount(topo.standalone, user).bind(PW_DM) UserAccount(conn, entry).replace('seeAlso', 'cn=1') @pytest.mark.parametrize( "user,entry", [ (MARY_ROLE, NESTED_ROLE_TESTER), (STEVE_ROLE, NOT_RULE_ACCESS), (HARRY_ROLE, NOT_RULE_ACCESS), (MARY_ROLE, OR_RULE_ACCESS), ], ids=[ "(MARY_ROLE, NESTED_ROLE_TESTER)", "(STEVE_ROLE, NOT_RULE_ACCESS)", "(HARRY_ROLE, NOT_RULE_ACCESS)", "(MARY_ROLE , OR_RULE_ACCESS)"] ) def test_mod_seealso_negative(topo, _add_user, _aci_of_user, user, entry): """ Testing the roledn keyword that do not allows access control based on the role of the bound user. :id: b2444aa2-79f4-11e8-a2c3-8c16451d917b :parametrized: yes :setup: Standalone server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = UserAccount(topo.standalone, user).bind(PW_DM) user = UserAccount(conn, entry) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('seeAlso', 'cn=1') @pytest.mark.parametrize("entry", [NOT_RULE_ACCESS, ALL_ACCESS], ids=["NOT_RULE_ACCESS", "ALL_ACCESS"]) def test_mod_anonseealso_positive(topo, _add_user, _aci_of_user, entry): """ Testing the roledn keyword that allows access control based on the role of the bound user. :id: c3eb41ac-79f4-11e8-aa8b-8c16451d917b :parametrized: yes :setup: Standalone server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = Anonymous(topo.standalone).bind() UserAccount(conn, entry).replace('seeAlso', 'cn=1') @pytest.mark.parametrize("entry", [NESTED_ROLE_TESTER, OR_RULE_ACCESS], ids=["NESTED_ROLE_TESTER", "OR_RULE_ACCESS"]) def test_mod_anonseealso_negaive(topo, _add_user, _aci_of_user, entry): """ Testing the roledn keyword that do not allows access control based on the role of the bound user. 
:id: d385611a-79f4-11e8-adc8-8c16451d917b :parametrized: yes :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = Anonymous(topo.standalone).bind() user = UserAccount(conn, entry) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('seeAlso', 'cn=1') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/search_real_part2_test.py000066400000000000000000000455011421664411400301050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.account import Accounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) @pytest.fixture(scope="function") def aci_of_user(request, topo): # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = 
Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) pass request.addfinalizer(finofaci) @pytest.fixture(scope="module") def test_uer(request, topo): topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) for i in ['Product Development', 'Accounting']: ous.create(properties={'ou': i}) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') users.create(properties={ 'uid': 'Anuj Borah', 'cn': 'Anuj Borah', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'AnujBorah', 'userPassword': PW_DM }) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') users.create(properties={ 'uid': 'Ananda Borah', 'cn': 'Ananda Borah', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'AnandaBorah', 'userPassword': PW_DM }) def test_deny_all_access_with__target_set_on_non_leaf(topo, test_uer, aci_of_user): """Search Test 11 Deny all access with != target set on non-leaf :id: f1c5d72a-6e11-11e8-aa9d-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = "(target != ldap:///{})(targetattr=\"*\")".format(CONTAINER_2_DELADD) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # After binding with USER_ANANDA , aci will limit the search to itself assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # After binding with USER_ANUJ , aci will limit the search to itself assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # After binding with root , the actual number of users will be given assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with__target_set_on_wildcard_non_leaf( topo, test_uer, aci_of_user ): """Search Test 12 Deny all access with != target set on wildcard non-leaf :id: 02f34640-6e12-11e8-a382-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = "(target != ldap:///ou=Product*,{})(targetattr=\"*\")".format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will limit the search to ou=Product it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will limit the search to ou=Product it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root , aci will give actual no of users , without any limit. assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with__target_set_on_wildcard_leaf( topo, test_uer, aci_of_user ): """Search Test 13 Deny all access with != target set on wildcard leaf :id: 16c54d76-6e12-11e8-b5ba-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = "(target != ldap:///uid=Anuj*, ou=*,{})(targetattr=\"*\")".format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will limit the search to cn=Jeff it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will limit the search to cn=Jeff it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no aci blockage assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with_targetfilter_using_equality_search( topo, test_uer, aci_of_user ): """Search Test 14 Deny all access with targetfilter using equality search :id: 27255e04-6e12-11e8-8e35-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = '(targetfilter ="(uid=Anuj Borah)")(target = ldap:///{})(targetattr="*")'.format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will block the search to cn=Jeff assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will block the search to cn=Jeff assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) # with root there is no blockage assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) def test_deny_all_access_with_targetfilter_using_equality_search_two( topo, test_uer, aci_of_user ): """Test that Search Test 15 Deny all access with targetfilter using != equality search :id: 3966bcd4-6e12-11e8-83ce-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = '(targetfilter !="(uid=Anuj Borah)")(target = ldap:///{})(targetattr="*")'.format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will limit the search to cn=Jeff it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will limit the search to cn=Jeff it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no blockage assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with_targetfilter_using_substring_search( topo, test_uer, aci_of_user ): """Test that Search Test 16 Deny all access with targetfilter using substring search :id: 44d7b4ba-6e12-11e8-b420-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = '(targetfilter ="(uid=Anu*)")(target = ldap:///{})(targetattr="*")'.format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci block anything cn=j* assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci block anything cn=j* assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) # with root there is no blockage assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Anu*)')) def test_deny_all_access_with_targetfilter_using_substring_search_two( topo, test_uer, aci_of_user ): """Test that Search Test 17 Deny all access with targetfilter using != substring search :id: 55b12d98-6e12-11e8-8cf4-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = '(targetfilter !="(uid=Anu*)")(target = ldap:///{})(targetattr="*")'.format( DEFAULT_SUFFIX ) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci allow anything cn=j*, it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci allow anything cn=j*, it will block others assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) # with root there is no blockage assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) def test_deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search( topo, test_uer, aci_of_user, request ): """Search Test 18 Deny all access with targetfilter using boolean OR of two equality search :id: 29cc35fa-793f-11e8-988f-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr = "*")' '(targetfilter = (|(cn=scarter)(cn=jvaughan)))(version 3.0; acl "{}"; ' 'deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name)) UserAccount(topo.standalone, USER_ANANDA).set("cn", "scarter") UserAccount(topo.standalone, USER_ANUJ).set("cn", "jvaughan") conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search user = UserAccount(conn, USER_ANANDA) with pytest.raises(IndexError): user.get_attr_val_utf8('uid') # aci will deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search user = UserAccount(conn, USER_ANUJ) with pytest.raises(IndexError): user.get_attr_val_utf8('uid') # with root no blockage assert UserAccount(topo.standalone, USER_ANANDA).get_attr_val_utf8('uid') == 'Ananda Borah' # with root no blockage assert UserAccount(topo.standalone, USER_ANUJ).get_attr_val_utf8('uid') == 'Anuj Borah' def test_deny_all_access_to__userdn_two(topo, test_uer, aci_of_user): """Search Test 19 Deny all access to != userdn :id: 693496c0-6e12-11e8-80dc-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = "(target = ldap:///{})(targetattr=\"*\")".format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn!="ldap:///{}";)'.format(USER_ANANDA) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will not block anything for USER_ANANDA , it block other users assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will block everything for other users assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no aci blockage assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with_userdn(topo, test_uer, aci_of_user): """Search Test 20 Deny all access with userdn :id: 75aada86-6e12-11e8-bd34-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = "(target = ldap:///{})(targetattr=\"*\")".format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (all)' ACI_SUBJECT = 'userdn="ldap:///{}";)'.format(USER_ANANDA) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will block anything for USER_ANANDA , it not block other users assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will block anything for other users assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root thers is no aci blockage assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_all_access_with_targetfilter_using_presence_search( topo, test_uer, aci_of_user ): """Search Test 21 Deny all access with targetfilter using presence search :id: 85244a42-6e12-11e8-9480-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() user.set('userPassword', PW_DM) ACI_TARGET = '(targetfilter ="(cn=*)")(target = ldap:///{})(targetattr="*")'.format( DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' ACI_SUBJECT = 'userdn="ldap:///anyone";)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will eny_all_access_with_targetfilter_using_presence_search user = UserAccount(conn, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) with pytest.raises(IndexError): user.get_attr_val_utf8('cn') # with root no blockage assert UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'test_user_1000' if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/search_real_part3_test.py000066400000000000000000000460221421664411400301050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organization import Organization from lib389.idm.account import Accounts, Anonymous from lib389.idm.group import Group, UniqueGroup from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.group import Groups from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) @pytest.fixture(scope="function") def aci_of_user(request, topo): # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = suffix.get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="module") def test_uer(request, topo): topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) for i in ['Product Development', 'Accounting']: OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)).create(properties={'ou': i}) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') users.create(properties={ 'uid': 'Anuj Borah', 'cn': 'Anuj Borah', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'AnujBorah', 
'userPassword': PW_DM }) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') users.create(properties={ 'uid': 'Ananda Borah', 'cn': 'Ananda Borah', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'AnandaBorah', 'userPassword': PW_DM }) def test_deny_search_access_to_userdn_with_ldap_url(topo, test_uer, aci_of_user): """Search Test 23 Deny search access to userdn with LDAP URL :id: 94f082d8-6e12-11e8-be72-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. Operation should success """ ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' ACI_SUBJECT = ( 'userdn="ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) ) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will block all users having roomnumber=3445 assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will block roomnumber=3445 for all users USER_ANUJ does not have roomnumber assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no aci blockage UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') def test_deny_search_access_to_userdn_with_ldap_url_two(topo, test_uer, aci_of_user): """Search Test 24 Deny search access to != userdn with LDAP URL :id: a1ee05d2-6e12-11e8-8260-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. 
Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. Operation should success """ ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' ACI_SUBJECT = ( 'userdn != "ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) ) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will not block all users having roomnumber=3445 , it will block others assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will not block all users having roomnumber=3445 , it will block others assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no aci blockage UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') def test_deny_search_access_to_userdn_with_ldap_url_matching_all_users( topo, test_uer, aci_of_user ): """Search Test 25 Deny search access to userdn with LDAP URL matching all users :id: b37f72ae-6e12-11e8-9c98-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' ACI_SUBJECT = 'userdn = "ldap:///%s";)' % "{}??sub?(&(cn=*))".format(DEFAULT_SUFFIX) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will block all users LDAP URL matching all users assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # aci will block all users LDAP URL matching all users assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) # with root there is no aci blockage assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) def test_deny_read_access_to_a_dynamic_group(topo, test_uer, aci_of_user): """Search Test 26 Deny read access to a dynamic group :id: c0c5290e-6e12-11e8-a900-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
Operation should success """ groups = Groups(topo.standalone, DEFAULT_SUFFIX) group_properties = {"cn": "group1", "description": "testgroup"} group = groups.create(properties=group_properties) group.add('objectClass', 'groupOfURLS') group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) group.add_member(USER_ANANDA) ACI_TARGET = '(target = ldap:///{})(targetattr = "*")'.format(DEFAULT_SUFFIX) ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read)' % "Unknown" ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) # aci will block all 'memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX) assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) # USER_ANUJ is not a member assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) group.delete() def test_deny_read_access_to_dynamic_group_with_host_port_set_on_ldap_url( topo, test_uer, aci_of_user ): """Search Test 27 Deny read access to dynamic group with host:port set on LDAP URL :id: ceb62158-6e12-11e8-8c36-8c16451d917b :setup: Standalone Instance :steps: 1. Add Entry 2. Add ACI 3. Bind with test USER_ANUJ 4. Try search 5. Delete Entry,test USER_ANUJ, ACI :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should Fail 5. 
def test_deny_read_access_to_dynamic_group_with_scope_set_to_one_in_ldap_url(
    topo, test_uer, aci_of_user
):
    """Search Test 28 Deny read access to dynamic group with scope set to "one" in LDAP URL

    :id: ddb30432-6e12-11e8-94db-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # NOTE(review): despite the test name, the memberURL below uses a "sub"
    # scope, not "one" -- confirm against the original test plan.
    grp_mgr = Groups(topo.standalone, DEFAULT_SUFFIX)
    dyn_group = grp_mgr.create(properties={"cn": "group1", "description": "testgroup"})
    dyn_group.add('objectClass', 'groupOfURLS')
    dyn_group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX))
    dyn_group.add_member(USER_ANANDA)

    # deny(read) applies to everyone OUTSIDE the group (groupdn !=).
    aci_parts = [
        '(targetattr = "*")',
        '(version 3.0; acl "All rights for %s"; deny(read) ' % "Unknown",
        'groupdn != "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX),
    ]
    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ''.join(aci_parts))

    # USER_ANANDA is a member, so the != subject does not match her: reads allowed.
    conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM)
    assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))

    # USER_ANUJ is not a member, so the deny applies and hides everything.
    conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM)
    assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))

    dyn_group.delete()
def test_deny_access_to_group_should_deny_access_to_all_uniquemember(
    topo, test_uer, aci_of_user, request
):
    """Search Test 38 Deny access to group should deny access to all uniquemember (including chain group)

    :id: 56b470e4-7941-11e8-912b-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # Build a three-level chain of static groups:
    # Nested Group 1 -> Nested Group 2 -> Nested Group 3 -> the two test users.
    chain = [
        ('Nested Group 1', "cn=Nested Group 2, {}".format(DEFAULT_SUFFIX)),
        ('Nested Group 2', "cn=Nested Group 3, {}".format(DEFAULT_SUFFIX)),
        ('Nested Group 3', [USER_ANANDA, USER_ANUJ]),
    ]
    for group_cn, group_members in chain:
        UniqueGroup(topo.standalone, 'cn=' + group_cn + ',' + DEFAULT_SUFFIX).create(properties={
            'cn': group_cn,
            'ou': 'groups',
            'uniquemember': group_members
        })

    # Deny read to anyone reached through Nested Group 1 (the top of the chain).
    Domain(topo.standalone, DEFAULT_SUFFIX).add(
        "aci",
        '(target = ldap:///{})(targetattr="*")'
        '(version 3.0; acl "{}"; deny(read)(groupdn = "ldap:///cn=Nested Group 1, {}"); )'.format(
            DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX))

    # Both users are (transitive) uniquemembers, so both are denied.
    for bind_dn in (USER_ANANDA, USER_ANUJ):
        conn = UserAccount(topo.standalone, bind_dn).bind(PW_DM)
        assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))

    # Directory Manager is not subject to the aci.
    assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)'))
@pytest.mark.bz301798
def test_groupdnattr_value_is_another_group(topo):
    """Search Test 42 groupdnattr value is another group test #1

    :id: 52299e16-7944-11e8-b471-8c16451d917b
    :setup: server
    :steps:
        1. Add test entry
        2. Add ACI
        3. USER_ANUJ should follow ACI role
    :expectedresults:
        1. Entry should be added
        2. Operation should succeed
        3. Operation should succeed
    """
    # Container, plus a user below it that will read the group through the aci.
    Organization(topo.standalone).create(properties={"o": "nscpRoot"}, basedn=DEFAULT_SUFFIX)
    member = UserAccount(topo.standalone, "cn=dchan,o=nscpRoot,{}".format(DEFAULT_SUFFIX))
    member.create(properties={
        'uid': 'dchan',
        'cn': 'dchan',
        'sn': 'user',
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/' + 'dchan',
        'userPassword': PW_DM
    })

    group = UniqueGroup(topo.standalone, 'cn=groupx,o=nscpRoot,' + DEFAULT_SUFFIX)
    group.create(properties={
        'cn': 'groupx',
        'ou': 'groups',
    })
    group.set('uniquemember', 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX))
    # Grant read/search/compare to anyone listed as a uniquemember anywhere
    # under o=nscpRoot (group expansion via groupdnattr).
    group.set('aci', '(targetattr="*")(version 3.0; acl "Enable Group Expansion"; allow (read, search, compare) groupdnattr="ldap:///o=nscpRoot?uniquemember?sub";)')

    # dchan is a uniquemember, so the aci lets it read the group entry.
    conn = UserAccount(topo.standalone, 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)).bind(PW_DM)
    assert UserAccount(conn, 'cn=groupx,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'groupx'
@pytest.fixture(scope="function")
def aci_of_user(request, topo):
    """Snapshot the suffix acis (after ensuring an anonymous-read aci exists)
    and restore that snapshot when the test finishes."""
    # Make sure anonymous read access is granted before taking the snapshot.
    anon_aci = (
        '(targetattr != "userpassword")(target = "ldap:///%s")' % (DEFAULT_SUFFIX)
        + '(version 3.0; acl "Anonymous Read access"; allow (read,search,compare)'
        + '(userdn="ldap:///anyone");)'
    )
    suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
    try:
        suffix.add('aci', anon_aci)
    except ldap.TYPE_OR_VALUE_EXISTS:
        # Already present -- nothing to do.
        pass

    saved_acis = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci')

    def _restore_acis():
        # Wipe whatever the test left behind, then re-add the snapshot.
        domain = Domain(topo.standalone, DEFAULT_SUFFIX)
        domain.set('aci', None)
        for saved in saved_acis:
            domain.add("aci", saved)

    request.addfinalizer(_restore_acis)
def test_deny_all_access_with_target_set(topo, test_uer, aci_of_user):
    """Test that Deny all access with target set

    :id: 0550e680-6e0e-11e8-82f4-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # Deny everything on the USER_ANANDA entry, for every bound identity.
    aci_body = (
        '(target = ldap:///{})(targetattr="*")'.format(USER_ANANDA)
        + '(version 3.0; acl "Name of the ACI"; deny absolute (all)'
        + 'userdn="ldap:///anyone";)'
    )
    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", aci_body)

    # The targeted entry is invisible to both regular users.
    for bind_dn in (USER_ANANDA, USER_ANUJ):
        conn = UserAccount(topo.standalone, bind_dn).bind(PW_DM)
        assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)'))

    # Directory Manager bypasses the aci.
    assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Ananda*)'))
def test_deny_all_access_without_a_target_set(topo, test_uer, aci_of_user):
    """Search Test 3 Deny all access without a target set

    :id: 2dbeb36a-6e11-11e8-ab9f-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # No target clause: the deny applies to the whole suffix for anyone.
    aci_body = (
        '(targetattr="*")'
        + '(version 3.0; acl "Name of the ACI"; deny absolute (all)'
        + 'userdn="ldap:///anyone";)'
    )
    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", aci_body)

    # Both regular users are blocked everywhere.
    for bind_dn in (USER_ANANDA, USER_ANUJ):
        conn = UserAccount(topo.standalone, bind_dn).bind(PW_DM)
        assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)'))

    # Directory Manager is unaffected.
    assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(ou=Accounting)'))
def test_deny_read_access_to_multiple_groupdns(topo, test_uer, aci_of_user):
    """Search Test 6 Deny read access to multiple groupdn's

    :id: 8f3ba440-6e11-11e8-8b20-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # group1 (static) holds USER_ANANDA; group2 (posix) holds USER_ANUJ.
    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
    group = groups.create(properties={"cn": "group1", "description": "testgroup"})
    group.add_member(USER_ANANDA)
    posix_groups = PosixGroups(topo.standalone, DEFAULT_SUFFIX)
    posix_group = posix_groups.create(properties={
        "cn": "group2",
        "description": "testgroup2",
        "gidNumber": "2000",
    })
    posix_group.add_member(USER_ANUJ)

    # Deny read to members of either group ('||' joins the two groupdns).
    ACI_TARGET = '(targetattr="*")'
    ACI_ALLOW = '(version 3.0; acl "All rights for cn=group1,ou=Groups,{}"; deny(read)'.format(DEFAULT_SUFFIX)
    ACI_SUBJECT = 'groupdn="ldap:///cn=group1,ou=Groups,{}||ldap:///cn=group2,ou=Groups,{}";)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX)
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)

    # USER_ANANDA is in group1: the deny(read) aci hides every entry.
    conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM)
    assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))
    # USER_ANUJ is in group2: equally denied through the second groupdn.
    conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM)
    assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))
    # With root there is no aci blockage.
    assert 5 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)'))

    # Cleanup. Fix: delete through the handles we already hold instead of
    # re-fetching "group1" and issuing a discarded get("group2") lookup.
    group.delete()
    posix_group.delete()
def test_deny_all_access_with__target_set(topo, test_uer, aci_of_user, request):
    """Search Test 8 Deny all access with != target set

    :id: bc00aed0-6e11-11e8-be66-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # Deny everything EXCEPT the USER_ANANDA entry (target !=).
    Domain(topo.standalone, DEFAULT_SUFFIX).add(
        "aci",
        '(target != "ldap:///{}")(targetattr = "*")'
        '(version 3.0; acl "{}"; deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(
            USER_ANANDA, request.node.name))

    # Every bound user can still see USER_ANANDA -- and nothing else.
    for bind_dn in (USER_ANANDA, USER_ANUJ):
        conn = UserAccount(topo.standalone, bind_dn).bind(PW_DM)
        assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)'))

    # Directory Manager sees all four entries.
    assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)'))
def test_deny_all_access_with_targetattr_set(topo, test_uer, aci_of_user):
    """Search Test 10 Deny all access with targetattr set

    :id: e1602ff2-6e11-11e8-8e55-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Add Entry
        2. Add ACI
        3. Bind with test USER_ANUJ
        4. Try search
        5. Delete Entry,test USER_ANUJ, ACI
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should fail
        5. Operation should succeed
    """
    # One extra user so a (uid=*) search matches four entries for root.
    probe_user = UserAccount(topo.standalone, "cn=Anuj12,ou=People,{}".format(DEFAULT_SUFFIX))
    probe_user.create(properties={
        'uid': 'Anuj12',
        'cn': 'Anuj12',
        'sn': 'user',
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/' + 'Anuj12'
    })

    # Deny every right on the uid attribute, for everyone.
    aci_body = (
        '(targetattr="uid")'
        + '(version 3.0; acl "Name of the ACI"; deny absolute (all)'
        + 'userdn="ldap:///anyone";)'
    )
    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", aci_body)

    # uid is unreadable for ordinary binds, so (uid=*) matches nothing.
    for bind_dn in (USER_ANANDA, USER_ANUJ):
        conn = UserAccount(topo.standalone, bind_dn).bind(PW_DM)
        assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)'))

    # Directory Manager is unaffected.
    assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)'))
    probe_user.delete()
def _oc_definition(oid_ext, name, must=None, may=None):
    """Build an auxiliary objectClass schema definition, returned as bytes.

    :param oid_ext: integer suffix appended to the fixed OID prefix
    :param name: NAME of the objectClass
    :param must: MUST attribute clause; defaults to the module-level MUST
    :param may: MAY attribute clause; defaults to the module-level MAY
    """
    # NOTE(review): DESC mentions ticket 47490 while this file exercises
    # ticket 47653 -- looks like a copy/paste leftover; schema DESC only.
    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
    desc = 'To test ticket 47490'
    sup = 'person'
    must = must or MUST
    may = may or MAY
    new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
    return ensure_bytes(new_oc)
@pytest.mark.ds47653
def test_selfdn_permission_add(topology_st, allow_user_init):
    """Check add entry operation with and without SelfDN aci

    :id: e837a9ef-be92-48da-ad8b-ebf42b0fede1
    :setup: Standalone instance, add a entry which is used to bind,
     enable acl error logging by setting 'nsslapd-errorlog-level' to '128',
     remove aci's to start with a clean slate, and add dummy entries
    :steps:
        1. Check we can not ADD an entry without the proper SELFDN aci
        2. Check with the proper ACI we can not ADD with 'member' attribute
        3. Check entry to add with memberS and with the ACI
        4. Check with the proper ACI and 'member' it succeeds to ADD
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should fail with Insufficient Access
        4. Operation should be successful
    """

    def _candidate_entry(member_values):
        # Build the candidate entry with the given 'member' values (or none).
        entry = Entry(ENTRY_DN)
        entry.setValues('objectclass', 'top', 'person', 'OCticket47653')
        entry.setValues('sn', ENTRY_NAME)
        entry.setValues('cn', ENTRY_NAME)
        entry.setValues('postalAddress', 'here')
        entry.setValues('postalCode', '1234')
        if member_values:
            entry.setValues('member', member_values)
        return entry

    topology_st.standalone.log.info("\n\n######################### ADD ######################\n")

    # bind as bind_entry
    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

    # Entry with multivalued member: every dummy entry plus the bind DN.
    members = ["cn=%s%d,%s" % (OTHER_NAME, cpt, SUFFIX) for cpt in range(MAX_OTHERS)]
    members.append(BIND_DN)
    entry_with_members = _candidate_entry(members)

    # Entry with exactly one member: the bind DN itself.
    entry_with_member = _candidate_entry([BIND_DN])

    # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
    # Fix: pytest.raises also fails the test if the add unexpectedly SUCCEEDS;
    # the old try/except pattern silently passed in that case.
    topology_st.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
        topology_st.standalone.add_s(entry_with_member)

    # Ok Now add the proper ACI
    topology_st.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
    ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
    ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
    ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
    mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))]
    topology_st.standalone.modify_s(SUFFIX, mod)

    # bind as bind_entry
    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

    # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
    topology_st.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN)
    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
        topology_st.standalone.add_s(Entry((ENTRY_DN, {
            'objectclass': ENTRY_OC.split(),
            'sn': ENTRY_NAME,
            'cn': ENTRY_NAME,
            'postalAddress': 'here',
            'postalCode': '1234'})))

    # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
    # member should contain only one value
    topology_st.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN)
    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
        topology_st.standalone.add_s(entry_with_members)

    topology_st.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN)
    topology_st.standalone.add_s(entry_with_member)
topology_st.standalone.log.info("Bind as %s" % BIND_DN) topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) # entry to search with the proper aci topology_st.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 @pytest.mark.ds47653 def test_selfdn_permission_modify(topology_st, allow_user_init): """Check modify operation with and without SelfDN aci :id: 97a58844-095f-44b0-9029-dd29a7d83d68 :setup: Standalone instance, add a entry which is used to bind, enable acl error logging by setting 'nsslapd-errorlog-level' to '128', remove aci's to start with a clean slate, and add dummy entries :steps: 1. Check we can not modify an entry without the proper SELFDN aci 2. Add proper ACI 3. Modify the entry and check the modified value :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful """ # bind as bind_entry topology_st.standalone.log.info("Bind as %s" % BIND_DN) topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) topology_st.standalone.log.info("\n\n######################### MODIFY ######################\n") # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology_st.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] topology_st.standalone.modify_s(ENTRY_DN, mod) except Exception as e: topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # Ok Now add the proper ACI topology_st.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETATTR = '(targetattr="*")' ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN 
write\"; allow (write)" ACI_SUBJECT = " userattr = \"member#selfDN\";)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) # bind as bind_entry topology_st.standalone.log.info("Bind as %s" % BIND_DN) topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) # modify the entry and checks the value topology_st.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] topology_st.standalone.modify_s(ENTRY_DN, mod) ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 assert ensure_str(ents[0].postalCode) == '1928' @pytest.mark.ds47653 def test_selfdn_permission_delete(topology_st, allow_user_init): """Check delete operation with and without SelfDN aci :id: 0ec4c0ec-e7b0-4ef1-8373-ab25aae34516 :setup: Standalone instance, add a entry which is used to bind, enable acl error logging by setting 'nsslapd-errorlog-level' to '128', remove aci's to start with a clean slate, and add dummy entries :steps: 1. Check we can not delete an entry without the proper SELFDN aci 2. Add proper ACI 3. Check we can perform delete operation with proper ACI :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. 
Operation should be successful """ topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") # bind as bind_entry topology_st.standalone.log.info("Bind as %s" % BIND_DN) topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology_st.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) topology_st.standalone.delete_s(ENTRY_DN) except Exception as e: topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # Ok Now add the proper ACI topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" ACI_SUBJECT = " userattr = \"member#selfDN\";)" ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) # bind as bind_entry topology_st.standalone.log.info("Bind as %s" % BIND_DN) topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) # entry to delete with the proper aci topology_st.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) topology_st.standalone.delete_s(ENTRY_DN) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/syntax_test.py000066400000000000000000000335231421664411400260540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.idm.domain import Domain from lib389.topologies import topology_st as topo from lib389.utils import ds_is_older import ldap pytestmark = pytest.mark.tier1 INVALID = [('test_targattrfilters_1', f'(targattrfilters ="add=title:title=fred),del=cn:(cn!=harry)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_2', f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_3', f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry))' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_4', f'(targattrfilters ="add=title:(title=fred),=cn:(cn!=harry")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_5', f'(targattrfilters ="add=title:(|(title=fred)(cn=harry)),del=cn:(cn=harry)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_6', f'(targattrfilters ="add=title:(|(title=fred)(title=harry)),del=cn:(title=harry)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_7', f'(targattrfilters ="add=title:(cn=architect), ' f'del=title:(title=architect) && l:(l=cn=Meylan,dc=example,dc=com")")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_8', f'(targattrfilters ="add=title:(cn=architect)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_9', f'(targattrfilters ="add=title:(cn=arch*)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_10', f'(targattrfilters ="add=title:(cn >= 1)")' 
f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_11', f'(targattrfilters ="add=title:(cn <= 1)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_12', f'(targattrfilters ="add=title:(cn ~= 1)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_13', f'(targattrfilters ="add=title:(!(cn ~= 1))")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_14', f'(targattrfilters ="add=title:(&(cn=fred)(cn ~= 1))")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_15', f'(targattrfilters ="add=title:(|(cn=fred)(cn ~= 1))")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_16', f'(targattrfilters ="add=title:(&(|(title=fred)(title=harry))(cn ~= 1))")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_17', f'\(targattrfilters ="add=title:(&(|(&(title=harry)(title=fred))' f'(title=harry))(title ~= 1))")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_19', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), ('test_targattrfilters_21', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), ('test_targattrfilters_22', f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_targattrfilters_23', f'(target = ldap:///cn=Jeff 
Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), ('test_Missing_acl_mispel', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Missing_acl_string', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Wrong_version_string', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Missing_version_string', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Authenticate_statement', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr != "uid")' f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute (all)' f'userdn="ldap:///anyone";)'), ('test_Multiple_targets', f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Target_set_to_self', f'(target = ldap:///self)(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_target_set_with_ldap_instead_of_ldap', f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_target_set_with_more_than_three', f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute 
(all)userdn="ldap:///anyone";)'), ('test_target_set_with_less_than_three', f'(target = ldap://{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_bind_rule_set_with_less_than_three', f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), ('test_Use_semicolon_instead_of_comma_in_permission', f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny ' f'(read; search; compare; write)userdn="ldap:///anyone";)'), ('test_Use_double_equal_instead_of_equal_in_the_target', f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_use_double_equal_instead_of_equal_in_user_and_group_access', f'(target = ldap:///{DEFAULT_SUFFIX})' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' f'userdn == "ldap:///anyone";)'), ('test_donot_cote_the_name_of_the_aci', f'(target = ldap:///{DEFAULT_SUFFIX})' f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), ('test_extra_parentheses_case_1', f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), ('test_extra_parentheses_case_2', f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' f'userdn == "ldap:///anyone";)'), ('test_extra_parentheses_case_3', f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute ' f'(all)userdn = "ldap:///anyone";)))'), ('test_no_semicolon_at_the_end_of_the_aci', f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', 
f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), ('test_bad_filter', f'(target = ldap:///{DEFAULT_SUFFIX}) ' f'(targetattr="cn |&| sn |(|) uid")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), ('test_Use_double_equal_instead_of_equal_in_the_targattrfilters', f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters== "add=title:(title=architect)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Use_double_equal_instead_of_equal_inside_the_targattrfilters', f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters="add==title:(title==architect)")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'),] FAILED = [('test_targattrfilters_18', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), ('test_targattrfilters_20', f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' f'(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), ('test_bind_rule_set_with_more_than_three', f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' f'userdn="ldap:////////anyone";)'), ('test_Use_double_equal_instead_of_equal_in_the_targetattr', f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr==*)' f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), ('test_Use_double_equal_instead_of_equal_in_the_targetfilter', f'(target = ldap:///{DEFAULT_SUFFIX})(targetfilter==*)' f'(version 3.0; acl "Name of the ACI"; deny absolute ' f'(all)userdn="ldap:///anyone";)'), ] @pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473') @pytest.mark.parametrize("real_value", [a[1] for a in FAILED], 
ids=[a[0] for a in FAILED]) def test_aci_invalid_syntax_fail(topo, real_value): """ Try to set wrong ACI syntax. :id: 83c40784-fff5-49c8-9535-7064c9c19e7e :parametrized: yes :setup: Standalone Instance :steps: 1. Create ACI 2. Try to setup the ACI with Instance :expectedresults: 1. It should pass 2. It should not pass """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) with pytest.raises(ldap.INVALID_SYNTAX): domain.add("aci", real_value) @pytest.mark.parametrize("real_value", [a[1] for a in INVALID], ids=[a[0] for a in INVALID]) def test_aci_invalid_syntax(topo, real_value): """ Try to set wrong ACI syntax. :id: e8bf20b6-48be-4574-8300-056e42a0f0a8 :parametrized: yes :setup: Standalone Instance :steps: 1. Create ACI 2. Try to setup the ACI with Instance :expectedresults: 1. It should pass 2. It should not pass """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) with pytest.raises(ldap.INVALID_SYNTAX): domain.add("aci", real_value) def test_target_set_above_the_entry_test(topo): """ Try to set wrong ACI syntax. :id: d544d09a-6ed1-11e8-8872-8c16451d917b :setup: Standalone Instance :steps: 1. Create ACI 2. Try to setup the ACI with Instance :expectedresults: 1. It should pass 2. It should not pass """ domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) with pytest.raises(ldap.INVALID_SYNTAX): domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute ' f'(all)userdn="ldap:///anyone";)') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/userattr_test.py000066400000000000000000000236571421664411400264060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- """ This script will test different type of user attributes. """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.group import Groups from lib389.idm.role import ManagedRoles from lib389.topologies import topology_st as topo import ldap pytestmark = pytest.mark.tier1 OU = f"ou=Accounting,{DEFAULT_SUFFIX}" OU_2 = f"ou=Inheritance,{DEFAULT_SUFFIX}" CAN = f"uid=Anuj Borah,{OU}" CANNOT = f"uid=Ananda Borah,{OU}" LEVEL_0 = f"uid=Grandson,{OU_2}" LEVEL_1 = f"uid=Child,{OU_2}" LEVEL_2 = f"uid=Parent,{OU_2}" LEVEL_3 = f"uid=Grandparent,{OU_2}" LEVEL_4 = f"uid=Ancestor,{OU_2}" ROLE1 = f'cn=ROLE1,{OU}' ROLE2 = f'cn=ROLE2,{OU}' NSSIMPLEGROUP = f'cn=NSSIMPLEGROUP,{OU}' NSSIMPLEGROUP1 = f'cn=NSSIMPLEGROUP1,{OU}' ROLEDNACCESS = f'uid=ROLEDNACCESS,{OU}' USERDNACCESS = f'uid=USERDNACCESS,{OU}' GROUPDNACCESS = f'uid=GROUPDNACCESS,{OU}' LDAPURLACCESS = f'uid=LDAPURLACCESS,{OU}' ATTRNAMEACCESS = f'uid=ATTRNAMEACCESS,{OU}' ANCESTORS = f'ou=ANCESTORS,{OU_2}' GRANDPARENTS = f'ou=GRANDPARENTS,{ANCESTORS}' PARENTS = f'ou=PARENTS,{GRANDPARENTS}' CHILDREN = f'ou=CHILDREN,{PARENTS}' GRANDSONS = f'ou=GRANDSONS,{CHILDREN}' @pytest.fixture(scope="module") def _add_user(topo): """ This function will create user for the test and in the end entries will be deleted . 
""" role_aci_body = '(targetattr="*")(version 3.0; aci "role aci"; allow(all)' # Creating OUs ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou_accounting = ous.create(properties={'ou': 'Accounting'}) ou_accounting.set('aci', [f'(target="ldap:///{ROLEDNACCESS}"){role_aci_body} ' f'userattr = "Description#ROLEDN";)', f'(target="ldap:///{USERDNACCESS}"){role_aci_body} ' f'userattr = "Description#USERDN";)', f'(target="ldap:///{GROUPDNACCESS}"){role_aci_body} ' f'userattr = "Description#GROUPDN";)', f'(target="ldap:///{LDAPURLACCESS}"){role_aci_body} ' f'userattr = "Description#LDAPURL";)', f'(target="ldap:///{ATTRNAMEACCESS}"){role_aci_body} ' f'userattr = "Description#4612";)']) ou_inheritance = ous.create(properties={'ou': 'Inheritance', 'street': LEVEL_4, 'seeAlso': LEVEL_3, 'st': LEVEL_2, 'description': LEVEL_1, 'businessCategory': LEVEL_0}) inheritance_aci_body = '(targetattr="*")(version 3.0; aci "Inheritance aci"; allow(all) ' ou_inheritance.set('aci', [f'{inheritance_aci_body} ' f'userattr = "parent[0].businessCategory#USERDN";)', f'{inheritance_aci_body} ' f'userattr = "parent[0,1].description#USERDN";)', f'{inheritance_aci_body} ' f'userattr = "parent[0,1,2].st#USERDN";)', f'{inheritance_aci_body} ' f'userattr = "parent[0,1,2,3].seeAlso#USERDN";)', f'{inheritance_aci_body} ' f'userattr = "parent[0,1,2,3,4].street#USERDN";)']) # Creating Users users = UserAccounts(topo.standalone, OU, rdn=None) for i in [['Anuj Borah', 'Sunnyvale', ROLE1, '4612'], ['Ananda Borah', 'Santa Clara', ROLE2, 'Its Unknown']]: users.create(properties={ 'uid': i[0], 'cn': i[0].split()[0], 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i[0].split()[0], 'userPassword': PW_DM, 'givenname': i[0].split()[0], 'l': i[1], 'mail': "anuj@borah.com", 'telephonenumber': "+1 408 555 4798", 'facsimiletelephonenumber': "+1 408 555 9751", 'roomnumber': i[3], 'Description': i[3], 'nsRoleDN': i[2] }) for demo1 in [('ROLEDNACCESS', ROLE1), 
('USERDNACCESS', CAN), ('GROUPDNACCESS', NSSIMPLEGROUP), ('ATTRNAMEACCESS', '4612'), ('LDAPURLACCESS', f"ldap:///{DEFAULT_SUFFIX}??sub?(l=Sunnyvale)")]: users.create(properties={ 'uid': demo1[0], 'cn': demo1[0], 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1[0], 'userPassword': PW_DM, 'Description': demo1[1] }) # Creating roles roles = ManagedRoles(topo.standalone, OU) for i in ['ROLE1', 'ROLE2']: roles.create(properties={"cn": i}) # Creating Groups grps = Groups(topo.standalone, OU, rdn=None) for i in [('NSSIMPLEGROUP', CAN), ('NSSIMPLEGROUP1', CANNOT)]: grps.create(properties={ 'cn': i[0], 'ou': 'groups', 'member': i[1] }) users = UserAccounts(topo.standalone, OU_2, rdn=None) for i in ['Grandson', 'Child', 'Parent', 'Grandparent', 'Ancestor']: users.create( properties={ 'uid': i, 'cn': i, 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + i, 'userPassword': PW_DM }) # Creating Other OUs for dn_dn in [(OU_2, 'ANCESTORS'), (ANCESTORS, 'GRANDPARENTS'), (GRANDPARENTS, 'PARENTS'), (PARENTS, 'CHILDREN'), (CHILDREN, 'GRANDSONS')]: OrganizationalUnits(topo.standalone, dn_dn[0]).create(properties={'ou': dn_dn[1]}) @pytest.mark.parametrize("user,entry", [ (CAN, ROLEDNACCESS), (CAN, USERDNACCESS), (CAN, GROUPDNACCESS), (CAN, LDAPURLACCESS), (CAN, ATTRNAMEACCESS), (LEVEL_0, OU_2), (LEVEL_1, ANCESTORS), (LEVEL_2, GRANDPARENTS), (LEVEL_4, OU_2), (LEVEL_4, ANCESTORS), (LEVEL_4, GRANDPARENTS), (LEVEL_4, PARENTS), (LEVEL_4, CHILDREN), pytest.param(LEVEL_3, CHILDREN, marks=pytest.mark.xfail(reason="May be some bug")), ], ids=[ "(CAN,ROLEDNACCESS)", "(CAN,USERDNACCESS)", "(CAN,GROUPDNACCESS)", "(CAN,LDAPURLACCESS)", "(CAN,ATTRNAMEACCESS)", "(LEVEL_0, OU_2)", "(LEVEL_1,ANCESTORS)", "(LEVEL_2,GRANDPARENTS)", "(LEVEL_4,OU_2)", "(LEVEL_4, ANCESTORS)", "(LEVEL_4,GRANDPARENTS)", "(LEVEL_4,PARENTS)", "(LEVEL_4,CHILDREN)", "(LEVEL_3, CHILDREN)" ]) def test_mod_see_also_positive(topo, _add_user, user, entry): """ 
Try to set seeAlso on entry with binding specific user, it will success as per the ACI. :id: 65745426-7a01-11e8-8ac2-8c16451d917b :parametrized: yes :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = UserAccount(topo.standalone, user).bind(PW_DM) UserAccount(conn, entry).replace('seeAlso', 'cn=1') @pytest.mark.parametrize("user,entry", [ (CANNOT, ROLEDNACCESS), (CANNOT, USERDNACCESS), (CANNOT, GROUPDNACCESS), (CANNOT, LDAPURLACCESS), (CANNOT, ATTRNAMEACCESS), (LEVEL_0, ANCESTORS), (LEVEL_0, GRANDPARENTS), (LEVEL_0, PARENTS), (LEVEL_0, CHILDREN), (LEVEL_2, PARENTS), (LEVEL_4, GRANDSONS), ], ids=[ "(CANNOT,ROLEDNACCESS)", "(CANNOT,USERDNACCESS)", "(CANNOT,GROUPDNACCESS)", "(CANNOT,LDAPURLACCESS)", "(CANNOT,ATTRNAMEACCESS)", "(LEVEL_0, ANCESTORS)", "(LEVEL_0,GRANDPARENTS)", "(LEVEL_0,PARENTS)", "(LEVEL_0,CHILDREN)", "(LEVEL_2,PARENTS)", "(LEVEL_4,GRANDSONS)", ]) def test_mod_see_also_negative(topo, _add_user, user, entry): """ Try to set seeAlso on entry with binding specific user, it will Fail as per the ACI. :id: 9ea93252-7a01-11e8-a85b-8c16451d917b :parametrized: yes :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = UserAccount(topo.standalone, user).bind(PW_DM) user = UserAccount(conn, entry) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('seeAlso', 'cn=1') @pytest.mark.parametrize("user,entry", [ (CANNOT, USERDNACCESS), (CANNOT, ROLEDNACCESS), (CANNOT, GROUPDNACCESS) ]) def test_last_three(topo, _add_user, user, entry): """ When we use the userattr keyword to associate the entry used to bind with the target entry the ACI applies only to the target specified and not to subentries. 
:id: add58a0a-7a01-11e8-85f1-8c16451d917b :parametrized: yes :setup: Standalone Instance :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ conn = UserAccount(topo.standalone, user).bind(PW_DM) users = UserAccounts(conn, entry) with pytest.raises(ldap.INSUFFICIENT_ACCESS): users.create_test_user() if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/valueacl_part2_test.py000066400000000000000000000430471421664411400274340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount from lib389.idm.account import Anonymous from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; 
allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def _add_user(request, topo): for i in ["Product Development", 'Accounting', "Human Resources"]: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) user.create(properties=properties) user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') user.set('mail', 'anuj@anuj.Borah') properties = { 'uid': 'Sam Carter', 'cn': 'Sam Carter', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'SamCarter', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) user.create(properties=properties) properties = { 'uid': 'Kirsten Vaughan', 'cn': 'Kirsten Vaughan', 'sn': 'Kirsten Vaughan', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'KirstenVaughan', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'HARRY', 'cn': 'HARRY', 'sn': 'HARRY', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'HARRY', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=HARRY, 
ou=Accounting,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) def fin(): for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, HARRY, KIRSTENVAUGHAN, HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: ua = UserAccount(topo.standalone, DN) try: ua.delete() except: pass request.addfinalizer(fin) def test_we_can_search_as_expected(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can search as expected :id: e845dbba-7aa9-11e8-8988-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = Anonymous(topo.standalone).bind() # aci will allow secretary , mail , objectclass user = UserAccount(conn, USER_DELADD) assert user.get_attr_vals('secretary') assert user.get_attr_vals('mail') assert user.get_attr_vals('objectclass') def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test search will work with targattrfilters present. :id: f8c1ea88-7aa9-11e8-a55c-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) # aci will not allow 'title', 'topdog' conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) user = UserAccount(conn, USER_DELADD) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add('title', 'topdog') def test_modify_with_multiple_filters(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Allowed by multiple filters :id: fd9d223e-7aa9-11e8-a83b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///anyone") ;)'.format( DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name ) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will allow title some attribute only user = UserAccount(conn, USER_DELADD) user.add("title", "architect") assert user.get_attr_val('title') user.add("secretary", "cn=Meylan,dc=example,dc=com") assert user.get_attr_val('secretary') def test_denied_by_multiple_filters(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Denied by multiple filters :id: 034c6c62-7aaa-11e8-8634-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will allow title some attribute only user = UserAccount(conn, USER_DELADD) user.add("title", "architect") assert user.get_attr_val('title') user.add("secretary", "cn=Meylan,dc=example,dc=com") assert user.get_attr_val('secretary') # aci will allow title some attribute only with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("secretary", "cn=Grenoble,dc=example,dc=com") def test_allowed_add_one_attribute(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Allowed add one attribute (in presence of multiple filters) :id: 086c7f0c-7aaa-11e8-b69f-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:(secretary=cn=Meylan, {}), ' \ 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "{}"; ' \ 'allow (write) (userdn = "ldap:///{}") ;)'.format( DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) user = UserAccount(conn, USER_DELADD) # aci will allow add ad delete user.add('title', 'architect') assert user.get_attr_val('title') user.remove('title', 'architect') def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test not allowed add an entry :id: 0d0effee-7aaa-11e8-b673-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)) ' \ '&& secretary:(secretary=cn=Meylan, {}), del=title:(|(title=engineer)(title=cool dude)' \ '(title=scum))")(version 3.0; aci "{}"; allow (add) userdn = "ldap:///{}";)'.format( DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) properties = { 'uid': 'FRED', 'cn': 'FRED', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'FRED' } user = UserAccount(topo.standalone, 'cn=FRED,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set('title', ['anuj', 'kumar', 'borah']) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will not allow adding objectclass user = UserAccount(conn, USER_WITH_ACI_DELADD) with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.add("objectclass", "person") def test_on_modrdn(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that valuacls kick in for modrdn operation. :id: 12985dde-7aaa-11e8-abde-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(target="ldap:///cn=*,ou=Accounting,{}")(targattrfilters = "add=cn:(|(cn=engineer)), ' \ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "{}"; ' \ 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # modrdn_s is not allowed with ou=OU1 useraccount = UserAccount(conn, FRED) with pytest.raises(ldap.INSUFFICIENT_ACCESS): useraccount.rename("ou=OU1") def test_on_modrdn_allow(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test modrdn still works (2) :id: 17720562-7aaa-11e8-82ee-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(target="ldap:///{}")(targattrfilters = "add=cn:((cn=engineer)), del=cn:((cn=jonny))")' \ '(version 3.0; aci "{}"; allow (write) ' \ 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) properties = { 'uid': 'jonny', 'cn': 'jonny', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'jonny' } user = UserAccount(topo.standalone, 'cn=jonny,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will allow modrdn_s on cn=engineer useraccount = UserAccount(conn, "cn=jonny,{}".format(DEFAULT_SUFFIX)) useraccount.rename("cn=engineer") assert useraccount.dn == 'cn=engineer,dc=example,dc=com' @pytest.mark.bz979515 def test_targattrfilters_keyword(topo): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) "Bug #979515 - ACLs inoperative in some search scenarios [rhel-6.5]" "Bug #979516 is a clone for DS8.2 on RHEL5.9" "Bug #979514 is a clone for RHEL6.4 zStream errata" :id: 23f9e9d0-7aaa-11e8-b16b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) ou = OrganizationalUnit(topo.standalone, 'ou=bug979515,{}'.format(DEFAULT_SUFFIX)) ou.create(properties={'ou': 'bug979515'}) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target="ldap:///ou=bug979515,{}") ' '(targetattr= "uid") ( version 3.0; acl "read other subscriber"; allow (compare, read, search) ' 'userdn="ldap:///uid=*,ou=bug979515,{}" ; )'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX)) properties = { 'uid': 'acientryusr1', 'cn': 'acientryusr1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'acientryusr1' } user = UserAccount(topo.standalone, 'cn=acientryusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set('telephoneNumber', '99972566596') user.set('mail', 'anuj@anuj.com') user.set("userPassword", "password") properties = { 'uid': 'newaciphoneusr1', 'cn': 'newaciphoneusr1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'newaciphoneusr1' } user = UserAccount(topo.standalone, 'cn=newaciphoneusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set('telephoneNumber', '99972566596') user.set('mail', 'anuj@anuj.com') conn = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) # Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) user = UserAccount(conn, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) with pytest.raises(IndexError): user.get_attr_vals('mail') user.get_attr_vals('telephoneNumber') user.get_attr_vals('cn') user = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) user.get_attr_vals('mail') user.get_attr_vals('telephoneNumber') user.get_attr_vals('cn') if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % 
CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/acl/valueacl_test.py000066400000000000000000000754541421664411400263330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import pytest, os, ldap from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.user import UserAccount from lib389.idm.account import Anonymous from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) @pytest.fixture(scope="function") def aci_of_user(request, topo): # Add anonymous access aci ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT suffix = Domain(topo.standalone, DEFAULT_SUFFIX) try: suffix.add('aci', ANON_ACI) except ldap.TYPE_OR_VALUE_EXISTS: pass aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.set('aci', None) for i in aci_list: domain.add("aci", i) request.addfinalizer(finofaci) @pytest.fixture(scope="function") def 
_add_user(request, topo): for i in ["Product Development", 'Accounting', "Human Resources"]: ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': i}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) user.create(properties=properties) user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') user.set('mail', 'anuj@anuj.Borah') properties = { 'uid': 'Sam Carter', 'cn': 'Sam Carter', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'SamCarter', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) user.create(properties=properties) properties = { 'uid': 'Kirsten Vaughan', 'cn': 'Kirsten Vaughan', 'sn': 'Kirsten Vaughan', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'KirstenVaughan', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'HARRY', 'cn': 'HARRY', 'sn': 'HARRY', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'HARRY', 'userPassword': 'password' } user = UserAccount(topo.standalone, 'cn=HARRY, ou=Accounting,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) def fin(): for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, HARRY, KIRSTENVAUGHAN, HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: ua = UserAccount(topo.standalone, DN) try: ua.delete() except: pass request.addfinalizer(fin) class _ModTitleArchitectJeffVedder: def __init__(self, topo, value, conn): self.topo = topo self.value = value self.conn = conn self.user = UserAccount(self.conn, USER_DELADD) def add(self): self.user.add("title", 
self.value) def delete(self): self.user.remove("title", self.value) class _DelTitleArchitectJeffVedder: def __init__(self, topo, conn): self.topo = topo self.conn = conn def delete(self): UserAccount(self.conn, USER_DELADD).remove("title", None) class _AddTitleWithRoot: def __init__(self, topo, value): self.topo = topo self.value = value self.user = UserAccount(self.topo.standalone, USER_DELADD) def add(self): self.user.add("title", self.value) def delete(self): self.user.remove("title", self.value) class _AddFREDWithRoot: def __init__(self, topo, title1, title2, title3): self.topo = topo self.title1 = title1 self.title2 = title2 self.title3 = title3 def create(self): properties = { 'uid': 'FRED', 'cn': 'FRED', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'FRED' } user = UserAccount(self.topo.standalone, "cn=FRED, ou=Accounting,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) user.set("title", [self.title1, self.title2, self.title3]) def test_delete_an_attribute_value_we_are_not_allowed_to_delete( topo, _add_user, aci_of_user ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can MODIFY:add an attribute value we are allowed to add :id: 7c41baa6-7aa9-11e8-9bdc-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ '(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() _ModTitleArchitectJeffVedder(topo, "architect", conn).add() def test_donot_allow_write_access_to_title_if_value_is_not_architect( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we cannot MODIFY:add an attribute value we are not allowed to add :id: 822c607e-7aa9-11e8-b2e7-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) # aci will allow to add title architect conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).add() # aci will noo allow to add title architect1 with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "architect1", conn).add() def test_delete_an_attribute_value_we_are_allowed_to_delete( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can MODIFY:delete an attribute value we are allowed to delete :id: 86f36b34-7aa9-11e8-ab16-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() # aci will allow to delete title architect conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) r1 = _ModTitleArchitectJeffVedder(topo, "architect", conn) r1.delete() def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we cannot MODIFY:delete an attribute value we are allowed to delete :id: 8c9f3a90-7aa9-11e8-bf2e-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "engineer").add() # acl will not allow to delete title engineer conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() def test_allow_modify_replace(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can MODIFY:replace an attribute if we have correct add/delete rights. :id: 9148a234-7aa9-11e8-a1f1-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. 
User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ '(title=idiot))")(version 3.0; acl "{}"; ' \ 'allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "idiot").add() _AddTitleWithRoot(topo, "engineer").add() # acl will not allow to delete title engineer conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() def test_allow_modify_delete(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Don't Allow modify:replace because of lack of delete rights :id: 962842d2-7aa9-11e8-b39e-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ '(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "idiot").add() conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() # acl will not allow to delete title idiot conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we cannot MODIFY:replace an attribute if we lack :id: 9b1e6afa-7aa9-11e8-ac5b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ '(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "idiot").add() conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() # acl will not allow to delete title idiot with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can use MODIFY:delete to entirely remove an attribute if we have del rights to all attr values negative case tested next. :id: a0c9e0c4-7aa9-11e8-8880-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ '(title=idiot))")(version 3.0; acl "{}"; allow (write)' \ ' (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "idiot").add() # acl will allow to delete title idiot conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _DelTitleArchitectJeffVedder(topo,conn).delete() def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can use MODIFY:delete to entirely remove an attribute if we have not del rights to all attr values :id: a6862eaa-7aa9-11e8-8bf9-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "sailor").add() # aci will not allow to delete all titles conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _DelTitleArchitectJeffVedder(topo, conn).delete() def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can use MODIFY:replace to entirely remove an attribute if we have del rights to all attr values :id: ab04c7e8-7aa9-11e8-84db-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "architect").add() _AddTitleWithRoot(topo, "idiot").add() # aci allowing to delete an_attribute_if_we_have_del_rights_to_all_attr_values conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _DelTitleArchitectJeffVedder(topo, conn).delete() def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test we cannot DELETE an entry with attribute values we are not allowed delete :id: b525d94c-7aa9-11e8-8539-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddFREDWithRoot(topo, "engineer", "cool dude", "ANuj").create() conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will not allow to delete with pytest.raises(ldap.INSUFFICIENT_ACCESS): UserAccount(conn, FRED).delete() def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test we can DELETE an entry with attribute values we are allowed delete :id: ba138e54-7aa9-11e8-8037-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddFREDWithRoot(topo, "engineer", "cool dude", "scum").create() conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) # aci will allow to delete UserAccount(conn, FRED).delete() def test_allow_title(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that if attr appears in targetattr and in targattrfilters then targattrfilters applies--ie. 
targattrfilters is a refinement of targattrfilters. :id: beadf328-7aa9-11e8-bb08-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targetattr="title")(targattrfilters = "add=title:(|(title=engineer)' \ '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ '(title=scum))")(version 3.0; aci "{}"; allow (write) ' \ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "engineer").add() _AddTitleWithRoot(topo, "cool dude").add() # # aci will not allow to add title topdog conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "topdog", conn).add() def test_allow_to_modify(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that I can have secretary in targetattr and title in targattrfilters. :id: c32e4704-7aa9-11e8-951d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ '(title=scum))")(version 3.0; aci "{}"; allow (write)' \ ' userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "engineer").add() _AddTitleWithRoot(topo, "cool dude").add() conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) user = UserAccount(conn, USER_DELADD) # aci will allow to add 'secretary', "cn=emporte quoi user.add('secretary', "cn=emporte quoi, {}".format(DEFAULT_SUFFIX)) assert user.get_attr_val('secretary') def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Selfwrite does not confer "write" on a targattrfilters atribute. :id: c7b9ec2e-7aa9-11e8-ba4a-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ 'aci "{}"; allow (selfwrite) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) # aci will not allow to add selfwrite_does_not_confer_write_on_a_targattrfilters_atribute conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Selfwrite continues to give rights to attr in targetattr list. :id: cd287680-7aa9-11e8-a8e2-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ '(title=scum))")(version 3.0; aci "{}"; allow (selfwrite) ' \ 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) # selfwrite_continues_to_give_rights_to_attr_in_targetattr_list conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can MODIFY:add an attribute value we are allowed to add with ldap:///anyone :id: d1e1d7ac-7aa9-11e8-b968-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ '(version 3.0; acl "{}"; allow (write) userdn = "ldap:///anyone";)'.format(request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) _AddTitleWithRoot(topo, "engineer").add() # aci will allow to add title architect conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).add() def test_hierarchy(topo, _add_user, aci_of_user, request): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that with two targattrfilters in the hierarchy that the general one applies. 
This is the correct behaviour, even if it's a bit confusing :id: d7ae354a-7aa9-11e8-8b0d-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. Operation should succeed """ ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ 'allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) ACI_BODY1 = '(targattrfilters = "add=title:(title=architect)")(version 3.0; ' \ 'acl "{}"; allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY1) _AddTitleWithRoot(topo, "engineer").add() # aci will allow to add title architect conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).add() # aci will not allow to add title architect with pytest.raises(ldap.INSUFFICIENT_ACCESS): _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can have targattrfilters and search permissions and that ldapmodify works as expected. :id: ddae7a22-7aa9-11e8-ad6b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = "add=title:' \ '(title=arch*)")(version 3.0; acl "{}"; ' \ 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) # aci will allow to add title architect conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) _ModTitleArchitectJeffVedder(topo, "architect", conn).add() def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two( topo, _add_user, aci_of_user, request ): """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. :id: e25d116e-7aa9-11e8-81d8-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. User should follow ACI role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ '"add=title:(title=arch*)")(version 3.0; acl "{}"; allow ' \ '(write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) conn = Anonymous(topo.standalone).bind() user = UserAccount(conn, USER_DELADD) #targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected assert user.get_attr_vals('secretary') assert user.get_attr_vals('mail') assert user.get_attr_vals('objectclass') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/attr_encryption/000077500000000000000000000000001421664411400255745ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/attr_encryption/__init__.py000066400000000000000000000000731421664411400277050ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Attribute Encryption """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py000066400000000000000000000465321421664411400324430ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st as topo from lib389.utils import * from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.backend import Backends from lib389.idm.domain import Domain from lib389.encrypted_attributes import EncryptedAttrs pytestmark = pytest.mark.tier1 USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def enable_user_attr_encryption(topo, request): """ Enables attribute encryption for various attributes Adds a test user with encrypted attributes """ log.info("Enable TLS for attribute encryption") topo.standalone.enable_tls() log.info("Enables attribute encryption") backends = Backends(topo.standalone) backend = backends.list()[0] encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(backend.dn)) log.info("Enables attribute encryption for employeeNumber and telephoneNumber") emp_num_encrypt = encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) telephone_encrypt = encrypt_attrs.create(properties={'cn': 'telephoneNumber', 'nsEncryptionAlgorithm': '3DES'}) log.info("Add a test user with encrypted attributes") users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.replace('employeeNumber', '1000') test_user.replace('telephoneNumber', '1234567890') def fin(): log.info("Remove attribute encryption for various attributes") emp_num_encrypt.delete() telephone_encrypt.delete() request.addfinalizer(fin) return test_user def test_basic(topo, enable_user_attr_encryption): """Tests encrypted attributes with a test user entry :id: d767d5c8-b934-4b14-9774-bd13480d81b3 :setup: Standalone instance Enable AES encryption config on employeenumber Enable 3DES 
encryption config on telephoneNumber Add a test user with with encrypted attributes :steps: 1. Restart the server 2. Check employeenumber encryption enabled 3. Check telephoneNumber encryption enabled 4. Check that encrypted attribute is present for user i.e. telephoneNumber :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful """ log.info("Restart the server") topo.standalone.restart() backends = Backends(topo.standalone) backend = backends.list()[0] encrypt_attrs = backend.get_encrypted_attrs() log.info("Extracting values of cn from the list of objects in encrypt_attrs") log.info("And appending the cn values in a list") enc_attrs_cns = [] for enc_attr in encrypt_attrs: enc_attrs_cns.append(enc_attr.rdn) log.info("Check employeenumber encryption is enabled") assert "employeeNumber" in enc_attrs_cns log.info("Check telephoneNumber encryption is enabled") assert "telephoneNumber" in enc_attrs_cns log.info("Check that encrypted attribute is present for user i.e. telephoneNumber") assert enable_user_attr_encryption.present('telephoneNumber') def test_export_import_ciphertext(topo, enable_user_attr_encryption): """Configure attribute encryption, store some data, check that we can export the ciphertext :id: b433e215-2926-48a5-818f-c21abc40fc2d :setup: Standalone instance Enable AES encryption config on employeenumber Enable 3DES encryption config on telephoneNumber Add a test user with encrypted attributes :steps: 1. Export data as ciphertext 2. Check that the attribute is present in the exported file 3. Check that the encrypted value of attribute is not present in the exported file 4. Delete the test user entry with encrypted data 5. Import the previously exported data as ciphertext 6. Check attribute telephoneNumber should be imported :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. 
This should be successful 6. This should be successful """ log.info("Export data as ciphertext") export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") # Offline export topo.standalone.stop() if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): log.fatal('Failed to run offline db2ldif') assert False topo.standalone.start() log.info("Check that the attribute is present in the exported file") log.info("Check that the encrypted value of attribute is not present in the exported file") with open(export_ldif, 'r') as ldif_file: ldif = ldif_file.read() assert 'telephoneNumber' in ldif assert 'telephoneNumber: 1234567890' not in ldif log.info("Delete the test user entry with encrypted data") enable_user_attr_encryption.delete() log.info("Import data as ciphertext, which was exported previously") import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") # Offline export topo.standalone.stop() if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), excludeSuffixes=None, encrypt=False, import_file=import_ldif): log.fatal('Failed to run offline ldif2db') assert False topo.standalone.start() log.info("Check that the data with encrypted attribute is imported properly") users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('testuser') assert user.present("telephoneNumber") def test_export_import_plaintext(topo, enable_user_attr_encryption): """Configure attribute encryption, store some data, check that we can export the plain text :id: b171e215-0456-48a5-245f-c21abc40fc2d :setup: Standalone instance Enable AES encryption config on employeenumber Enable 3DES encryption config on telephoneNumber Add a test user with encrypted attributes :steps: 1. Export data as plain text 2. Check that the attribute is present in the exported file 3. 
Check that the encrypted value of attribute is also present in the exported file 4. Delete the test user entry with encrypted data 5. Import data as plaintext 6. Check attribute value of telephoneNumber :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. This should be successful 6. This should be successful """ log.info("Export data as plain text") export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") # Offline export topo.standalone.stop() if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), excludeSuffixes=None, encrypt=True, repl_data=None, outputfile=export_ldif): log.fatal('Failed to run offline db2ldif') assert False topo.standalone.start() log.info("Check that the attribute is present in the exported file") log.info("Check that the plain text value of the encrypted attribute is present in the exported file") with open(export_ldif, 'r') as ldif_file: assert 'telephoneNumber: 1234567890' in ldif_file.read() log.info("Delete the test user entry with encrypted data") enable_user_attr_encryption.delete() log.info("Import data as plain text, which was exported previously") import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") # Offline export topo.standalone.stop() if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), excludeSuffixes=None, encrypt=True, import_file=import_ldif): log.fatal('Failed to run offline ldif2db') assert False topo.standalone.start() log.info("Check that the attribute is imported properly") users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('testuser') assert user.present("telephoneNumber") def test_attr_encryption_unindexed(topo, enable_user_attr_encryption): """Configure attribute encryption for an un-indexed attribute, check that we can export encrypted data :id: d3ef38e1-bb5a-44d8-a3a4-4a25a57e3454 :setup: 
Standalone instance Enable AES encryption config on employeenumber Enable 3DES encryption config on telephoneNumber Add a test user with encrypted attributes :steps: 1. Export data as cipher text 2. Check that the unindexed attribute employeenumber is present in exported ldif file 3. Check that the unindexed attribute employeenumber value is not present in exported ldif file :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful """ log.info("Export data as cipher text") export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "emp_num_ciphertext.ldif") # Offline export topo.standalone.stop() if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): log.fatal('Failed to run offline db2ldif') assert False topo.standalone.start() log.info("Check that the attribute is present in the exported file") log.info("Check that the encrypted value of attribute is not present in the exported file") with open(export_ldif, 'r') as ldif_file: ldif = ldif_file.read() assert 'employeeNumber' in ldif assert 'employeeNumber: 1000' not in ldif def test_attr_encryption_multiple_backends(topo, enable_user_attr_encryption): """Tests Configuration of attribute encryption for multiple backends Where both the backends have attribute encryption :id: 9ece3e6c-96b7-4dd5-b092-d76dda23472d :setup: Standalone instance SSL Enabled :steps: 1. Add two test backends 2. Configure attribute encryption for telephoneNumber in one test backend 3. Configure attribute encryption for employeenumber in another test backend 4. Add a test user in both backends with encrypted attributes 5. Export data as ciphertext from both backends 6. Check that telephoneNumber is encrypted in the ldif file of db1 7. Check that employeeNumber is encrypted in the ldif file of db2 8. Delete both test backends :expectedresults: 1. This should be successful 2. 
This should be successful 3. This should be successful 4. This should be successful 5. This should be successful 6. This should be successful 7. This should be successful 8. This should be successful """ log.info("Add two test backends") test_suffix1 = 'dc=test1,dc=com' test_db1 = 'test_db1' test_suffix2 = 'dc=test2,dc=com' test_db2 = 'test_db2' # Create backends backends = Backends(topo.standalone) backend = backends.list()[0] test_backend1 = backends.create(properties={'cn': test_db1, 'nsslapd-suffix': test_suffix1}) test_backend2 = backends.create(properties={'cn': test_db2, 'nsslapd-suffix': test_suffix2}) # Create the top of the tree suffix1 = Domain(topo.standalone, test_suffix1) test1 = suffix1.create(properties={'dc': 'test1'}) suffix2 = Domain(topo.standalone, test_suffix2) test2 = suffix2.create(properties={'dc': 'test2'}) log.info("Enables attribute encryption for telephoneNumber in test_backend1") backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', 'nsEncryptionAlgorithm': 'AES'}) log.info("Enables attribute encryption for employeeNumber in test_backend2") backend2_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend2.dn)) b2_encrypt = backend2_encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) log.info("Add a test user with encrypted attributes in both backends") users = UserAccounts(topo.standalone, test1.dn, None) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.replace('telephoneNumber', '1234567890') users = UserAccounts(topo.standalone, test2.dn, None) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.replace('employeeNumber', '1000') log.info("Export data as ciphertext from both backends") export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") 
export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") # Offline export topo.standalone.stop() if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): log.fatal('Failed to run offline db2ldif') assert False if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): log.fatal('Failed to run offline db2ldif') assert False topo.standalone.start() log.info("Check that the attribute is present in the exported file in db1") log.info("Check that the encrypted value of attribute is not present in the exported file in db1") with open(export_db1, 'r') as ldif_file: ldif = ldif_file.read() assert 'telephoneNumber' in ldif assert 'telephoneNumber: 1234567890' not in ldif log.info("Check that the attribute is present in the exported file in db2") log.info("Check that the encrypted value of attribute is not present in the exported file in db2") with open(export_db2, 'r') as ldif_file: ldif = ldif_file.read() assert 'employeeNumber' in ldif assert 'employeeNumber: 1000' not in ldif log.info("Delete test backends") test_backend1.delete() test_backend2.delete() def test_attr_encryption_backends(topo, enable_user_attr_encryption): """Tests Configuration of attribute encryption for single backend where more backends are present :id: f3ef40e1-17d6-44d8-a3a4-4a25a57e9064 :setup: Standalone instance SSL Enabled :steps: 1. Add two test backends 2. Configure attribute encryption for telephoneNumber in one test backend 3. Add a test user in both backends with telephoneNumber 4. Export ldif from both test backends 5. Check that telephoneNumber is encrypted in the ldif file of db1 6. Check that telephoneNumber is not encrypted in the ldif file of db2 7. Delete both test backends :expectedresults: 1. This should be successful 2. This should be successful 3. 
This should be successful 4. This should be successful 5. This should be successful 6. This should be successful 7. This should be successful """ log.info("Add two test backends") test_suffix1 = 'dc=test1,dc=com' test_db1 = 'test_db1' test_suffix2 = 'dc=test2,dc=com' test_db2 = 'test_db2' # Create backends backends = Backends(topo.standalone) test_backend1 = backends.create(properties={'cn': test_db1, 'nsslapd-suffix': test_suffix1}) test_backend2 = backends.create(properties={'cn': test_db2, 'nsslapd-suffix': test_suffix2}) # Create the top of the tree suffix1 = Domain(topo.standalone, test_suffix1) test1 = suffix1.create(properties={'dc': 'test1'}) suffix2 = Domain(topo.standalone, test_suffix2) test2 = suffix2.create(properties={'dc': 'test2'}) log.info("Enables attribute encryption for telephoneNumber in test_backend1") backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', 'nsEncryptionAlgorithm': 'AES'}) log.info("Add a test user with telephoneNumber in both backends") users = UserAccounts(topo.standalone, test1.dn, None) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.replace('telephoneNumber', '1234567890') users = UserAccounts(topo.standalone, test2.dn, None) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.replace('telephoneNumber', '1234567890') log.info("Export data as ciphertext from both backends") export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") # Offline export topo.standalone.stop() if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): log.fatal('Failed to run offline db2ldif') assert False if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), 
excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): log.fatal('Failed to run offline db2ldif') assert False topo.standalone.start() log.info("Check that the attribute is present in the exported file in db1") log.info("Check that the encrypted value of attribute is not present in the exported file in db1") with open(export_db1, 'r') as ldif_file: ldif = ldif_file.read() assert 'telephoneNumber' in ldif assert 'telephoneNumber: 1234567890' not in ldif log.info("Check that the attribute is present in the exported file in db2") log.info("Check that the value of attribute is also present in the exported file in db2") with open(export_db2, 'r') as ldif_file: ldif = ldif_file.read() assert 'telephoneNumber' in ldif assert 'telephoneNumber: 1234567890' in ldif log.info("Delete test backends") test_backend1.delete() test_backend2.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/auth_token/000077500000000000000000000000001421664411400245115ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/auth_token/__init__.py000066400000000000000000000000731421664411400266220ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Authentication Token """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/auth_token/basic_auth_test.py000066400000000000000000000214001421664411400302210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import ldap import pytest import time from lib389.idm.user import nsUserAccounts, UserAccounts from lib389.topologies import topology_st as topology from lib389.paths import Paths from lib389.utils import ds_is_older from lib389._constants import * from lib389.idm.directorymanager import DirectoryManager from lib389.idm.account import Anonymous from lib389.extended_operations import LdapSSOTokenRequest default_paths = Paths() pytestmark = pytest.mark.tier1 USER_PASSWORD = "password aouoaeu" TEST_KEY = "4PXhmtKG7iCdT9C49GoBdD92x5X1tvF3eW9bHq4ND2Q=" @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") def test_ldap_auth_token_config(topology): """ Test that we are able to configure the ldapssotoken backend with various types and states. :id: e9b9360b-76df-40ef-9f45-b448df4c9eda :setup: Standalone instance :steps: 1. Enable the feature 2. Set a key manually. 3. Regerate a key server side. 4. Attempt to set invalid keys. 5. Disable the feature 6. Assert that key changes are rejected :expectedresults: 1. Feature enables 2. Key is set and accepted 3. The key is regenerated and unique 4. The key is rejected 5. The disable functions online 6. The key changes are rejected """ # Enable token topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. 
# Set a key topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) # regen a key topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') k1 = topology.standalone.config.get_attr_val_utf8('nsslapd-ldapssotoken-secret') assert(k1 != TEST_KEY) # set an invalid key with pytest.raises(ldap.UNWILLING_TO_PERFORM): topology.standalone.config.set('nsslapd-ldapssotoken-secret', 'invalid key') with pytest.raises(ldap.UNWILLING_TO_PERFORM): topology.standalone.config.set('nsslapd-ldapssotoken-secret', '') # Disable token topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. # Set a key with pytest.raises(ldap.OPERATIONS_ERROR): topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) # regen a key with pytest.raises(ldap.OPERATIONS_ERROR): topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") def test_ldap_auth_token_nsuser(topology): """ Test that we can generate and authenticate with authentication tokens for users in the directory, as well as security properties around these tokens. :id: 65335341-c85b-457d-ac7d-c4079ac90a60 :setup: Standalone instance :steps: 1. Create an account 2. Generate a token for the account 3. Authenticate with the token 4. Assert that a token can not be issued from a token-authed account 5. Regenerate the server key 6. Assert the token no longer authenticates :expectedresults: 1. Account is created 2. Token is generated 3. Token authenticates 4. Token is NOT issued 5. The key is regenerated 6. The token fails to bind. """ topology.standalone.enable_tls() topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) # Create a user as dm. 
user = nsusers.create(properties={ 'uid': 'test_nsuser', 'cn': 'test_nsuser', 'displayName': 'testNsuser', 'legalName': 'testNsuser', 'uidNumber': '1001', 'gidNumber': '1001', 'homeDirectory': '/home/testnsuser', 'userPassword': USER_PASSWORD, }) # Create a new con and bind as the user. user_conn = user.bind(USER_PASSWORD) user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser') # From the user_conn do an extop_s for the token token = user_account.request_sso_token() # Great! Now do a bind where the token is the pw: # user_conn_tok = user.bind(token) user_conn_tok = user.authenticate_sso_token(token) # Assert whoami. # Assert that user_conn_tok with the token can NOT get a new token. user_tok_account = nsUserAccounts(user_conn_tok, DEFAULT_SUFFIX).get('test_nsuser') with pytest.raises(ldap.UNWILLING_TO_PERFORM): user_tok_account.request_sso_token() # Check with a lowered ttl (should deny) topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '1') # Set a low ttl # Ensure it's past - the one time I'll allow a sleep .... time.sleep(2) with pytest.raises(ldap.INVALID_CREDENTIALS): user.authenticate_sso_token(token) topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '3600') # Set a reasonable # Regenerate the server token key topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') # check we fail to authenticate. with pytest.raises(ldap.INVALID_CREDENTIALS): user.authenticate_sso_token(token) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") def test_ldap_auth_token_disabled(topology): """ Assert when the feature is disabled that token operations are not able to progress :id: ccde5d0b-7f2d-49d5-b9d5-f7082f8f36a3 :setup: Standalone instance :steps: 1. Create a user 2. Attempt to get a token. 3. Enable the feature, get a token, then disable it. 4. Attempt to auth :expectedresults: 1. Success 2. Fails to get a token 3. 
Token is received 4. Auth fails as token is disabled. """ topology.standalone.enable_tls() topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) # Create a user as dm. user = nsusers.create(properties={ 'uid': 'test_nsuser1', 'cn': 'test_nsuser1', 'displayName': 'testNsuser1', 'legalName': 'testNsuser1', 'uidNumber': '1002', 'gidNumber': '1002', 'homeDirectory': '/home/testnsuser1', 'userPassword': USER_PASSWORD, }) # Create a new con and bind as the user. user_conn = user.bind(USER_PASSWORD) user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser1') # From the user_conn do an extop_s for the token with pytest.raises(ldap.PROTOCOL_ERROR): user_account.request_sso_token() # Now enable it topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') token = user_account.request_sso_token() # Now disable topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # Now attempt to bind (should fail) with pytest.raises(ldap.INVALID_CREDENTIALS): user_account.authenticate_sso_token(token) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") def test_ldap_auth_token_directory_manager(topology): """ Test token auth with directory manager is denied :id: ec9aec64-3edf-4f3f-853a-7527b0c42124 :setup: Standalone instance :steps: 1. Attempt to generate a token as DM :expectedresults: 1. Fails """ topology.standalone.enable_tls() topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. dm = DirectoryManager(topology.standalone) # Try getting a token at DM, should fail. 
with pytest.raises(ldap.UNWILLING_TO_PERFORM): dm.request_sso_token() ## test as anon (will fail) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") def test_ldap_auth_token_anonymous(topology): """ Test token auth with Anonymous is denied. :id: 966068c3-fbc6-468d-a554-18d68d1d895b :setup: Standalone instance :steps: 1. Attempt to generate a token as Anonymous :expectedresults: 1. Fails """ topology.standalone.enable_tls() topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. anon_conn = Anonymous(topology.standalone).bind() # Build the request req = LdapSSOTokenRequest() # Get the response with pytest.raises(ldap.UNWILLING_TO_PERFORM): (_, res) = anon_conn.extop_s(req, escapehatch='i am sure') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/automember_plugin/000077500000000000000000000000001421664411400260665ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/automember_plugin/__init__.py000066400000000000000000000000621421664411400301750ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Auto Member """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py000066400000000000000000000053571421664411400330400ustar00rootroot00000000000000import logging import pytest import os import time from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.topologies import topology_st as topo log = logging.getLogger(__name__) @pytest.fixture(scope="module") def automember_fixture(topo, request): # Create group group_obj = Groups(topo.standalone, DEFAULT_SUFFIX) automem_group = group_obj.create(properties={'cn': 'testgroup'}) # Create users users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) NUM_USERS = 1000 for num 
in range(NUM_USERS): num_ran = int(round(num)) USER_NAME = 'test%05d' % num_ran users.create(properties={ 'uid': USER_NAME, 'sn': USER_NAME, 'cn': USER_NAME, 'uidNumber': '%s' % num_ran, 'gidNumber': '%s' % num_ran, 'homeDirectory': '/home/%s' % USER_NAME, 'mail': '%s@redhat.com' % USER_NAME, 'userpassword': 'pass%s' % num_ran, }) # Create automember definitions and regex rules automember_prop = { 'cn': 'testgroup_definition', 'autoMemberScope': DEFAULT_SUFFIX, 'autoMemberFilter': 'objectclass=posixaccount', 'autoMemberDefaultGroup': automem_group.dn, 'autoMemberGroupingAttr': 'member:dn', } automembers = AutoMembershipDefinitions(topo.standalone) auto_def = automembers.create(properties=automember_prop) auto_def.add_regex_rule("regex1", automem_group.dn, include_regex=['uid=.*']) # Enable plugin automemberplugin = AutoMembershipPlugin(topo.standalone) automemberplugin.enable() topo.standalone.restart() def test_abort(automember_fixture, topo): """Test the abort rebuild task :id: 24763279-48ec-4c34-91b3-f681679dec3a :setup: Standalone Instance :steps: 1. Setup automember and create a bunch of users 2. Start rebuild task 3. Abort rebuild task 4. Verify rebuild task was aborted :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ automemberplugin = AutoMembershipPlugin(topo.standalone) # Run rebuild task task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top") time.sleep(1) # Abort rebuild task automemberplugin.abort_fixup() # Wait for rebuild task to finish task.wait() # Check errors log for abort message assert topo.standalone.searchErrorsLog("task was intentionally aborted") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py000066400000000000000000000104441421664411400325010ustar00rootroot00000000000000import logging import pytest import os from lib389.utils import ds_is_older from lib389._constants import * from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.topologies import topology_st as topo # Skip on older versions pytestmark = [pytest.mark.tier1, pytest.mark.skipif(ds_is_older('1.4.0'), reason="Not implemented")] DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def automember_fixture(topo, request): # Create group groups = [] group_obj = Groups(topo.standalone, DEFAULT_SUFFIX) groups.append(group_obj.create(properties={'cn': 'testgroup'})) groups.append(group_obj.create(properties={'cn': 'testgroup2'})) groups.append(group_obj.create(properties={'cn': 'testgroup3'})) # Create test user user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = user_accts.create_test_user() # Create automember definitions and regex rules automember_prop = { 'cn': 'testgroup_definition', 'autoMemberScope': DEFAULT_SUFFIX, 'autoMemberFilter': 'objectclass=posixaccount', 'autoMemberDefaultGroup': 
def test_mods(automember_fixture, topo):
    """Modify the user so that it is added to the various automember groups

    :id: 28a2b070-7f16-4905-8831-c80fa6441693
    :setup: Standalone Instance
    :steps:
        1. Update user that should add it to group[0]
        2. Update user that should add it to group[1]
        3. Update user that should add it to group[2]
        4. Update user that should add it to group[0]
        5. Test rebuild task correctly moves user to group[1]
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    (user, groups) = automember_fixture

    def _assert_only_member_of(expected_idx):
        """Assert the user is a member of exactly groups[expected_idx].

        BUG FIX: the original test called is_member() on the expected group
        without asserting its return value, so a *missing* membership was
        silently ignored and only unexpected memberships failed the test.
        """
        for idx, group in enumerate(groups):
            if idx == expected_idx:
                assert group.is_member(user.dn)
            else:
                assert not group.is_member(user.dn)

    # Update user which should go into group[0]
    user.replace('cn', 'whatever')
    _assert_only_member_of(0)

    # Update user which should go into group[1]
    user.replace('cn', 'mark')
    _assert_only_member_of(1)

    # Update user which should go into group[2]
    user.replace('cn', 'simon')
    _assert_only_member_of(2)

    # Update user which should go back into group[0] (full circle)
    user.replace('cn', 'whatever')
    _assert_only_member_of(0)

    #
    # Test rebuild task.  First disable plugin
    #
    automemberplugin = AutoMembershipPlugin(topo.standalone)
    automemberplugin.disable()
    topo.standalone.restart()

    # Make change that would move the entry from group[0] to group[1]
    user.replace('cn', 'mark')

    # Enable plugin
    automemberplugin.enable()
    topo.standalone.restart()

    # Run rebuild task (plugin was disabled when the entry changed, so the
    # task must re-evaluate the rules and move the user to group[1]).
    task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
    task.wait()

    # Test membership after the rebuild
    _assert_only_member_of(1)

    # Success
    log.info("Test PASSED")


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
def test_automemberscope(automember_fixture, topo):
    """Test if the automember scope is valid

    :id: c3d3f250-e7fd-4441-8387-3d24c156e982
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Create automember with invalid cn that raises
           UNWILLING_TO_PERFORM exception
        2. If exception raised, set scope to any cn
        3. If exception is not raised, set scope to with ou=People
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
    """
    (group, automembers, automember) = automember_fixture

    # Kept for when issue #49465 is fixed — the create() below should then
    # be rejected with UNWILLING_TO_PERFORM (invalid scope validation).
    automember_prop = {
        'cn': 'anyrandomcn',
        'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
        'autoMemberFilter': 'objectclass=*',
        'autoMemberDefaultGroup': group.dn,
        'autoMemberGroupingAttr': 'member:dn',
    }

    # depends on issue #49465
    # with pytest.raises(ldap.UNWILLING_TO_PERFORM):
    #     automember = automembers.create(properties=automember_prop)
    #     automember.set_scope("cn=No Entry,%s" % DEFAULT_SUFFIX)

    # A valid scope must be accepted
    automember.set_scope("ou=People,%s" % DEFAULT_SUFFIX)


def test_automemberfilter(automember_fixture, topo):
    """Test if the automember filter is valid

    :id: 935c55de-52dc-4f80-b7dd-3aacd30f6df2
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Create automember with invalid filter that raises
           UNWILLING_TO_PERFORM exception
        2. If exception raised, set filter to the invalid filter
        3. If exception is not raised, set filter as all objectClasses
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
    """
    (group, automembers, automember) = automember_fixture

    automember_prop = {
        'cn': 'anyrandomcn',
        'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
        'autoMemberFilter': '(ou=People',
        'autoMemberDefaultGroup': group.dn,
        'autoMemberGroupingAttr': 'member:dn',
    }

    # Creating a definition with an unbalanced filter must be rejected
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        automembers.create(properties=automember_prop)

    # BUG FIX: this statement used to live *inside* the pytest.raises block
    # above, after the raising create() call, so it was never executed.
    # Modifying an existing definition with an invalid filter must also be
    # rejected, so give it its own raises context.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        automember.set_filter("(ou=People")

    # Restore a valid filter on the fixture's definition
    automember.set_filter("objectClass=*")


def test_adduser(automember_fixture, topo):
    """Test if member is automatically added to the group

    :id: 14f1e2f5-2162-41ab-962c-5293516baf2e
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Create a user
        2. Assert that the user is member of the group
    :expectedresults:
        1. Should be success
        2. Should be success
    """
    (group, automembers, automember) = automember_fixture

    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user = users.create(properties=TEST_USER_PROPERTIES)

    assert group.is_member(user.dn)
    user.delete()
@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented")
def test_delete_default_group(automember_fixture, topo):
    """If memberof is enable and a user became member of default group
    because of automember rule then delete the default group should succeeds

    :id: 8b55d077-8851-45a2-a547-b28a7983a3c2
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Enable memberof plugin
        2. Create a user
        3. Assert that the user is member of the default group
        4. Delete the default group
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
    """
    (group, automembers, automember) = automember_fixture

    # Local import keeps the module import list untouched for older suites
    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    topo.standalone.restart()
    # Plugin-level error logging so the delete is traced
    topo.standalone.setLogLevel(65536)

    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)
    try:
        # The automember rule must have placed the user in the default group
        assert group.is_member(user_1.dn)

        # Deleting the (still referenced) default group must succeed,
        # and the plugin logs exactly one "does not exist" message for it.
        group.delete()
        error_lines = topo.standalone.ds_error_log.match(
            '.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn)
        assert (len(error_lines) == 1)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)


@pytest.mark.skipif(ds_is_older("1.4.3.3"), reason="Not implemented")
def test_no_default_group(automember_fixture, topo):
    """If memberof is enable and a user became member of default group
    and default group does not exist then an INFO should be logged

    :id: 8882972f-fb3e-4d77-9729-0235897676bc
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Enable memberof plugin
        2. Set errorlog level to 0 (default)
        3. delete the default group
        4. Create a user
        5. Retrieve message in log
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
        5. Should be success
    """
    (group, automembers, automember) = automember_fixture

    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    topo.standalone.restart()
    # Default log level — the message must show up even without debug logging
    topo.standalone.setLogLevel(0)

    # delete it if it exists (a previous test may already have removed it)
    try:
        group.get_attr_val_utf8('creatorsname')
        group.delete()
    except ldap.NO_SUCH_OBJECT:
        pass

    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)
    try:
        error_lines = topo.standalone.ds_error_log.match(
            '.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn)
        assert (len(error_lines) > 0)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)
@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented")
def test_delete_target_group(automember_fixture, topo):
    """If memberof is enabld and a user became member of target group
    because of automember rule then delete the target group should succeeds

    :id: bf5745e3-3de8-485d-8a68-e2fd460ce1cb
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Recreate the default group if it was deleted before
        2. Create a target group (using regex)
        3. Create a target group automember rule (regex)
        4. Enable memberof plugin
        5. Create a user that goes into the target group
        6. Assert that the user is member of the target group
        7. Delete the target group
        8. Check automember skipped the regex automember rule because
           target group did not exist
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
        5. Should be success
        6. Should be success
        7. Should be success
        8. Should be success
    """
    (group, automembers, automember) = automember_fixture

    # default group that may have been deleted in previous tests
    # BUG FIX: this used a bare "except: pass" which silently swallowed
    # *every* failure (including typos and server errors), not just the
    # expected "group already exists" case.  Catch only ALREADY_EXISTS.
    try:
        groups = Groups(topo.standalone, DEFAULT_SUFFIX)
        group = groups.create(properties={'cn': 'testgroup'})
    except ldap.ALREADY_EXISTS:
        pass

    # target group that will receive regex automember
    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
    group_regex = groups.create(properties={'cn': 'testgroup_regex'})

    # regex automember definition
    automember_regex_prop = {
        'cn': 'automember regex',
        'autoMemberTargetGroup': group_regex.dn,
        'autoMemberInclusiveRegex': 'uid=.*1',
    }
    automember_regex_dn = 'cn=automember regex, %s' % automember.dn
    automember_regexes = AutoMembershipRegexRule(topo.standalone, automember_regex_dn)
    automember_regex = automember_regexes.create(properties=automember_regex_prop)

    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    topo.standalone.restart()
    topo.standalone.setLogLevel(65536)

    # create a user that goes into the target group but not in the default group
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)
    try:
        assert group_regex.is_member(user_1.dn)
        assert not group.is_member(user_1.dn)

        # delete that target filter group
        group_regex.delete()
        error_lines = topo.standalone.ds_error_log.match(
            '.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group_regex.dn)
        # one line for default group and one for target group
        assert (len(error_lines) == 1)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)
# --- END COPYRIGHT BLOCK --- """ Will test AutoMememer Plugin with AotoMember Task and Retro Changelog """ import os import pytest import time import re from lib389.topologies import topology_m1 as topo from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.domain import Domain from lib389.idm.posixgroup import PosixGroups from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, \ MemberOfPlugin, AutoMembershipRegexRules, AutoMembershipDefinition, RetroChangelogPlugin from lib389.backend import Backends from lib389.config import Config from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccounts from lib389.idm.group import Groups, Group, UniqueGroup, nsAdminGroups, nsAdminGroup from lib389.tasks import Tasks, AutomemberRebuildMembershipTask, ExportTask from lib389.utils import ds_is_older from lib389.paths import Paths import ldap pytestmark = pytest.mark.tier1 BASE_SUFF = "dc=autoMembers,dc=com" TEST_BASE = "dc=testAutoMembers,dc=com" BASE_REPL = "dc=replAutoMembers,dc=com" SUBSUFFIX = f'dc=SubSuffix,{BASE_SUFF}' PLUGIN_AUTO = "cn=Auto Membership Plugin,cn=plugins,cn=config" REPMANDN = "cn=ReplManager" CACHE_SIZE = '-1' CACHEMEM_SIZE = '10485760' AUTO_MEM_SCOPE_TEST = f'ou=Employees,{TEST_BASE}' AUTO_MEM_SCOPE_BASE = f'ou=Employees,{BASE_SUFF}' def add_base_entries(topo): """ Will create suffix """ for suffix, backend_name in [(BASE_SUFF, 'AutoMembers'), (SUBSUFFIX, 'SubAutoMembers'), (TEST_BASE, 'testAutoMembers'), (BASE_REPL, 'ReplAutoMembers'), ("dc=SubSuffix,{}".format(BASE_REPL), 'ReplSubAutoMembers')]: Backends(topo.ms["supplier1"]).create(properties={ 'cn': backend_name, 'nsslapd-suffix': suffix, 'nsslapd-CACHE_SIZE': CACHE_SIZE, 'nsslapd-CACHEMEM_SIZE': CACHEMEM_SIZE}) Domain(topo.ms["supplier1"], suffix).create(properties={ 'dc': suffix.split('=')[1].split(',')[0], 'aci': [ f'(targetattr="userPassword")(version 3.0;aci "Replication Manager ' f'Access";allow (write,compare) 
userdn="ldap:///{REPMANDN},cn=config";)', f'(target ="ldap:///{suffix}")(targetattr !="cn||sn||uid") (version 3.0;' f'acl "Group Permission";allow (write) ' f'(groupdn = "ldap:///cn=GroupMgr,{suffix}");)', f'(target ="ldap:///{suffix}")(targetattr !="userPassword")(version 3.0;acl ' f'"Anonym-read access"; allow (read,search,compare)(userdn="ldap:///anyone");)' ] }) for suffix, ou_cn in [(BASE_SUFF, 'userGroups'), (BASE_SUFF, 'Employees'), (BASE_SUFF, 'TaskEmployees'), (TEST_BASE, 'Employees')]: OrganizationalUnits(topo.ms["supplier1"], suffix).create(properties={'ou': ou_cn}) def add_user(topo, user_id, suffix, uid_no, gid_no, role_usr): """ Will create entries with nsAdminGroup objectclass """ objectclasses = ['top', 'person', 'posixaccount', 'inetuser', 'nsMemberOf', 'nsAccount', 'nsAdminGroup'] if ds_is_older('1.4.0'): objectclasses.remove('nsAccount') user = nsAdminGroups(topo.ms["supplier1"], suffix, rdn=None).create(properties={ 'cn': user_id, 'sn': user_id, 'uid': user_id, 'homeDirectory': '/home/{}'.format(user_id), 'loginShell': '/bin/bash', 'uidNumber': uid_no, 'gidNumber': gid_no, 'objectclass': objectclasses, 'nsAdminGroupName': role_usr, 'seeAlso': 'uid={},{}'.format(user_id, suffix), 'entrydn': 'uid={},{}'.format(user_id, suffix) }) return user def check_groups(topo, group_dn, user_dn, member): """ Will check MEMBATTR """ return bool(Group(topo.ms["supplier1"], group_dn).present(member, user_dn)) def add_group(topo, suffix, group_id): """ Will create groups """ Groups(topo.ms["supplier1"], suffix, rdn=None).create(properties={ 'cn': group_id }) def number_memberof(topo, user, number): """ Function to check if the memberOf attribute is present. 
""" return len(nsAdminGroup(topo.ms["supplier1"], user).get_attr_vals_utf8('memberOf')) == number def add_group_entries(topo): """ Will create multiple entries needed for this test script """ for suffix, group in [(SUBSUFFIX, 'subsuffGroups'), (SUBSUFFIX, 'Employees'), (TEST_BASE, 'testuserGroups'), ("dc=SubSuffix,{}".format(BASE_REPL), 'replsubGroups'), (BASE_REPL, 'replsubGroups')]: add_group(topo, suffix, group) for group_cn in ['SubDef1', 'SubDef2', 'SubDef3', 'SubDef4', 'SubDef5']: add_group(topo, BASE_REPL, group_cn) for user in ['Managers', 'Contractors', 'Interns', 'Visitors']: add_group(topo, "cn=replsubGroups,{}".format(BASE_REPL), user) for ou_ou, group_cn in [("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef1'), ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef2'), ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef3'), ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef4'), ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef5'), ("ou=userGroups,{}".format(BASE_SUFF), 'Contractors'), ("ou=userGroups,{}".format(BASE_SUFF), 'Managers'), ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef1'), ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef2'), ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef3'), ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef4'), ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef5')]: add_group(topo, ou_ou, group_cn) for ou_ou, group_cn, grp_no in [(SUBSUFFIX, 'SubDef1', '111'), (SUBSUFFIX, 'SubDef2', '222'), (SUBSUFFIX, 'SubDef3', '333'), (SUBSUFFIX, 'SubDef4', '444'), (SUBSUFFIX, 'SubDef5', '555'), ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Managers', '666'), ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Contractors', '999')]: PosixGroups(topo.ms["supplier1"], ou_ou, rdn=None).create(properties={ 'cn': group_cn, 'gidNumber': grp_no }) def add_member_attr(topo, group_dn, user_dn, member): """ Will add members to groups """ Group(topo.ms["supplier1"], group_dn).add(member, user_dn) def change_grp_objclass(new_object, member, type_of): """ Will 
@pytest.fixture(scope="module")
def _create_all_entries(topo):
    """
    Fixture module that will create required entries for test cases.
    """
    # Base suffixes, OUs and the whole group tree
    add_base_entries(topo)
    add_group_entries(topo)

    # Point the plugin at a custom config area and enable memberOf
    auto = AutoMembershipPlugin(topo.ms["supplier1"])
    auto.add("nsslapd-pluginConfigArea", "cn=autoMembersPlugin,{}".format(BASE_REPL))
    MemberOfPlugin(topo.ms["supplier1"]).enable()

    automembers_definitions = AutoMembershipDefinitions(topo.ms["supplier1"])

    # Definition: posixAccounts under ou=Employees fall into the SuffDef
    # default groups, linked via member:dn
    automembers_definitions.create(properties={
        'cn': 'userGroups',
        'autoMemberScope': f'ou=Employees,{BASE_SUFF}',
        'autoMemberFilter': "objectclass=posixAccount",
        'autoMemberDefaultGroup': [
            f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}',
            f'cn=SuffDef2,ou=userGroups,{BASE_SUFF}',
            f'cn=SuffDef3,ou=userGroups,{BASE_SUFF}',
            f'cn=SuffDef4,ou=userGroups,{BASE_SUFF}',
            f'cn=SuffDef5,ou=userGroups,{BASE_SUFF}'
        ],
        'autoMemberGroupingAttr': 'member:dn',
    })

    # Same scope, but grouped through memberuid:dn into the sub suffix
    automembers_definitions.create(properties={
        'cn': 'subsuffGroups',
        'autoMemberScope': f'ou=Employees,{BASE_SUFF}',
        'autoMemberFilter': "objectclass=posixAccount",
        'autoMemberDefaultGroup': [
            f'cn=SubDef1,dc=subSuffix,{BASE_SUFF}',
            f'cn=SubDef2,dc=subSuffix,{BASE_SUFF}',
            f'cn=SubDef3,dc=subSuffix,{BASE_SUFF}',
            f'cn=SubDef4,dc=subSuffix,{BASE_SUFF}',
            f'cn=SubDef5,dc=subSuffix,{BASE_SUFF}',
        ],
        'autoMemberGroupingAttr': 'memberuid:dn',
    })

    # Regex rules routing users out of the userGroups defaults
    automembers_regex_usergroup = AutoMembershipRegexRules(
        topo.ms["supplier1"], f'cn=userGroups,{auto.dn}')
    automembers_regex_usergroup.create(properties={
        'cn': 'Managers',
        'description': f'Group placement for Managers',
        'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'],
        'autoMemberInclusiveRegex': [
            "gidNumber=^9",
            "nsAdminGroupName=^Manager",
        ],
        "autoMemberExclusiveRegex": [
            "gidNumber=^[6-8]",
            "nsAdminGroupName=^Junior$",
        ],
    })
    automembers_regex_usergroup.create(properties={
        'cn': 'Contractors',
        'description': f'Group placement for Contractors',
        'autoMemberTargetGroup': [f'cn=Contractors,ou=userGroups,{BASE_SUFF}'],
        'autoMemberInclusiveRegex': [
            "gidNumber=^1",
            "nsAdminGroupName=Contractor",
        ],
        "autoMemberExclusiveRegex": [
            "gidNumber=^[2-4]",
            "nsAdminGroupName=^Employee$",
        ],
    })

    # Regex rules for the sub suffix definition
    automembers_regex_sub = AutoMembershipRegexRules(
        topo.ms["supplier1"], f'cn=subsuffGroups,{auto.dn}')
    automembers_regex_sub.create(properties={
        'cn': 'Managers',
        'description': f'Group placement for Managers',
        'autoMemberTargetGroup': [f'cn=Managers,cn=subsuffGroups,dc=subSuffix,{BASE_SUFF}'],
        'autoMemberInclusiveRegex': [
            "gidNumber=^[1-4]..3$",
            "uidNumber=^5.5$",
            "nsAdminGroupName=^Manager$|^Supervisor$",
        ],
        "autoMemberExclusiveRegex": [
            "gidNumber=^[6-8].0$",
            "uidNumber=^999$",
            "nsAdminGroupName=^Junior$",
        ],
    })
    automembers_regex_sub.create(properties={
        'cn': 'Contractors',
        'description': f'Group placement for Contractors',
        'autoMemberTargetGroup': [f'cn=Contractors,cn=subsuffGroups,dc=SubSuffix,{BASE_SUFF}'],
        'autoMemberInclusiveRegex': [
            "gidNumber=^[5-9].3$",
            "uidNumber=^8..5$",
            "nsAdminGroupName=^Contract|^Temporary$",
        ],
        "autoMemberExclusiveRegex": [
            "gidNumber=^[2-4]00$",
            "uidNumber=^[1,3,8]99$",
            "nsAdminGroupName=^Employee$",
        ],
    })

    # Definitions on the dedicated test suffix (user and host flavours)
    for cn_name, ou_name in [('testuserGroups', 'Employees'),
                             ('hostGroups', 'HostEntries')]:
        automembers_definitions.create(properties={
            'cn': cn_name,
            'autoMemberScope': f'ou={ou_name},dc=testautoMembers,dc=com',
            'autoMemberFilter': "objectclass=posixAccount",
            'autoMemberDefaultGroup': [
                f'cn=TestDef1,cn={cn_name},dc=testautoMembers,dc=com',
                f'cn=TestDef2,cn={cn_name},dc=testautoMembers,dc=com',
                f'cn=TestDef3,cn={cn_name},dc=testautoMembers,dc=com',
                f'cn=TestDef4,cn={cn_name},dc=testautoMembers,dc=com',
                f'cn=TestDef5,cn={cn_name},dc=testautoMembers,dc=com',
            ],
            'autoMemberGroupingAttr': 'member:dn',
        })

    # Restart so the plugin picks up the new config area and definitions
    topo.ms["supplier1"].restart()
def test_disable_the_plug_in(topo, _create_all_entries):
    """Plug-in and check the status

    :id: 4feee76c-e7ff-11e8-836e-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Disable the plug-in and check the status
        2. Enable the plug-in and check the status
    :expected results:
        1. Should success
        2. Should success
    """
    plugin = AutoMembershipPlugin(topo.ms["supplier1"])

    plugin.disable()
    assert not plugin.status()

    plugin.enable()
    assert plugin.status()


def test_custom_config_area(topo, _create_all_entries):
    """Custom config area

    :id: 4fefb8cc-e7ff-11e8-92fd-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Check whether the plugin can be configured for custom config area
        2. After adding custom config area can be removed
    :expected results:
        1. Should success
        2. Should success
    """
    plugin = AutoMembershipPlugin(topo.ms["supplier1"])

    # Point the plugin at a custom config area, then remove it again
    plugin.replace("nsslapd-pluginConfigArea", DEFAULT_SUFFIX)
    assert plugin.get_attr_val_utf8("nsslapd-pluginConfigArea")

    plugin.remove("nsslapd-pluginConfigArea", DEFAULT_SUFFIX)
    assert not plugin.get_attr_val_utf8("nsslapd-pluginConfigArea")


@pytest.mark.bz834053
def test_ability_to_control_behavior_of_modifiers_name(topo, _create_all_entries):
    """Control behaviour of modifier's name

    :id: 4ff16370-e7ff-11e8-838d-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Turn on 'nsslapd-plugin-binddn-tracking'
        2. Add an user
        3. Check the creatorsname in the user entry
        4. Check the internalCreatorsname in the user entry
        5. Check the modifiersname in the user entry
        6. Check the internalModifiersname in the user entry
        7. Unset nsslapd-plugin-binddn-tracking attribute under cn=config
           and delete the test enteries
    :expected results:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
        6. Should success
        7. Should success
    """
    instance1 = topo.ms["supplier1"]
    configure = Config(instance1)

    # Enable bind-DN tracking so internal* attributes record the plugin DN
    configure.replace('nsslapd-plugin-binddn-tracking', 'on')
    instance1.restart()
    assert configure.get_attr_val_utf8('nsslapd-plugin-binddn-tracking') == 'on'

    user = add_user(topo, "User_autoMembers_05", "ou=Employees,{}".format(TEST_BASE),
                    "19", "18", "Supervisor")

    # search the User DN name for the creatorsname in user entry
    assert user.get_attr_val_utf8('creatorsname') == 'cn=directory manager'
    # search the User DN name for the internalCreatorsname in user entry
    assert user.get_attr_val_utf8('internalCreatorsname') == \
        'cn=ldbm database,cn=plugins,cn=config'
    # search the modifiersname in the user entry
    assert user.get_attr_val_utf8('modifiersname') == 'cn=directory manager'
    # search the internalModifiersname in the user entry
    assert user.get_attr_val_utf8('internalModifiersname') == \
        'cn=MemberOf Plugin,cn=plugins,cn=config'

    # unset nsslapd-plugin-binddn-tracking attribute
    configure.replace('nsslapd-plugin-binddn-tracking', 'off')
    instance1.restart()

    # deleting test enteries of automember05 test case
    user.delete()
def test_posixaccount_objectclass_automemberdefaultgroup(topo, _create_all_entries):
    """Verify the PosixAccount user

    :id: 4ff0f642-e7ff-11e8-ac88-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Add users with PosixAccount ObjectClass
        2. Verify the same user added as a member to autoMemberDefaultGroup
    :expected results:
        1. Should success
        2. Should success
    """
    test_id = "autoMembers_05"
    default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)

    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "18", "Supervisor")
    # posixAccount user must land in the default group
    assert check_groups(topo, default_group, user.dn, "member")

    # After the delete, the membership must be gone as well
    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group, user.dn, "member")


def test_duplicated_member_attributes_added_when_the_entry_is_re_created(topo, _create_all_entries):
    """Checking whether duplicated member attributes added when the entry
    is re-created

    :id: 4ff2afaa-e7ff-11e8-8a92-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Create a user
        2. It should present as member in all automember groups
        3. Delete use
        4. It should not present as member in all automember groups
        5. Recreate same user
        6. It should present as member in all automember groups
    :expected results:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
        6. Should success
    """
    test_id = "autoMembers_06"
    default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)

    # First incarnation of the user
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "16", "Supervisor")
    assert check_groups(topo, default_group, user.dn, "member")

    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group, user.dn, "member")

    # Re-create the same uid; membership must come back without duplicates
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "15", "Supervisor")
    assert check_groups(topo, default_group, user.dn, "member")
    user.delete()


def test_multi_valued_automemberdefaultgroup_for_hostgroups(topo, _create_all_entries):
    """Multi-valued autoMemberDefaultGroup

    :id: 4ff32a02-e7ff-11e8-99a1-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Create a user
        2. Check user is present in all Automember Groups as member
        3. Delete the user
        4. Check user is not present in all Automember Groups
    :expected results:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
    """
    test_id = "autoMembers_07"
    default_groups = ["cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE),
                      "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE),
                      "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE)]

    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "14", "TestEngr")
    # Membership must exist in every configured default group
    for group_dn in default_groups:
        assert check_groups(topo, group_dn, user.dn, "member")

    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_groups[0], user.dn, "member")
def test_plugin_creates_member_attributes_of_the_automemberdefaultgroup(topo, _create_all_entries):
    """Checking whether plugin creates member attributes if it already
    exists for some of the autoMemberDefaultGroup

    :id: 4ff3ba76-e7ff-11e8-9846-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Add a non existing user to some groups as member
        2. Then Create the user
        3. Check the same user is present to other groups also as member
    :expected results:
        1. Should success
        2. Should success
        3. Should success
    """
    test_id = "autoMembers_08"
    check_group_dns = ["cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE),
                       "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE),
                       "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE)]

    # Pre-populate two groups with the (not yet existing) user DN
    pending_dn = "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST)
    add_member_attr(topo, "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE),
                    pending_dn, "member")
    add_member_attr(topo, "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE),
                    pending_dn, "member")

    # Now create the user — the plugin must fill in the remaining groups
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "14", "TestEngr")
    for group_dn in check_group_dns:
        assert check_groups(topo, group_dn, user.dn, "member")
    user.delete()


def test_multi_valued_automemberdefaultgroup_with_uniquemember(topo, _create_all_entries):
    """Multi-valued autoMemberDefaultGroup with uniquemember attributes

    :id: 4ff4461c-e7ff-11e8-8124-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Modify automember config entry to use uniquemember
        2. Change object class for all groups which is used for automember
           grouping
        3. Add user uniquemember attributes
        4. Check uniqueMember attribute in groups
        5. Revert the changes done above
    :expected results:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
    """
    test_id = "autoMembers_09"
    instance = topo.ms["supplier1"]
    auto = AutoMembershipPlugin(topo.ms["supplier1"])

    # Modify automember config entry to use uniquemember: cn=testuserGroups,PLUGIN_AUTO
    AutoMembershipDefinition(
        instance, "cn=testuserGroups,{}".format(auto.dn)).replace('autoMemberGroupingAttr', "uniquemember: dn")
    instance.restart()

    group_dns = ["cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE),
                 "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE),
                 "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE),
                 "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE),
                 "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE)]

    # Switch every default group to groupOfUniqueNames
    for group_dn in group_dns:
        change_grp_objclass("groupOfUniqueNames", "member",
                            Group(topo.ms["supplier1"], group_dn))

    # Add user: uid=User_{test_id}, AutoMemScope
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                    "19", "14", "New")

    # Checking groups...
    assert user.dn.lower() in UniqueGroup(
        topo.ms["supplier1"], group_dns[0]).get_attr_val_utf8("uniqueMember")

    # Delete user uid=User_{test_id},AutoMemScope
    user.delete()

    # Change the automember config back to using "member"
    AutoMembershipDefinition(
        instance, "cn=testuserGroups,{}".format(auto.dn)).replace('autoMemberGroupingAttr', "member: dn")
    for group_dn in group_dns:
        change_grp_objclass("groupOfNames", "uniquemember",
                            UniqueGroup(topo.ms["supplier1"], group_dn))
    topo.ms["supplier1"].restart()
def test_invalid_automembergroupingattr_member(topo, _create_all_entries):
    """Invalid autoMemberGroupingAttr-member

    :id: 4ff4b598-e7ff-11e8-a3a3-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Change object class for one group which is used for automember
           grouping
        2. Try to add user with invalid parameter
        3. Check member attribute on other groups
        4. Check member attribute on group where object class was changed
        5. Revert the object class where it was changed
    :expected results:
        1. Should success
        2. Should fail (ldap.UNWILLING_TO_PERFORM)
        3. Should success
        4. Should fail (AssertionError)
        5. Should success
    """
    test_id = "autoMembers_10"
    default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)
    instance_of_group = Group(topo.ms["supplier1"], default_group)

    # groupOfUniqueNames cannot take a "member" value, so the add must fail
    change_grp_objclass("groupOfUniqueNames", "member", instance_of_group)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                 "19", "20", "Invalid")

    # The user was never created, so it must not appear as a member
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group,
                            "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST), "member")
    change_grp_objclass("groupOfNames", "uniquemember", instance_of_group)


def test_valid_and_invalid_automembergroupingattr(topo, _create_all_entries):
    """Valid and invalid autoMemberGroupingAttr

    :id: 4ff4fad0-e7ff-11e8-9cbd-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Change object class for some groups which is used for automember
           grouping
        2. Try to add user with invalid parameter
        3. Check member attribute on other groups
        4. Check member attribute on groups where object class was changed
        5. Revert the object class where it was changed
    :expected results:
        1. Should success
        2. Should fail (ldap.UNWILLING_TO_PERFORM)
        3. Should success
        4. Should fail (AssertionError)
        5. Should success
    """
    test_id = "autoMembers_11"
    default_group_1 = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)
    default_group_2 = "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE)
    default_group_3 = "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE)
    default_group_4 = "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE)
    default_group_5 = "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE)
    grp_4_5 = [default_group_4, default_group_5]

    # Only two of the five groups are switched to groupOfUniqueNames, which
    # makes the single "member:dn" grouping attribute invalid for them
    for grp in grp_4_5:
        instance_of_group = Group(topo.ms["supplier1"], grp)
        change_grp_objclass("groupOfUniqueNames", "member", instance_of_group)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST,
                 "19", "24", "MixUsers")

    # BUG FIX: these membership checks used "cn=User_..." DNs, but add_user
    # creates entries with a uid RDN ('entrydn': 'uid=...'), so the negative
    # checks could never match anything and passed vacuously.  Use the DN
    # the entry would actually have had.
    user_dn = "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST)
    for grp in [default_group_1, default_group_2, default_group_3]:
        assert not check_groups(topo, grp, user_dn, "member")
    for grp in grp_4_5:
        with pytest.raises(AssertionError):
            assert check_groups(topo, grp, user_dn, "member")

    # Restore the original group objectClass
    for grp in grp_4_5:
        instance_of_group = Group(topo.ms["supplier1"], grp)
        change_grp_objclass("groupOfNames", "uniquemember", instance_of_group)
Should success """ test_id = "autoMembers_12" default_group = f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}' user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_BASE, "19", "0", "HR") assert check_groups(topo, default_group, user.dn, "member") assert number_memberof(topo, user.dn, 5) user.delete() LIST_FOR_PARAMETERIZATION = [ ("autoMembers_22", "5288", "5289", "Contractor", "5291", "5292", "Contractors"), ("autoMembers_21", "1161", "1162", "Contractor", "1162", "1163", "Contractors"), ("autoMembers_20", "1188", "1189", "CEO", "1191", "1192", "Contractors"), ("autoMembers_15", "9288", "9289", "Manager", "9291", "9292", "Managers"), ("autoMembers_14", "561", "562", "Manager", "562", "563", "Managers"), ("autoMembers_13", "9788", "9789", "VPEngg", "9392", "9393", "Managers")] @pytest.mark.parametrize("testid, uid, gid, role, uid2, gid2, m_grp", LIST_FOR_PARAMETERIZATION) def test_matching_gid_role_inclusive_regular_expression(topo, _create_all_entries, testid, uid, gid, role, uid2, gid2, m_grp): """Matching gid nos and Role for the Inclusive regular expression :id: 4ff71ce8-e7ff-11e8-b69b-8c16451d917b :parametrized: yes :setup: Instance with replication :steps: 1. Create users with matching gid nos and Role for the Inclusive regular expression 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName 3. It will a match for contract_grp :expected results: 1. Should success 2. Should success 3. 
Should success """ contract_grp = f'cn={m_grp},ou=userGroups,{BASE_SUFF}' user1 = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) user2 = add_user(topo, "SecondUser_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid2, gid2, role) for user_dn in [user1.dn, user2.dn]: assert check_groups(topo, contract_grp, user_dn, "member") assert number_memberof(topo, user1.dn, 1) for user in [user1, user2]: user.delete() LIST_FOR_PARAMETERIZATION = [ ("autoMembers_26", "5788", "5789", "Intern", "Contractors", "SuffDef1", 5), ("autoMembers_25", "9788", "9789", "Employee", "Contractors", "Managers", 1), ("autoMembers_24", "1110", "1111", "Employee", "Contractors", "SuffDef1", 5), ("autoMembers_23", "2788", "2789", "Contractor", "Contractors", "SuffDef1", 5), ("autoMembers_19", "5788", "5789", "HRManager", "Managers", "SuffDef1", 5), ("autoMembers_18", "6788", "6789", "Junior", "Managers", "SuffDef1", 5), ("autoMembers_17", "562", "563", "Junior", "Managers", "SuffDef1", 5), ("autoMembers_16", "6788", "6789", "Manager", "Managers", "SuffDef1", 5)] @pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp, number", LIST_FOR_PARAMETERIZATION) def test_gid_and_role_inclusive_exclusive_regular_expression(topo, _create_all_entries, testid, uid, gid, role, c_grp, m_grp, number): """Matching gid nos and Role for the Inclusive and Exclusive regular expression :id: 4ff7d160-e7ff-11e8-8fbc-8c16451d917b :parametrized: yes :setup: Instance with replication :steps: 1. Create user with not matching gid nos and Role for the Inclusive and Exclusive regular expression 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName 3. It will not match for contract_grp(Exclusive regular expression) 4. It will match for default_group(Inclusive regular expression) :expected results: 1. Should success 2. Should success 3. Should success 4. 
Should success """ contract_grp = f'cn={c_grp},ou=userGroups,{BASE_SUFF}' default_group = f'cn={m_grp},ou=userGroups,{BASE_SUFF}' user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) with pytest.raises(AssertionError): assert check_groups(topo, contract_grp, user.dn, "member") check_groups(topo, default_group, user.dn, "member") assert number_memberof(topo, user.dn, number) user.delete() LIST_FOR_PARAMETERIZATION = [ ("autoMembers_32", "555", "720", "Employee", "SubDef1", "SubDef3"), ("autoMembers_31", "515", "200", "Junior", "SubDef1", "SubDef5"), ("autoMembers_30", "999", "400", "Supervisor", "SubDef1", "SubDef2"), ("autoMembers_28", "555", "3663", "ContractHR", "Contractors,cn=subsuffGroups", "Managers,cn=subsuffGroups")] @pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp", LIST_FOR_PARAMETERIZATION) def test_managers_contractors_exclusive_regex_rules_member_uid(topo, _create_all_entries, testid, uid, gid, role, c_grp, m_grp): """Match both managers and contractors exclusive regex rules :id: 4ff8be18-e7ff-11e8-94aa-8c16451d917b :parametrized: yes :setup: Instance with replication :steps: 1. Add Users to match both managers and contractors exclusive regex rules, memberUid created in Default grp 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName 3. It will match for default_group1 and default_group2(Inclusive regular expression) :expected results: 1. Should success 2. Should success 3. 
Should success """ default_group1 = f'cn={c_grp},{SUBSUFFIX}' default_group2 = f'cn={m_grp},{SUBSUFFIX}' user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) for group in [default_group1, default_group2]: assert check_groups(topo, group, user.dn, "memberuid") user.delete() LIST_FOR_PARAMETERIZATION = [ ("autoMembers_27", "595", "690", "ContractHR", "Managers", "Contractors"), ("autoMembers_29", "8195", "2753", "Employee", "Contractors", "Managers"), ("autoMembers_33", "545", "3333", "Supervisor", "Contractors", "Managers"), ("autoMembers_34", "8195", "693", "Temporary", "Managers", "Contractors")] @pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp", LIST_FOR_PARAMETERIZATION) def test_managers_inclusive_regex_rule(topo, _create_all_entries, testid, uid, gid, role, c_grp, m_grp): """Match managers inclusive regex rule, and no inclusive/exclusive Contractors regex rules :id: 4ff8d862-e7ff-11e8-b688-8c16451d917b :parametrized: yes :setup: Instance with replication :steps: 1. Add User to match managers inclusive regex rule, and no inclusive/exclusive Contractors regex rules 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName(Supervisor) 3. It will match for managers_grp(Inclusive regular expression) 4. It will not match for contract_grp(Exclusive regular expression) :expected results: 1. Should success 2. Should success 3. Should success 4. 
Should success """ contract_grp = f'cn={c_grp},cn=subsuffGroups,{SUBSUFFIX}' managers_grp = f'cn={m_grp},cn=subsuffGroups,{SUBSUFFIX}' user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) check_groups(topo, managers_grp, user.dn, "memberuid") with pytest.raises(AssertionError): assert check_groups(topo, contract_grp, user.dn, "memberuid") user.delete() def test_reject_invalid_config_and_we_donot_deadlock_the_server(topo, _create_all_entries): """Verify DS reject invalid config, and we don't deadlock the server :id: 4ff90c38-e7ff-11e8-b72a-8c16451d917b :setup: Instance with replication :steps: 1. Verify DS reject invalid config, 2. This operation don't deadlock the server :expected results: 1. Should success 2. Should success """ # Changing config area to dc=automembers,dc=com instance = AutoMembershipPlugin(topo.ms["supplier1"]) instance.replace("nsslapd-pluginConfigArea", BASE_SUFF) topo.ms["supplier1"] .restart() # Attempting to add invalid config... automembers = AutoMembershipDefinitions(topo.ms["supplier1"], BASE_SUFF) with pytest.raises(ldap.UNWILLING_TO_PERFORM): automembers.create(properties={ 'cn': 'userGroups', "autoMemberScope": BASE_SUFF, "autoMemberFilter": "objectclass=posixAccount", "autoMemberDefaultGroup": f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}', "autoMemberGroupingAttr": "member: dn" }) # Verify server is still working automembers = AutoMembershipRegexRules(topo.ms["supplier1"], f'cn=userGroups,cn=Auto Membership Plugin,' f'cn=plugins,cn=config') with pytest.raises(ldap.ALREADY_EXISTS): automembers.create(properties={ 'cn': 'Managers', 'description': f'Group placement for Managers', 'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'], 'autoMemberInclusiveRegex': [ "gidNumber=^9", "nsAdminGroupName=^Manager", ], }) # Adding first user... 
for uid in range(300, 302): UserAccounts(topo.ms["supplier1"], BASE_SUFF, rdn=None).create_test_user(uid=uid, gid=uid) # Adding this line code to remove the automembers plugin configuration. instance.remove("nsslapd-pluginConfigArea", BASE_SUFF) topo.ms["supplier1"] .restart() @pytest.fixture(scope="module") def _startuptask(topo): """ Fixture module that will change required entries for test cases. """ for Configs in ["cn=Managers,cn=subsuffGroups", "cn=Contractors,cn=subsuffGroups", "cn=testuserGroups", "cn=subsuffGroups", "cn=hostGroups"]: AutoMembershipDefinition(topo.ms["supplier1"], f'{Configs},{PLUGIN_AUTO}').delete() AutoMembershipDefinition(topo.ms["supplier1"], "cn=userGroups,{}".format(PLUGIN_AUTO)).replace( 'autoMemberScope', 'ou=TaskEmployees,dc=autoMembers,dc=com') topo.ms['supplier1'].restart() @pytest.fixture(scope="function") def _fixture_for_build_task(request, topo): def finof(): supplier = topo.ms['supplier1'] auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): user.delete() request.addfinalizer(finof) def bulk_check_groups(topo, GROUP_DN, MEMBATTR, TOTAL_MEM): assert len(nsAdminGroup(topo, GROUP_DN).get_attr_vals_utf8(MEMBATTR)) == TOTAL_MEM def test_automemtask_re_build_task(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff973a8-e7ff-11e8-a89b-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users and enable autoMembers plug-in 2. Run automembers re-build task to create the member attributes 3. Search for any error logs :expected results: 1. Success 2. Success 3. 
Success """ supplier = topo.ms['supplier1'] testid = "autoMemTask_01" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) contract_grp = "cn=Contractors,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # make sure the retro changelog is disabled RetroChangelogPlugin(supplier).disable() AutoMembershipPlugin(supplier).disable() supplier.restart() for i in range(10): add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(1188), str(1189), "Manager") for grp in (managers_grp, contract_grp): with pytest.raises(AssertionError): assert check_groups(topo, grp, f'uid=User_autoMemTask_010,{auto_mem_scope}', 'member') AutoMembershipPlugin(supplier).enable() supplier.restart() error_string = "automember_rebuild_task_thread" AutomemberRebuildMembershipTask(supplier).create(properties={ 'basedn': auto_mem_scope, 'filter': "objectClass=posixAccount" }) # Search for any error logs assert not supplier.searchErrorsLog(error_string) for grp in (managers_grp, contract_grp): bulk_check_groups(supplier, grp, "member", 10) def ldif_check_groups(USERS_DN, MEMBATTR, TOTAL_MEM, LDIF_FILE): study = open('{}'.format(LDIF_FILE), 'r') study_ready = study.read() assert len(re.findall("{}: {}".format(MEMBATTR, USERS_DN.lower()), study_ready)) == TOTAL_MEM def check_file_exists(export_ldif): count = 0 while not os.path.exists(export_ldif) and count < 3: time.sleep(1) count += 1 count = 0 while (os.stat(export_ldif).st_size == 0) and count < 3: time.sleep(1) count += 1 if os.path.exists(export_ldif) and os.stat(export_ldif).st_size != 0: return True else: return False def test_automemtask_export_task(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff98b18-e7ff-11e8-872a-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users and enable autoMembers plug-in 2. 
Run automembers export task to create an ldif file with member attributes :expected results: 1. Success 2. Success """ supplier = topo.ms['supplier1'] p = Paths('supplier1') testid = "autoMemTask_02" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # Disabling plugin AutoMembershipPlugin(supplier).disable() supplier.restart() for i in range(10): add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(2788), str(2789), "Manager") with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) AutoMembershipPlugin(supplier).enable() supplier.restart() export_ldif = p.backup_dir + "/Out_Export_02.ldif" if os.path.exists(export_ldif): os.remove(export_ldif) exp_task = Tasks(supplier) exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=posixAccount', ldif_out=export_ldif) check_file_exists(export_ldif) ldif_check_groups("cn={}".format(user_rdn), "member", 10, export_ldif) os.remove(export_ldif) def test_automemtask_mapping(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff9a206-e7ff-11e8-bf59-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users and enable autoMembers plug-in 2. Run automembers Mapping task with input/output ldif files :expected results: 1. Should success 2. 
Should success """ supplier = topo.ms['supplier1'] p = Paths('supplier1') testid = "autoMemTask_02" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) export_ldif = p.backup_dir+"/Out_Export_02.ldif" output_ldif3 = p.backup_dir+"/Output_03.ldif" for file in [export_ldif, output_ldif3]: if os.path.exists(file): os.remove(file) for i in range(10): add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(2788), str(2789), "Manager") ExportTask(supplier).export_suffix_to_ldif(ldiffile=export_ldif, suffix=BASE_SUFF) check_file_exists(export_ldif) map_task = Tasks(supplier) map_task.automemberMap(ldif_in=export_ldif, ldif_out=output_ldif3) check_file_exists(output_ldif3) ldif_check_groups("cn={}".format(user_rdn), "member", 10, output_ldif3) for file in [export_ldif, output_ldif3]: os.remove(file) def test_automemtask_re_build(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff9b944-e7ff-11e8-ad35-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users with inetOrgPerson object class 2. Run automembers re-build task to create the member attributes, exp to FAIL :expected results: 1. Should success 2. 
Should not success """ supplier = topo.ms['supplier1'] testid = "autoMemTask_04" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # Disabling plugin AutoMembershipPlugin(supplier).disable() supplier.restart() for number in range(10): add_user(topo, f'{user_rdn}{number}', auto_mem_scope, str(number), str(number), "Manager") with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) # Enabling plugin AutoMembershipPlugin(supplier).enable() supplier.restart() AutomemberRebuildMembershipTask(supplier).create(properties={ 'basedn': auto_mem_scope, 'filter': "objectClass=inetOrgPerson" }) with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) def test_automemtask_export(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff9cf74-e7ff-11e8-b712-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users with inetOrgPerson objectClass 2. Run automembers export task to create an ldif file with member attributes, exp to FAIL :expected results: 1. Should success 2. 
Should not success """ supplier = topo.ms['supplier1'] p = Paths('supplier1') testid = "autoMemTask_05" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # Disabling plugin AutoMembershipPlugin(supplier).disable() supplier.restart() for number in range(10): add_user(topo, f'{user_rdn}{number}', auto_mem_scope, str(number), str(number), "Manager") with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) # Enabling plugin AutoMembershipPlugin(supplier).enable() supplier.restart() export_ldif = p.backup_dir + "/Out_Export_02.ldif" if os.path.exists(export_ldif): os.remove(export_ldif) exp_task = Tasks(supplier) exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=inetOrgPerson', ldif_out=export_ldif) check_file_exists(export_ldif) with pytest.raises(AssertionError): ldif_check_groups("uid={}".format(user_rdn), "member", 10, export_ldif) os.remove(export_ldif) def test_automemtask_run_re_build(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff9e5c2-e7ff-11e8-943e-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users with inetOrgPerson obj class 2. Change plugin config 3. Enable plug-in and run re-build task to create the member attributes :expected results: 1. Should success 2. Should success 3. 
Should success """ supplier = topo.ms['supplier1'] p = Paths('supplier1') testid = "autoMemTask_06" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # Disabling plugin AutoMembershipPlugin(supplier).disable() supplier.restart() for number in range(10): add_user(topo, f'{user_rdn}{number}', auto_mem_scope, '111', '111', "Manager") for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): user.add('objectclass', 'inetOrgPerson') AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', "objectclass=inetOrgPerson") supplier.restart() with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) AutoMembershipPlugin(supplier).enable() supplier.restart() AutomemberRebuildMembershipTask(supplier).create(properties={ 'basedn': auto_mem_scope, 'filter': "objectClass=inetOrgPerson"}) time.sleep(2) bulk_check_groups(supplier, managers_grp, "member", 10) AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', "objectclass=posixAccount") supplier.restart() def test_automemtask_run_export(topo, _create_all_entries, _startuptask, _fixture_for_build_task): """ :id: 4ff9fba2-e7ff-11e8-a5ec-8c16451d917b :setup: 4 Instances with replication :steps: 1. Add 10 users with inetOrgPerson objectClass 2. change plugin config 3. Run export task to create an ldif file with member attributes :expected results: 1. Should success 2. Should success 3. 
Should success """ supplier = topo.ms['supplier1'] p = Paths('supplier1') testid = "autoMemTask_07" auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) user_rdn = "User_{}".format(testid) # Disabling plugin AutoMembershipPlugin(supplier).disable() supplier.restart() for number in range(10): add_user(topo, f'{user_rdn}{number}', auto_mem_scope, '222', '222', "Manager") for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): user.add('objectclass', 'inetOrgPerson') AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', "objectclass=inetOrgPerson") supplier.restart() # Enabling plugin AutoMembershipPlugin(supplier).enable() supplier.restart() with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) export_ldif = p.backup_dir + "/Out_Export_02.ldif" if os.path.exists(export_ldif): os.remove(export_ldif) exp_task = Tasks(supplier) exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=inetOrgPerson', ldif_out=export_ldif) check_file_exists(export_ldif) ldif_check_groups("cn={}".format(user_rdn), "member", 10, export_ldif) AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').\ replace('autoMemberFilter', "objectclass=posixAccount") if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/automember_plugin/configuration_test.py000066400000000000000000000076401421664411400323550ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import ldap import os import pytest from lib389.topologies import topology_st as topo from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 @pytest.mark.bz834056 def test_configuration(topo): """Automembership plugin and mixed in the plugin configuration :id: 45a5a8f8-e800-11e8-ab16-8c16451d917b :setup: Single Instance :steps: 1. Automembership plugin fails in a MMR setup, if data and config area mixed in the plugin configuration 2. Plugin configuration should throw proper error messages if not configured properly :expected results: 1. Should success 2. Should success """ # Configure pluginConfigArea for PLUGIN_AUTO AutoMembershipPlugin(topo.standalone).set("nsslapd-pluginConfigArea", 'cn=config') # Enable MemberOf plugin MemberOfPlugin(topo.standalone).enable() topo.standalone.restart() # Add invalid configuration, which mixes data and config area: All will fail automembers = AutoMembershipDefinitions(topo.standalone) with pytest.raises(ldap.UNWILLING_TO_PERFORM): automembers.create(properties={ 'cn': 'autouserGroups', 'autoMemberScope': f'ou=Employees,cn=config', 'autoMemberFilter': "objectclass=posixAccount", 'autoMemberDefaultGroup': [f'cn=SuffDef1,ou=autouserGroups,cn=config', f'cn=SuffDef2,ou=autouserGroups,cn=config'], 'autoMemberGroupingAttr': 'member:dn' }) # Search in error logs assert topo.standalone.ds_error_log.match('.*ERR - auto-membership-plugin - ' 'automember_parse_config_entry - The default group ' '"cn=SuffDef1,ou=autouserGroups,cn=config" ' 'can not be a child of the plugin config area "cn=config"') def test_invalid_regex(topo): """Test invalid regex is properly reportedin the error log :id: a6d89f84-ec76-4871-be96-411d051800b1 :setup: Standalone Instance :steps: 1. Setup automember 2. Add invalid regex 3. Error log reports useful message :expectedresults: 1. Success 2. Success 3. 
Success """ REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config" REGEX_VALUE = "cn=*invalid*" REGEX_ESC_VALUE = "cn=\\*invalid\\*" GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea") automemberplugin = AutoMembershipPlugin(topo.standalone) automember_prop = { 'cn': 'testRegex', 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, 'autoMemberFilter': 'objectclass=*', 'autoMemberDefaultGroup': GROUP_DN, 'autoMemberGroupingAttr': 'member:dn', } automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config") automember_def = automember_defs.create(properties=automember_prop) automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE]) automemberplugin.enable() topo.standalone.restart() # Check errors log for invalid message ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule" ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)" assert topo.standalone.searchErrorsLog(ERR_STR1) assert topo.standalone.searchErrorsLog(ERR_STR2) if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/backups/000077500000000000000000000000001421664411400240005ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/backups/__init__.py000066400000000000000000000001111421664411400261020ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Backup Operations """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/backups/backup_test.py000066400000000000000000000066351421664411400266700ustar00rootroot00000000000000import logging import pytest import os from datetime import datetime from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG from lib389.properties import 
BACKEND_SAMPLE_ENTRIES, TASK_WAIT from lib389.topologies import topology_st as topo from lib389.backend import Backend from lib389.tasks import BackupTask, RestoreTask from lib389.config import BDB_LDBMConfig from lib389 import DSEldif from lib389.utils import ds_is_older import tempfile pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_missing_backend(topo): """Test that an error is returned when a restore is performed for a backend that is no longer present. :id: 889b8028-35cf-41d7-91f6-bc5193683646 :setup: Standalone Instance :steps: 1. Create a second backend 2. Perform a back up 3. Remove one of the backends from the config 4. Perform a restore :expectedresults: 1. Success 2. Success 3. Success 4. Failure """ # Create a new backend BE_NAME = 'backupRoot' BE_SUFFIX = 'dc=back,dc=up' props = { 'cn': BE_NAME, 'nsslapd-suffix': BE_SUFFIX, BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG } be = Backend(topo.standalone) backend_entry = be.create(properties=props) # perform backup backup_dir_name = "backup-%s" % datetime.now().strftime("%Y_%m_%d_%H_%M_%S") archive = os.path.join(topo.standalone.ds_paths.backup_dir, backup_dir_name) backup_task = BackupTask(topo.standalone) task_properties = {'nsArchiveDir': archive} backup_task.create(properties=task_properties) backup_task.wait() assert backup_task.get_exit_code() == 0 # Remove new backend backend_entry.delete() # Restore the backup - it should fail restore_task = RestoreTask(topo.standalone) task_properties = {'nsArchiveDir': archive} restore_task.create(properties=task_properties) restore_task.wait() assert restore_task.get_exit_code() != 0 @pytest.mark.bz1851967 @pytest.mark.ds4112 @pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented") def test_db_home_dir_online_backup(topo): """Test that if the dbhome 
directory is set causing an online backup to fail, the dblayer_backup function should go to error processing section. :id: cfc495d6-2a58-4e4e-aa40-39a15c71f973 :setup: Standalone Instance :steps: 1. Change the dbhome to directory to eg-/tmp/test 2. Perform an online back-up 3. Check for the correct errors in the log :expectedresults: 1. Success 2. Failure 3. Success """ bdb_ldbmconfig = BDB_LDBMConfig(topo.standalone) dseldif = DSEldif(topo.standalone) topo.standalone.stop() with tempfile.TemporaryDirectory() as backup_dir: dseldif.replace(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', f'{backup_dir}') topo.standalone.start() topo.standalone.tasks.db2bak(backup_dir=f'{backup_dir}', args={TASK_WAIT: True}) assert topo.standalone.ds_error_log.match(f".*Failed renaming {backup_dir}.bak back to {backup_dir}") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/basic/000077500000000000000000000000001421664411400234315ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/basic/__init__.py000066400000000000000000000001101421664411400255320ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Basic Directory Server Operations """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/basic/basic_test.py000066400000000000000000001714251421664411400261350ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # from subprocess import check_output, PIPE, run from lib389 import DirSrv from lib389.idm.user import UserAccount, UserAccounts import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.dbgen import dbgen_users from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import DN_DM, PASSWORD, PW_DM from lib389.paths import Paths from lib389.idm.directorymanager import DirectoryManager from lib389.config import LDBMConfig from lib389.dseldif import DSEldif from lib389.rootdse import RootDSE from ....conftest import get_rpm_version from lib389._mapped_object import DSLdapObjects pytestmark = pytest.mark.tier0 default_paths = Paths() log = logging.getLogger(__name__) # Globals USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX USER4_DN = 'uid=user4,' + DEFAULT_SUFFIX ROOTDSE_DEF_ATTR_LIST = ('namingContexts', 'supportedLDAPVersion', 'supportedControl', 'supportedExtension', 'supportedSASLMechanisms', 'vendorName', 'vendorVersion') @pytest.fixture(scope="module") def import_example_ldif(topology_st): """Import the Example LDIF for the tests in this suite""" log.info('Initializing the "basic" test suite') ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" shutil.copy(ldif, import_ldif) import_task = ImportTask(topology_st.standalone) import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) import_task.wait() @pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST) def rootdse_attr(topology_st, request): """Adds an attr from the list as the default attr to the rootDSE """ # Ensure the server is started and connected topology_st.standalone.start() RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr" rootdse_attr_name = ensure_bytes(request.param) log.info(" Add the %s: %s 
to rootdse" % (RETURN_DEFAULT_OPATTR, rootdse_attr_name)) mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] try: topology_st.standalone.modify_s("", mod) except ldap.LDAPError as e: log.fatal('Failed to add attr: error (%s)' % (e.args[0]['desc'])) assert False def fin(): log.info(" Delete the %s: %s from rootdse" % (RETURN_DEFAULT_OPATTR, rootdse_attr_name)) mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] try: topology_st.standalone.modify_s("", mod) except ldap.LDAPError as e: log.fatal('Failed to delete attr: error (%s)' % (e.args[0]['desc'])) assert False request.addfinalizer(fin) return rootdse_attr_name def change_conf_attr(topology_st, suffix, attr_name, attr_value): """Change configuration attribute in the given suffix. Returns previous attribute value. """ entry = DSLdapObject(topology_st.standalone, suffix) attr_value_bck = entry.get_attr_val_bytes(attr_name) log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % ( attr_name, attr_value, attr_value_bck, suffix)) if attr_value is None: entry.remove_all(attr_name) else: entry.replace(attr_name, attr_value) return attr_value_bck def test_basic_ops(topology_st, import_example_ldif): """Tests adds, mods, modrdns, and deletes operations :id: 33f97f55-60bf-46c7-b880-6c488517ae19 :setup: Standalone instance :steps: 1. Add 3 test users USER1, USER2 and USER3 to database 2. Modify (ADD, REPLACE and DELETE) description for USER1 in database 3. Rename USER1, USER2 and USER3 using Modrds 4. Delete test entries USER1, USER2 and USER3 :expectedresults: 1. Add operation should PASS. 2. Modify operations should PASS. 3. Rename operations should PASS. 4. Delete operations should PASS. 
""" log.info('Running test_basic_ops...') USER1_NEWDN = 'cn=user1' USER2_NEWDN = 'cn=user2' USER3_NEWDN = 'cn=user3' NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR # New superior test # # Adds# try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), 'sn': '1', 'cn': 'user1', 'uid': 'user1', 'userpassword': 'password'}))) except ldap.LDAPError as e: log.error('Failed to add test user' + USER1_DN + ': error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), 'sn': '2', 'cn': 'user2', 'uid': 'user2', 'userpassword': 'password'}))) except ldap.LDAPError as e: log.error('Failed to add test user' + USER2_DN + ': error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER3_DN, {'objectclass': "top extensibleObject".split(), 'sn': '3', 'cn': 'user3', 'uid': 'user3', 'userpassword': 'password'}))) except ldap.LDAPError as e: log.error('Failed to add test user' + USER3_DN + ': error ' + e.args[0]['desc']) assert False # # Mods # try: topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', b'New description')]) except ldap.LDAPError as e: log.error('Failed to add description: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', b'Modified description')]) except ldap.LDAPError as e: log.error('Failed to modify description: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', None)]) except ldap.LDAPError as e: log.error('Failed to delete description: error ' + e.args[0]['desc']) assert False # # Modrdns # try: topology_st.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn user1: error ' + 
e.args[0]['desc']) assert False try: topology_st.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0) except ldap.LDAPError as e: log.error('Failed to modrdn user2: error ' + e.args[0]['desc']) assert False # Modrdn - New superior try: topology_st.standalone.rename_s(USER3_DN, USER3_NEWDN, newsuperior=NEW_SUPERIOR, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn(new superior) user3: error ' + e.args[0]['desc']) assert False # # Deletes # try: topology_st.standalone.delete_s(USER1_RDN_DN) except ldap.LDAPError as e: log.error('Failed to delete test entry1: ' + e.args[0]['desc']) assert False try: topology_st.standalone.delete_s(USER2_RDN_DN) except ldap.LDAPError as e: log.error('Failed to delete test entry2: ' + e.args[0]['desc']) assert False try: topology_st.standalone.delete_s(USER3_RDN_DN) except ldap.LDAPError as e: log.error('Failed to delete test entry3: ' + e.args[0]['desc']) assert False log.info('test_basic_ops: PASSED') def test_basic_import_export(topology_st, import_example_ldif): """Test online and offline LDIF import & export :id: 3ceeea11-9235-4e20-b80e-7203b2c6e149 :setup: Standalone instance :steps: 1. Generate a test ldif (50k entries) 2. Import test ldif file using Online import. 3. Import test ldif file using Offline import (ldif2db). 4. Export test ldif file using Online export. 5. Export test ldif file using Offline export (db2ldif). 6. Cleanup - Import the Example LDIF for the other tests in this suite :expectedresults: 1. Test ldif file creation should PASS. 2. Online import should PASS. 3. Offline import should PASS. 4. Online export should PASS. 5. Offline export should PASS. 6. Cleanup should PASS. 
""" log.info('Running test_basic_import_export...') # # Test online/offline LDIF imports # topology_st.standalone.start() # topology_st.standalone.config.set('nsslapd-errorlog-level', '1') # Generate a test ldif (50k entries) log.info("Generating LDIF...") ldif_dir = topology_st.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' dbgen_users(topology_st.standalone, 50000, import_ldif, DEFAULT_SUFFIX) # Online log.info("Importing LDIF online...") import_task = ImportTask(topology_st.standalone) import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) # Wait a bit till the task is created and available for searching time.sleep(0.5) # Good as place as any to quick test the task has some expected attributes if ds_is_newer('1.4.1.2'): assert import_task.present('nstaskcreated') assert import_task.present('nstasklog') assert import_task.present('nstaskcurrentitem') assert import_task.present('nstasktotalitems') assert import_task.present('ttl') import_task.wait() # Offline log.info("Importing LDIF offline...") topology_st.standalone.stop() if not topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif): log.fatal('test_basic_import_export: Offline import failed') assert False topology_st.standalone.start() # # Test online and offline LDIF export # # Online export log.info("Exporting LDIF online...") export_ldif = ldif_dir + '/export.ldif' export_task = ExportTask(topology_st.standalone) export_task.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) export_task.wait() # Offline export log.info("Exporting LDIF offline...") topology_st.standalone.stop() if not topology_st.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), None, None, None, export_ldif): log.fatal('test_basic_import_export: Failed to run offline db2ldif') assert False topology_st.standalone.start() # # Cleanup - Import the Example LDIF for the other tests in this suite # log.info("Restore datrabase, import initial LDIF...") 
ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" shutil.copyfile(ldif, import_ldif) import_task = ImportTask(topology_st.standalone) import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) import_task.wait() log.info('test_basic_import_export: PASSED') def test_basic_backup(topology_st, import_example_ldif): """Tests online and offline backup and restore :id: 0e9d91f8-8748-40b6-ab03-fbd1998eb985 :setup: Standalone instance and import example.ldif :steps: 1. Test online backup using db2bak. 2. Test online restore using bak2db. 3. Test offline backup using db2bak. 4. Test offline restore using bak2db. :expectedresults: 1. Online backup should PASS. 2. Online restore should PASS. 3. Offline backup should PASS. 4. Offline restore should PASS. """ log.info('Running test_basic_backup...') backup_dir = topology_st.standalone.get_bak_dir() + '/backup_test' # Test online backup try: topology_st.standalone.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) except ValueError: log.fatal('test_basic_backup: Online backup failed') assert False # Test online restore try: topology_st.standalone.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) except ValueError: log.fatal('test_basic_backup: Online restore failed') assert False # Test offline backup topology_st.standalone.stop() if not topology_st.standalone.db2bak(backup_dir): log.fatal('test_basic_backup: Offline backup failed') assert False # Test offline restore if not topology_st.standalone.bak2db(backup_dir): log.fatal('test_basic_backup: Offline backup failed') assert False topology_st.standalone.start() log.info('test_basic_backup: PASSED') def test_basic_db2index(topology_st): """Assert db2index can operate correctly. 
:id: 191fc0fd-9722-46b5-a7c3-e8760effe119 :setup: Standalone instance :steps: 1: Call db2index with a single index attribute 2: Call db2index with multiple index attributes 3: Call db2index with no index attributes :expectedresults: 1: Index succeeds for single index attribute 2: Index succeeds for multiple index attributes 3: Index succeeds for all backend indexes which have been obtained from dseldif """ indexes = [] # Error log message to confirm a reindex info_message = 'INFO - bdb_db2index - ' + DEFAULT_BENAME + ':' + ' Indexing attribute: ' log.info('Start the server') topology_st.standalone.start() log.info('Offline reindex, stopping the server') topology_st.standalone.stop() log.info('Reindex with a single index attribute') topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=['uid']) assert topology_st.standalone.searchErrorsLog(info_message + 'uid') log.info('Restart the server to clear the logs') topology_st.standalone.start() topology_st.standalone.stop() log.info('Reindex with multiple attributes') topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=['cn','aci','givenname']) assert topology_st.standalone.searchErrorsLog(info_message + 'cn') assert topology_st.standalone.searchErrorsLog(info_message + 'aci') assert topology_st.standalone.searchErrorsLog(info_message + 'givenname') log.info('Restart the server to clear the logs') topology_st.standalone.start() topology_st.standalone.stop() log.info('Start the server and get all indexes for specified backend') topology_st.standalone.start() dse_ldif = DSEldif(topology_st.standalone) indexes = dse_ldif.get_indexes(DEFAULT_BENAME) numIndexes = len(indexes) assert numIndexes > 0 log.info('Stop the server and reindex with all backend indexes') topology_st.standalone.stop() topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=indexes) log.info('Checking the server logs for %d backend indexes INFO' % numIndexes) for indexNum, index in enumerate(indexes): if index in "entryrdn": assert 
topology_st.standalone.searchErrorsLog( 'INFO - bdb_db2index - ' + DEFAULT_BENAME + ':' + ' Indexing ' + index) else: assert topology_st.standalone.searchErrorsLog( 'INFO - bdb_db2index - ' + DEFAULT_BENAME + ':' + ' Indexing attribute: ' + index) assert indexNum+1 == numIndexes topology_st.standalone.start() def test_basic_acl(topology_st, import_example_ldif): """Run some basic access control (ACL) tests :id: 4f4e705f-32f4-4065-b3a8-2b0c2525798b :setup: Standalone instance :steps: 1. Add two test users USER1_DN and USER2_DN. 2. Add an aci that denies USER1 from doing anything. 3. Set the default anonymous access for USER2. 4. Try searching entries using USER1. 5. Try searching entries using USER2. 6. Try searching entries using root dn. 7. Cleanup - delete test users and test ACI. :expectedresults: 1. Test Users should be added. 2. ACI should be added. 3. This operation should PASS. 4. USER1 should not be able to search anything. 5. USER2 should be able to search everything except password. 6. RootDN should be allowed to search everything. 7. Cleanup should PASS. 
""" """Run some basic access control(ACL) tests""" log.info('Running test_basic_acl...') DENY_ACI = ensure_bytes('(targetattr = "*")(version 3.0;acl "deny user";deny (all)(userdn = "ldap:///%s");)' % USER1_DN) # # Add two users # try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + ': error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), 'sn': '2', 'cn': 'user 2', 'uid': 'user2', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + ': error ' + e.args[0]['desc']) assert False # # Add an aci that denies USER1 from doing anything, # and also set the default anonymous access # try: topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)]) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.args[0]['desc']) assert False # # Make sure USER1_DN can not search anything, but USER2_dn can... # try: topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.args[0]['desc']) assert False try: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') if entries: log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!') assert False except ldap.LDAPError as e: log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.args[0]['desc']) assert False # Now try user2... 
Also check that userpassword is stripped out try: topology_st.standalone.simple_bind_s(USER2_DN, PASSWORD) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.args[0]['desc']) assert False try: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)') if not entries: log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix') assert False if entries[0].hasAttr('userpassword'): # The default anonymous access aci should have stripped out userpassword log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword') assert False except ldap.LDAPError as e: log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) assert False # Make sure RootDN can also search (this also resets the bind dn to the # Root DN for future operations) try: topology_st.standalone.simple_bind_s(DN_DM, PW_DM) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.args[0]['desc']) assert False try: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') if not entries: log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix') assert False except ldap.LDAPError as e: log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) assert False # # Cleanup # try: topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)]) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.delete_s(USER1_DN) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.args[0]['desc']) assert False try: topology_st.standalone.delete_s(USER2_DN) except ldap.LDAPError as e: log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.args[0]['desc']) assert False log.info('test_basic_acl: PASSED') def 
test_basic_searches(topology_st, import_example_ldif): """Tests basic search operations with filters. :id: 426a59ff-49b8-4a70-b377-0c0634a29b6f :setup: Standalone instance, add example.ldif to the database :steps: 1. Execute search command while using different filters. 2. Check number of entries returned by search filters. :expectedresults: 1. Search command should PASS. 2. Number of result entries returned should match number of the database entries according to the search filter. """ log.info('Running test_basic_searches...') filters = (('(uid=scarter)', 1), ('(uid=tmorris*)', 1), ('(uid=*hunt*)', 4), ('(uid=*cope)', 2), ('(mail=*)', 150), ('(roomnumber>=4000)', 35), ('(roomnumber<=4000)', 115), ('(&(roomnumber>=4000)(roomnumber<=4500))', 18), ('(!(l=sunnyvale))', 120), ('(&(uid=t*)(l=santa clara))', 7), ('(|(uid=k*)(uid=r*))', 18), ('(|(uid=t*)(l=sunnyvale))', 50), ('(&(!(uid=r*))(ou=people))', 139), ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3), ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5), ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4),) for (search_filter, search_result) in filters: try: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) if len(entries) != search_result: log.fatal('test_basic_searches: An incorrect number of entries\ was returned from filter (%s): (%d) expected (%d)' % (search_filter, len(entries), search_result)) assert False except ldap.LDAPError as e: log.fatal('Search failed: ' + e.args[0]['desc']) assert False log.info('test_basic_searches: PASSED') @pytest.mark.parametrize('limit,resp', ((('200'), 'PASS'), (('50'), ldap.ADMINLIMIT_EXCEEDED))) def test_basic_search_lookthroughlimit(topology_st, limit, resp, import_example_ldif): """ Tests normal search with lookthroughlimit set high and low. :id: b5119970-6c9f-41b7-9649-de9233226fec :setup: Standalone instance, add example.ldif to the database, search filter (uid=*). :steps: 1. 
Import ldif user file. 2. Change lookthroughlimit to 200. 3. Bind to server as low priv user 4. Run search 1 with "high" lookthroughlimit. 5. Change lookthroughlimit to 50. 6. Run search 2 with "low" lookthroughlimit. 8. Delete user from DB. 9. Reset lookthroughlimit to original. :expectedresults: 1. First search should complete with no error. 2. Second search should return ldap.ADMINLIMIT_EXCEEDED error. """ log.info('Running test_basic_search_lookthroughlimit...') search_filter = "(uid=*)" ltl_orig = change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', limit) try: users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None) user = users.create_test_user() user.replace('userPassword', PASSWORD) except ldap.LDAPError as e: log.fatal('Failed to create test user: error ' + e.args[0]['desc']) assert False try: conn = UserAccount(topology_st.standalone, user.dn).bind(PASSWORD) except ldap.LDAPError as e: log.fatal('Failed to bind test user: error ' + e.args[0]['desc']) assert False try: if resp == ldap.ADMINLIMIT_EXCEEDED: with pytest.raises(ldap.ADMINLIMIT_EXCEEDED): searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) rtype, rdata = conn.result(searchid) else: searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) rtype, rdata = conn.result(searchid) assert(len(rdata) == 151) #151 entries in the imported ldif file using "(uid=*)" except ldap.LDAPError as e: log.fatal('Failed to perform search: error ' + e.args[0]['desc']) assert False finally: #Cleanup change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', ltl_orig) user.delete() log.info('test_basic_search_lookthroughlimit: PASSED') @pytest.fixture(scope="module") def add_test_entry(topology_st, request): # Add test entry topology_st.standalone.add_s(Entry((USER4_DN, {'objectclass': "top extensibleObject".split(), 'cn': 'user1', 'uid': 'user1'}))) 
search_params = [(['1.1'], 'cn', False), (['1.1', 'cn'], 'cn', True), (['+'], 'nsUniqueId', True), (['*'], 'cn', True), (['cn'], 'cn', True)] @pytest.mark.skipif(ds_is_older("1.4.2.0"), reason="Not implemented") @pytest.mark.parametrize("attrs, attr, present", search_params) def test_search_req_attrs(topology_st, add_test_entry, attrs, attr, present): """Test requested attributes in search operations. :id: 426a59ff-49b8-4a70-b377-0c0634a29b6e :parametrized: yes :setup: Standalone instance :steps: 1. Test "1.1" does not return any attributes. 2. Test "1.1" is ignored if there are other requested attributes 3. Test "+" returns all operational attributes 4. Test "*" returns all attributes 5. Test requested attributes :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success """ log.info("Testing attrs: {} attr: {} present: {}".format(attrs, attr, present)) entry = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, 'objectclass=top', attrs) if present: assert entry[0].hasAttr(attr) else: assert not entry[0].hasAttr(attr) def test_basic_referrals(topology_st, import_example_ldif): """Test LDAP server in referral mode. :id: c586aede-7ac3-4e8d-a1cf-bfa8b8d78cc2 :setup: Standalone instance :steps: 1. Set the referral and the backend state 2. Set backend state to referral mode. 3. Set server to not follow referral. 4. Search using referral. 5. Make sure server can restart in referral mode. 6. Cleanup - Delete referral. :expectedresults: 1. Set the referral, and the backend state should PASS. 2. Set backend state to referral mode should PASS. 3. Set server to not follow referral should PASS. 4. referral error(10) should occur. 5. Restart should PASS. 6. Cleanup should PASS. 
""" log.info('Running test_basic_referrals...') SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config' # # Set the referral, and the backend state # try: topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-referral', b'ldap://localhost.localdomain:389/o%3dnetscaperoot')]) except ldap.LDAPError as e: log.fatal('test_basic_referrals: Failed to set referral: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-state', b'Referral')]) except ldap.LDAPError as e: log.fatal('test_basic_referrals: Failed to set backend state: error ' + e.args[0]['desc']) assert False # # Test that a referral error is returned # topology_st.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral try: topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') except ldap.REFERRAL: pass except ldap.LDAPError as e: log.fatal('test_basic_referrals: Search failed: ' + e.args[0]['desc']) assert False # # Make sure server can restart in referral mode # topology_st.standalone.restart(timeout=10) # # Cleanup # try: topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-state', b'Backend')]) except ldap.LDAPError as e: log.fatal('test_basic_referrals: Failed to set backend state: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE, 'nsslapd-referral', None)]) except ldap.LDAPError as e: log.fatal('test_basic_referrals: Failed to delete referral: error ' + e.args[0]['desc']) assert False topology_st.standalone.set_option(ldap.OPT_REFERRALS, 1) log.info('test_basic_referrals: PASSED') def test_basic_systemctl(topology_st, import_example_ldif): """Tests systemctl/lib389 can stop and start the server. :id: a92a7438-ecfa-4583-a89c-5fbfc0220b69 :setup: Standalone instance :steps: 1. Stop the server. 2. Start the server. 3. 
Stop the server, break the dse.ldif and dse.ldif.bak, so a start fails. 4. Verify that systemctl detects the failed start. 5. Fix the dse.ldif, and make sure the server starts up. 6. Verify systemctl correctly identifies the successful start. :expectedresults: 1. Server should be stopped. 2. Server should start 3. Stop should work but start after breaking dse.ldif should fail. 4. Systemctl should be able to detect the failed start. 5. Server should start. 6. Systemctl should be able to detect the successful start. """ log.info('Running test_basic_systemctl...') config_dir = topology_st.standalone.get_config_dir() # # Stop the server # log.info('Stopping the server...') topology_st.standalone.stop() log.info('Stopped the server.') # # Start the server # log.info('Starting the server...') topology_st.standalone.start() log.info('Started the server.') # # Stop the server, break the dse.ldif so a start fails, # and verify that systemctl detects the failed start # log.info('Stopping the server...') topology_st.standalone.stop() log.info('Stopped the server before breaking the dse.ldif.') shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct') open(config_dir + '/dse.ldif', 'w').close() # We need to kill the .bak file too, DS is just too smart! 
open(config_dir + '/dse.ldif.bak', 'w').close() log.info('Attempting to start the server with broken dse.ldif...') try: topology_st.standalone.start() except Exception as e: log.info('Server failed to start as expected: ' + str(e)) log.info('Check the status...') assert (not topology_st.standalone.status()) log.info('Server failed to start as expected') time.sleep(5) # # Fix the dse.ldif, and make sure the server starts up, # and systemctl correctly identifies the successful start # shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif') log.info('Starting the server with good dse.ldif...') topology_st.standalone.start() log.info('Check the status...') assert (topology_st.standalone.status()) log.info('Server started after fixing dse.ldif.') log.info('test_basic_systemctl: PASSED') def test_basic_ldapagent(topology_st, import_example_ldif): """Tests that the ldap agent starts :id: da1d1846-8fc4-4b8c-8e53-4c9c16eff1ba :setup: Standalone instance :steps: 1. Start SNMP ldap agent using command. 2. Cleanup - Kill SNMP agent process. :expectedresults: 1. SNMP agent should start. 2. SNMP agent process should be successfully killed. """ log.info('Running test_basic_ldapagent...') var_dir = topology_st.standalone.get_local_state_dir() config_file = os.path.join(topology_st.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf') agent_config_file = open(config_file, 'w') agent_config_file.write('agentx-supplier ' + var_dir + '/agentx/supplier\n') agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n') agent_config_file.write('server slapd-' + topology_st.standalone.serverid + '\n') agent_config_file.close() # Remember, this is *forking* check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file]) # First kill any previous agents .... 
run_dir = topology_st.standalone.get_run_dir() pidpath = os.path.join(run_dir, 'ldap-agent.pid') pid = None with open(pidpath, 'r') as pf: pid = pf.readlines()[0].strip() if pid: log.debug('test_basic_ldapagent: Terminating agent %s', pid) check_output(['kill', pid]) log.info('test_basic_ldapagent: PASSED') @pytest.mark.skipif(not get_user_is_ds_owner(), reason="process ownership permission is required") def test_basic_dse_survives_kill9(topology_st, import_example_ldif): """Tests that the dse.ldif is not wiped out after the process is killed (bug 910581) :id: 10f141da-9b22-443a-885c-87271dcd7a59 :setup: Standalone instance :steps: 1. Check out pid of ns-slapd process and Kill ns-slapd process. 2. Check the contents of dse.ldif file. 3. Start server. :expectedresults: 1. ns-slapd process should be killed. 2. dse.ldif should not be corrupted. 3. Server should start successfully. """ log.info('Running test_basic_dse...') dse_file = topology_st.standalone.confdir + '/dse.ldif' pid = check_output(['pidof', '-s', 'ns-slapd']).strip() # We can't guarantee we have access to sudo in any environment ... Either # run py.test with sudo, or as the same user as the dirsrv. check_output(['kill', '-9', ensure_str(pid)]) if os.path.getsize(dse_file) == 0: log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!') assert False topology_st.standalone.start(timeout=60) log.info('dse.ldif was not corrupted, and the server was restarted') log.info('test_basic_dse: PASSED') # Give the server time to startup, in some conditions this can be racey without systemd notification. Only affects this one test though... time.sleep(10) @pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST) def test_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr_name): """Tests that operational attributes are not returned by default in rootDSE searches :id: 4fee33cc-4019-4c27-89e8-998e6c770dc0 :parametrized: yes :setup: Standalone instance :steps: 1. 
Make an ldapsearch for rootdse attribute 2. Check the returned entries. :expectedresults: 1. Search should not fail 2. Operational attributes should not be returned. """ topology_st.standalone.start() log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name) try: entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] assert not entry.hasAttr(rootdse_attr_name) except ldap.LDAPError as e: log.fatal('Search failed, error: ' + e.args[0]['desc']) assert False def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): """Tests that operational attributes are returned by default in rootDSE searches after config modification :id: c7831e04-f458-4e23-83c7-b6f66109f639 :parametrized: yes :setup: Standalone instance and we are using rootdse_attr fixture which adds nsslapd-return-default-opattr attr with value of one operation attribute. :steps: 1. Make an ldapsearch for rootdse attribute 2. Check the returned entries. :expectedresults: 1. Search should not fail 2. 
Operational attributes should be returned after the config modification """ log.info(" Assert rootdse search has %s attr" % rootdse_attr) try: entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] assert entry.hasAttr(rootdse_attr) except ldap.LDAPError as e: log.fatal('Search failed, error: ' + e.args[0]['desc']) assert False @pytest.fixture(scope="module") def create_users(topology_st): """Add users to the default suffix """ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user_names = ["Directory", "Server", "389", "lib389", "pytest"] log.info('Adding 5 test users') for name in user_names: users.create(properties={ 'uid': name, 'sn': name, 'cn': name, 'uidNumber': '1000', 'gidNumber': '1000', 'homeDirectory': '/home/%s' % name, 'mail': '%s@example.com' % name, 'userpassword': 'pass%s' % name, }) def test_basic_anonymous_search(topology_st, create_users): """Tests basic anonymous search operations :id: c7831e04-f458-4e50-83c7-b6f77109f639 :setup: Standalone instance Add 5 test users with different user names :steps: 1. Execute anonymous search with different filters :expectedresults: 1. Search should be successful """ filters = ["uid=Directory", "(|(uid=S*)(uid=3*))", "(&(uid=l*)(mail=l*))", "(&(!(uid=D*))(ou=People))"] log.info("Execute anonymous search with different filters") for filtr in filters: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filtr) assert len(entries) != 0 @pytest.mark.ds604 @pytest.mark.bz915801 def test_search_original_type(topology_st, create_users): """Test ldapsearch returning original attributes using nsslapd-search-return-original-type-switch :id: d7831d04-f558-4e50-93c7-b6f77109f640 :setup: Standalone instance Add some test entries :steps: 1. Set nsslapd-search-return-original-type-switch to ON 2. Check that ldapsearch *does* return unknown attributes 3. Turn off nsslapd-search-return-original-type-switch 4. 
Check that ldapsearch doesn't return any unknown attributes :expectedresults: 1. nsslapd-search-return-original-type-switch should be set to ON 2. ldapsearch should return unknown attributes 3. nsslapd-search-return-original-type-switch should be OFF 4. ldapsearch should not return any unknown attributes """ log.info("Set nsslapd-search-return-original-type-switch to ON") topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'on') log.info("Check that ldapsearch *does* return unknown attributes") entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', ['objectclass overflow', 'unknown']) assert "objectclass overflow" in entries[0].getAttrs() log.info("Set nsslapd-search-return-original-type-switch to Off") topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'off') log.info("Check that ldapsearch *does not* return unknown attributes") entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', ['objectclass overflow', 'unknown']) assert "objectclass overflow" not in entries[0].getAttrs() @pytest.mark.bz192901 def test_search_ou(topology_st): """Test that DS should not return an entry that does not match the filter :id: d7831d05-f117-4e89-93c7-b6f77109f640 :setup: Standalone instance :steps: 1. Create an OU entry without sub entries 2. Search from the OU with the filter that does not match the OU :expectedresults: 1. Creation of OU should be successful 2. 
Search should not return any results """ log.info("Create a test OU without sub entries") ou = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou.create(properties={ 'ou': 'test_ou', }) search_base = ("ou=test_ou,%s" % DEFAULT_SUFFIX) log.info("Search from the OU with the filter that does not match the OU, it should not return anything") entries = topology_st.standalone.search_s(search_base, ldap.SCOPE_SUBTREE, 'uid=*', ['dn']) assert len(entries) == 0 def test_bind_invalid_entry(topology_st): """Test the failing bind does not return information about the entry :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f :customerscenario: True :setup: Standalone instance :steps: 1: bind as non existing entry 2: check that bind info does not report 'No such entry' :expectedresults: 1: pass 2: pass """ topology_st.standalone.restart() INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX try: topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) except ldap.LDAPError as e: log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) log.info('exception description: ' + e.args[0]['desc']) if 'info' in e.args[0]: log.info('exception info: ' + e.args[0]['info']) assert e.args[0]['desc'] == 'Invalid credentials' assert 'info' not in e.args[0] pass log.info('test_bind_invalid_entry: PASSED') # reset credentials topology_st.standalone.simple_bind_s(DN_DM, PW_DM) def test_bind_entry_missing_passwd(topology_st): """ :id: af209149-8fb8-48cb-93ea-3e82dd7119d2 :setup: Standalone Instance :steps: 1. Bind as database entry that does not have userpassword set 2. Bind as database entry that does not exist 1. Bind as cn=config entry that does not have userpassword set 2. Bind as cn=config entry that does not exist :expectedresults: 1. Fails with error 49 2. Fails with error 49 3. Fails with error 49 4. 
@pytest.mark.bz1044135
@pytest.mark.ds47319
def test_connection_buffer_size(topology_st):
    """Test connection buffer size adjustable with different values(valid values and invalid)

    :id: e7831d05-f117-4ec9-1203-b6f77109f117
    :setup: Standalone instance
    :steps:
        1. Set nsslapd-connection-buffer to some valid values (2, 0 , 1)
        2. Set nsslapd-connection-buffer to some invalid values (-1, a)
    :expectedresults:
        1. This should pass
        2. This should fail
    """

    config = topology_st.standalone.config

    # Every supported setting must be accepted by the server.
    for good in ('2', '0', '1'):
        config.replace('nsslapd-connection-buffer', good)

    # Negative and non-numeric settings must be rejected.
    for bad in ('-1', 'a'):
        with pytest.raises(ldap.OPERATIONS_ERROR):
            config.replace('nsslapd-connection-buffer', bad)
telephoneNumber being indexed by default create 20 users without telephoneNumber 3. add a telephoneNumber value and delete it to trigger an empty index database 4. Do a search that triggers a range lookup on empty telephoneNumber 5. Check that the critical message is not logged in error logs :expectedresults: 1. This should pass 2. This should pass 3. This should pass 4. This should pass on normal build but could abort a debug build 5. This should pass """ indexedAttr = 'internationalISDNNumber' # Step 1 from lib389.index import Indexes indexes = Indexes(topology_st.standalone) indexes.create(properties={ 'cn': indexedAttr, 'nsSystemIndex': 'false', 'nsIndexType': 'eq' }) topology_st.standalone.restart() # Step 2 users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) log.info('Adding 20 users without "%s"' % indexedAttr) for i in range(20): name = 'user_%d' % i last_user = users.create(properties={ 'uid': name, 'sn': name, 'cn': name, 'uidNumber': '1000', 'gidNumber': '1000', 'homeDirectory': '/home/%s' % name, 'mail': '%s@example.com' % name, 'userpassword': 'pass%s' % name, }) # Step 3 # required update to create the indexAttr (i.e. 
'loginShell') database, and then make it empty topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_ADD, indexedAttr, b'1234')]) ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) assert ent assert ent.hasAttr(indexedAttr) topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_DELETE, indexedAttr, None)]) ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) assert ent assert not ent.hasAttr(indexedAttr) # Step 4 # The first component being not indexed the range on second is evaluated try: ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(&(sudoNotAfter=*)(%s>=111))' % indexedAttr) assert len(ents) == 0 except ldap.SERVER_DOWN: log.error('Likely testing against a debug version that asserted') pass # Step 5 assert not topology_st.standalone.searchErrorsLog('CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.') @pytest.mark.bz1870624 @pytest.mark.ds4379 @pytest.mark.parametrize("case,value", [('positive', ['cn','','']), ("positive", ['cn', '', '', '', '', '', '', '', '', '', '']), ("negative", ['cn', '', '', '', '', '', '', '', '', '', '', ''])]) def test_attr_description_limit(topology_st, case, value): """Test that up to 10 empty attributeDescription is allowed :id: 5afd3dcd-1028-428d-822d-a489ecf4b67e :customerscenario: True :parametrized: yes :setup: Standalone instance :steps: 1. Check that 2 empty values are allowed 2. Check that 10 empty values are allowed 3. Check that more than 10 empty values are allowed :expectedresults: 1. Should succeed 2. Should succeed 3. 
@pytest.mark.bz1647099
@pytest.mark.ds50026
def test_ldbm_modification_audit_log(topology_st):
    """When updating LDBM config attributes, those attributes/values are not listed
    in the audit log

    :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
    :setup: Standalone Instance
    :steps:
        1. Bind as DM
        2. Enable audit log
        3. Update a set of config attrs in LDBM config
        4. Restart the server
        5. Check that config attrs are listed in the audit log
    :expectedresults:
        1. Operation successful
        2. Operation successful
        3. Operation successful
        4. Operation successful
        5. Audit log should contain modification of attrs
    """

    VALUE = '10001'
    attrs = ('nsslapd-lookthroughlimit',
             'nsslapd-pagedidlistscanlimit',
             'nsslapd-idlistscanlimit',
             'nsslapd-db-locks')

    # Bind as Directory Manager and turn on audit logging.
    conn = DirectoryManager(topology_st.standalone).bind()
    log.info("Enable audit logging")
    conn.config.enable_log('audit')

    ldbm_config = LDBMConfig(conn)
    for attr in attrs:
        log.info("Set attribute %s to value %s" % (attr, VALUE))
        ldbm_config.set(attr, VALUE)

    log.info('Restart the server to flush the logs')
    conn.restart()

    # Every modified attribute must appear in the audit log together with
    # its new value.
    for attr in attrs:
        log.info("Check if attribute %s is replaced in the audit log" % attr)
        assert conn.searchAuditLog('replace: %s' % attr)
        assert conn.searchAuditLog('%s: %s' % (attr, VALUE))
Should succeeds """ template_file = "/tmp/dssetup.inf" template_text = """[general] config_version = 2 # This invalid hostname ... full_machine_name = localhost.localdomain # Means we absolutely require this. strict_host_checking = False # In tests, we can be run in containers, NEVER trust # that systemd is there, or functional in any capacity systemd = False [slapd] instance_name = test_dscreate root_dn = cn=directory manager root_password = someLongPassword_123 # We do not have access to high ports in containers, # so default to something higher. port = 38999 secure_port = 63699 [backend-userroot] suffix = dc=example,dc=com sample_entries = yes """ with open(template_file, "w") as template_fd: template_fd.write(template_text) # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 tmp_env = os.environ if "PYTHONPATH" in tmp_env: del tmp_env["PYTHONPATH"] try: subprocess.check_call([ 'dscreate', 'from-file', template_file ], env=tmp_env) except subprocess.CalledProcessError as e: log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) assert False def fin(): os.remove(template_file) try: subprocess.check_call(['dsctl', 'test_dscreate', 'remove', '--do-it']) except subprocess.CalledProcessError as e: log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) request.addfinalizer(fin) @pytest.fixture(scope="function") def dscreate_long_instance(request): template_file = "/tmp/dssetup.inf" longname_serverid = "test-longname-deadbeef-deadbeef-deadbeef-deadbeef-deadbeef" template_text = """[general] config_version = 2 # This invalid hostname ... full_machine_name = localhost.localdomain # Means we absolutely require this. 
strict_host_checking = False # In tests, we can be run in containers, NEVER trust # that systemd is there, or functional in any capacity systemd = False [slapd] instance_name = %s root_dn = cn=directory manager root_password = someLongPassword_123 # We do not have access to high ports in containers, # so default to something higher. port = 38999 secure_port = 63699 [backend-userroot] suffix = dc=example,dc=com sample_entries = yes """ % longname_serverid with open(template_file, "w") as template_fd: template_fd.write(template_text) # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 tmp_env = os.environ if "PYTHONPATH" in tmp_env: del tmp_env["PYTHONPATH"] try: subprocess.check_call([ 'dscreate', 'from-file', template_file ], env=tmp_env) except subprocess.CalledProcessError as e: log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) assert False inst = DirSrv(verbose=True, external_log=log) dse_ldif = DSEldif(inst, serverid=longname_serverid) socket_path = dse_ldif.get("cn=config", "nsslapd-ldapifilepath") inst.local_simple_allocate( serverid=longname_serverid, ldapuri=f"ldapi://{socket_path[0].replace('/', '%2f')}", password="someLongPassword_123" ) inst.ldapi_enabled = 'on' inst.ldapi_socket = socket_path inst.ldapi_autobind = 'off' try: inst.open() except: log.fatal("Failed to connect via ldapi to %s instance" % longname_serverid) os.remove(template_file) try: subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it']) except subprocess.CalledProcessError as e: log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) def fin(): os.remove(template_file) try: subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it']) except subprocess.CalledProcessError as e: log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) request.addfinalizer(fin) return inst @pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), reason="This test is 
only required with new admin cli, and requires root.") @pytest.mark.bz1748016 @pytest.mark.ds50581 def test_dscreate_ldapi(dscreate_long_instance): """Test that an instance with a long name can handle ldapi connection using a long socket name :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f :setup: None :steps: 1. Ccreate an instance with a long serverId name, that open a ldapi connection 2. Connect with ldapi, that hit 50581 and crash the instance :expectedresults: 1. Should succeeds 2. Should succeeds """ root_dse = RootDSE(dscreate_long_instance) log.info(root_dse.get_supported_ctrls()) @pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'), reason="This test is only required with new admin cli, and requires root.") @pytest.mark.bz1715406 @pytest.mark.ds50923 def test_dscreate_multiple_dashes_name(dscreate_long_instance): """Test that an instance with a multiple dashes in the name can be removed with dsctl --remove-all :id: 265c3ac7-5ba6-4278-b8f4-4e7692afd1a5 :setup: An instance with a few dashes in its name :steps: 1. Run 'dsctl --remove-all' command 2. Check if the instance exists :expectedresults: 1. Should succeeds 2. Instance doesn't exists """ p = run(['dsctl', '--remove-all'], stdout=PIPE, input='Yes\n', encoding='ascii') assert not dscreate_long_instance.exists() @pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value')) def dscreate_test_rdn_value(request): template_file = "/tmp/dssetup.inf" template_text = f"""[general] config_version = 2 # This invalid hostname ... full_machine_name = localhost.localdomain # Means we absolutely require this. 
strict_host_checking = False # In tests, we can be run in containers, NEVER trust # that systemd is there, or functional in any capacity systemd = False [slapd] instance_name = test_different_rdn root_dn = cn=directory manager root_password = someLongPassword_123 # We do not have access to high ports in containers, # so default to something higher. port = 38999 secure_port = 63699 [backend-userroot] create_suffix_entry = True suffix = {request.param} """ with open(template_file, "w") as template_fd: template_fd.write(template_text) # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 tmp_env = os.environ if "PYTHONPATH" in tmp_env: del tmp_env["PYTHONPATH"] def fin(): os.remove(template_file) if request.param != "wrong=some_value": try: subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it']) except subprocess.CalledProcessError as e: log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}") else: log.info("Wrong RDN is passed, instance not created") request.addfinalizer(fin) return template_file, tmp_env, request.param, @pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), reason="This test is only required with new admin cli, and requires root.") @pytest.mark.bz1807419 @pytest.mark.ds50928 def test_dscreate_with_different_rdn(dscreate_test_rdn_value): """Test that dscreate works with different RDN attributes as suffix :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef :customerscenario: True :parametrized: yes :setup: None :steps: 1. Create template file for dscreate with different RDN attributes as suffix 2. Create instance using template file 3. Create instance with 'wrong=some_value' as suffix's RDN attribute :expectedresults: 1. Should succeeds 2. Should succeeds 3. Should fail """ try: subprocess.check_call([ 'dscreate', 'from-file', dscreate_test_rdn_value[0] ], env=dscreate_test_rdn_value[1]) except subprocess.CalledProcessError as e: log.fatal(f"dscreate failed! 
Error ({e.returncode}) {e.output}") if dscreate_test_rdn_value[2] != "wrong=some_value": assert False else: assert True if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/betxns/000077500000000000000000000000001421664411400236535ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/betxns/__init__.py000066400000000000000000000000631421664411400257630ustar00rootroot00000000000000""" :Requirement: 389-ds-base: betxn Plugin """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/betxns/betxn_test.py000066400000000000000000000314411421664411400264070ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.plugins import (SevenBitCheckPlugin, AttributeUniquenessPlugin, MemberOfPlugin, ManagedEntriesPlugin, ReferentialIntegrityPlugin, MEPTemplates, MEPConfigs) from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.group import Groups, Group from lib389.idm.domain import Domain from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) USER_PASSWORD = 'password' def test_betxt_7bit(topology_st): """Test that the 7-bit plugin correctly rejects an invalid update :id: 9e2ab27b-eda9-4cd9-9968-a1a8513210fd :setup: Standalone instance and enabled dynamic plugins :steps: 1. Enable PLUGIN_7_BIT_CHECK to "ON" 2. Add test user 3. Try to Modify test user's RDN to have 8 bit RDN 4. 
Execute search operation for new 8 bit RDN 5. Remove the test user for cleanup :expectedresults: 1. PLUGIN_7_BIT_CHECK should be ON 2. Test users should be added 3. Modify RDN for test user should FAIL 4. Search operation should FAIL 5. Test user should be removed """ log.info('Running test_betxt_7bit...') BAD_RDN = u'uid=Fu\u00c4\u00e8' sevenbc = SevenBitCheckPlugin(topology_st.standalone) sevenbc.enable() topology_st.standalone.restart() users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) user = users.create(properties=TEST_USER_PROPERTIES) # Attempt a modrdn, this should fail with pytest.raises(ldap.LDAPError): user.rename(BAD_RDN) # Make sure the operation did not succeed, attempt to search for the new RDN with pytest.raises(ldap.LDAPError): users.get(u'Fu\u00c4\u00e8') # Make sure original entry is present user_check = users.get("testuser") assert user_check.dn.lower() == user.dn.lower() # Cleanup - remove the user user.delete() log.info('test_betxt_7bit: PASSED') def test_betxn_attr_uniqueness(topology_st): """Test that we can not add two entries that have the same attr value that is defined by the plugin :id: 42aeb41c-fbb5-4bc6-a97b-56274034d29f :setup: Standalone instance and enabled dynamic plugins :steps: 1. Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON" 2. Add a test user 3. Add another test user having duplicate uid as previous one 4. Cleanup - disable PLUGIN_ATTR_UNIQUENESS plugin as "OFF" 5. Cleanup - remove test user entry :expectedresults: 1. PLUGIN_ATTR_UNIQUENESS plugin should be ON 2. Test user should be added 3. Add operation should FAIL 4. PLUGIN_ATTR_UNIQUENESS plugin should be "OFF" 5. 
Test user entry should be removed """ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") attruniq.create(properties={'cn': 'attruniq'}) attruniq.add_unique_attribute('uid') attruniq.add_unique_subtree(DEFAULT_SUFFIX) attruniq.enable_all_subtrees() attruniq.enable() topology_st.standalone.restart() users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) user1 = users.create(properties={ 'uid': 'testuser1', 'cn': 'testuser1', 'sn': 'user1', 'uidNumber': '1001', 'gidNumber': '2001', 'homeDirectory': '/home/testuser1' }) with pytest.raises(ldap.LDAPError): users.create(properties={ 'uid': ['testuser2', 'testuser1'], 'cn': 'testuser2', 'sn': 'user2', 'uidNumber': '1002', 'gidNumber': '2002', 'homeDirectory': '/home/testuser2' }) user1.delete() log.info('test_betxn_attr_uniqueness: PASSED') def test_betxn_memberof(topology_st): """Test PLUGIN_MEMBER_OF plugin :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5993 :setup: Standalone instance and enabled dynamic plugins :steps: 1. Enable and configure memberOf plugin 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" 3. Add two test groups - group1 and group2 4. Add group2 to group1 5. Add group1 to group2 :expectedresults: 1. memberOf plugin plugin should be ON 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" should PASS 3. Add operation should PASS 4. Add operation should FAIL 5. Add operation should FAIL """ memberof = MemberOfPlugin(topology_st.standalone) memberof.enable() memberof.set_autoaddoc('referral') topology_st.standalone.restart() groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) group1 = groups.create(properties={'cn': 'group1'}) group2 = groups.create(properties={'cn': 'group2'}) # We may need to mod groups to not have nsMemberOf ... ? 
if not ds_is_older('1.3.7'): group1.remove('objectClass', 'nsMemberOf') group2.remove('objectClass', 'nsMemberOf') # Add group2 to group1 - it should fail with objectclass violation with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): group1.add_member(group2.dn) # verify entry cache reflects the current/correct state of group1 assert not group1.is_member(group2.dn) # Done log.info('test_betxn_memberof: PASSED') def test_betxn_modrdn_memberof_cache_corruption(topology_st): """Test modrdn operations and memberOf be txn post op failures :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 :setup: Standalone instance :steps: 1. Enable and configure memberOf plugin 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" 3. Create group and user outside of memberOf plugin scope 4. Do modrdn to move group into scope 5. Do modrdn to move group into scope (again) :expectedresults: 1. memberOf plugin plugin should be ON 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" should PASS 3. Creating group and user should PASS 4. Modrdn should fail with objectclass violation 5. 
Second modrdn should also fail with objectclass violation """ peoplebase = 'ou=people,%s' % DEFAULT_SUFFIX memberof = MemberOfPlugin(topology_st.standalone) memberof.enable() memberof.set_autoaddoc('nsContainer') # Bad OC memberof.set('memberOfEntryScope', peoplebase) memberof.set('memberOfAllBackends', 'on') topology_st.standalone.restart() groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) group = groups.create(properties={ 'cn': 'group', }) # Create user and add it to group users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) user = users.ensure_state(properties=TEST_USER_PROPERTIES) if not ds_is_older('1.3.7'): user.remove('objectClass', 'nsMemberOf') group.add_member(user.dn) # Attempt modrdn that should fail, but the original entry should stay in the cache with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): group.rename('cn=group_to_people', newsuperior=peoplebase) # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): group.rename('cn=group_to_people', newsuperior=peoplebase) # Done log.info('test_betxn_modrdn_memberof: PASSED') def test_ri_and_mep_cache_corruption(topology_st): """Test RI plugin aborts change after MEP plugin fails. This is really testing the entry cache for corruption :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995 :setup: Standalone instance :steps: 1. Enable and configure mep and ri plugins 2. Add user and add it to a group 3. Disable MEP plugin and remove MEP group 4. Delete user 5. Check that user is still a member of the group :expectedresults: 1. Success 2. Success 3. Success 4. It fails with NO_SUCH_OBJECT 5. 
Success """ # Add ACI so we can test that non-DM user can't delete managed entry domain = Domain(topology_st.standalone, DEFAULT_SUFFIX) ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" ACI_TARGETATTR = "(targetattr = *)" ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT domain.add('aci', ACI_BODY) # Start plugins topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') mep_plugin = ManagedEntriesPlugin(topology_st.standalone) mep_plugin.enable() ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone) ri_plugin.enable() # Add our org units ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_people = ous.create(properties={'ou': 'managed_people'}) ou_groups = ous.create(properties={'ou': 'managed_groups'}) # Configure MEP mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX) mep_template1 = mep_templates.create(properties={ 'cn': 'MEP template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') }) mep_configs = MEPConfigs(topology_st.standalone) mep_configs.create(properties={'cn': 'config', 'originScope': ou_people.dn, 'originFilter': 'objectclass=posixAccount', 'managedBase': ou_groups.dn, 'managedTemplate': mep_template1.dn}) # Add an entry that meets the MEP scope users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) user = users.create(properties={ 'uid': 'test-user1', 'cn': 'test-user', 'sn': 'test-user', 'uidNumber': '10011', 'gidNumber': '20011', 'homeDirectory': '/home/test-user1' }) user.reset_password(USER_PASSWORD) user_bound_conn = user.bind(USER_PASSWORD) # Add group groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn}) # Check 
if a managed group entry was created mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn)) if not mep_group.exists(): log.fatal("MEP group was not created for the user") assert False # Test MEP be txn pre op failure does not corrupt entry cache # Should get the same exception for both rename attempts # Try to remove the entry while bound as Admin (non-DM) managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) managed_entry_user_conn = managed_groups_user_conn.get(user.rdn) with pytest.raises(ldap.UNWILLING_TO_PERFORM): managed_entry_user_conn.rename("cn=modrdn group") with pytest.raises(ldap.UNWILLING_TO_PERFORM): managed_entry_user_conn.rename("cn=modrdn group") # Mess with MEP so it fails mep_plugin.disable() users_mep_group = UserAccounts(topology_st.standalone, mep_group.dn, rdn=None) users_mep_group.create_test_user(1001) mep_plugin.enable() # Add another group to verify entry cache is not corrupted test_group = groups.create(properties={'cn': 'test_group'}) # Try to delete user - it fails because managed entry can't be deleted with pytest.raises(ldap.NOT_ALLOWED_ON_NONLEAF): user.delete() # Verify membership is intact if not user_group.is_member(user.dn): log.fatal("Member was incorrectly removed from the group!! 
Or so it seems") # Restart server and test again in case this was a cache issue topology_st.standalone.restart() if user_group.is_member(user.dn): log.info("The entry cache was corrupted") assert False assert False # Verify test group is still found in entry cache by deleting it test_group.delete() # Success log.info("Test PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/chaining_plugin/000077500000000000000000000000001421664411400255065ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/chaining_plugin/__init__.py000066400000000000000000000000661421664411400276210ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Chaining Plugin """ anonymous_access_denied_basic.py000066400000000000000000000106021421664411400340220ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/chaining_plugin# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import time import shutil from lib389.idm.account import Accounts, Account from lib389.topologies import topology_i2 as topology from lib389.backend import Backends from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import ChainingBackendPlugin from lib389.chaining import ChainingLinks from lib389.mappingTree import MappingTrees from lib389.idm.services import ServiceAccounts, ServiceAccount from lib389.idm.domain import Domain PW = 'thnaoehtnuaoenhtuaoehtnu' pytestmark = pytest.mark.tier1 def test_chaining_paged_search(topology): """ Check that when the chaining target has anonymous access disabled that the ping still functions and allows the search to continue with an appropriate bind user. 
:id: 00bf31db-d93b-4224-8e70-86abb2d4cd17 :setup: Two standalones in chaining. :steps: 1. Configure chaining between the nodes 2. Do a chaining search (w anon allow) to assert it works 3. Configure anon dis allowed on st2 4. Restart both 5. Check search still works :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success """ st1 = topology.ins["standalone1"] st2 = topology.ins["standalone2"] ### We setup so that st1 -> st2 # Setup a chaining user on st2 to authenticate to. sa = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = { 'cn': 'sa', 'userPassword': PW }) # Add a proxy user. sproxy = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = { 'cn': 'proxy', 'userPassword': PW }) # Add the read and proxy ACI dc = Domain(st2, DEFAULT_SUFFIX) dc.add('aci', f"""(targetattr="objectClass || cn || uid")(version 3.0; acl "Enable sa read"; allow (read, search, compare)(userdn="ldap:///{sa.dn}");)""" ) # Add the proxy ACI dc.add('aci', f"""(targetattr="*")(version 3.0; acl "Enable proxy access"; allow (proxy)(userdn="ldap:///{sproxy.dn}");)""" ) # Clear all the BE in st1 bes1 = Backends(st1) for be in bes1.list(): be.delete() # Setup st1 to chain to st2 chain_plugin_1 = ChainingBackendPlugin(st1) chain_plugin_1.enable() # Chain with the proxy user. chains = ChainingLinks(st1) chain = chains.create(properties={ 'cn': 'demochain', 'nsfarmserverurl': st2.toLDAPURL(), 'nsslapd-suffix': DEFAULT_SUFFIX, 'nsmultiplexorbinddn': sproxy.dn, 'nsmultiplexorcredentials': PW, 'nsCheckLocalACI': 'on', 'nsConnectionLife': '30', }) mts = MappingTrees(st1) # Due to a bug in lib389, we need to delete and recreate the mt. for mt in mts.list(): mt.delete() mts.ensure_state(properties={ 'cn': DEFAULT_SUFFIX, 'nsslapd-state': 'backend', 'nsslapd-backend': 'demochain', 'nsslapd-distribution-plugin': 'libreplication-plugin', 'nsslapd-distribution-funct': 'repl_chain_on_update', }) # Enable pwpolicy (Not sure if part of the issue). 
st1.config.set('passwordIsGlobalPolicy', 'on') st2.config.set('passwordIsGlobalPolicy', 'on') # Restart to enable everything. st1.restart() # Get a proxy auth connection. sa1 = ServiceAccount(st1, sa.dn) sa1_conn = sa1.bind(password=PW) # Now do a search from st1 -> st2 sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX) assert sa1_dc.exists() # Now on st2 disable anonymous access. st2.config.set('nsslapd-allow-anonymous-access', 'rootdse') # Stop st2 to force the connection to be dead. st2.stop() # Restart st1 - this means it must re-do the ping/keepalive. st1.restart() # do a bind - this should fail, and forces the conn offline. with pytest.raises(ldap.OPERATIONS_ERROR): sa1.bind(password=PW) # Allow time to attach lldb if needed. # print("🔥🔥🔥") # time.sleep(45) # Bring st2 online. st2.start() # Wait a bit time.sleep(5) # Get a proxy auth connection (again) sa1_conn = sa1.bind(password=PW) # Now do a search from st1 -> st2 sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX) assert sa1_dc.exists() 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py000066400000000000000000000047511421664411400315330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import time import shutil from lib389.idm.account import Accounts, Account from lib389.topologies import topology_i2 as topology from lib389.backend import Backends from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import ChainingBackendPlugin from lib389.chaining import ChainingLinks from lib389.mappingTree import MappingTrees pytestmark = pytest.mark.tier1 def test_chaining_paged_search(topology): """ Test paged search through the chaining db. This would cause a SIGSEGV with paged search which could be triggered by SSSD. 
:id: 7b29b1f5-26cf-49fa-9fe7-ee29a1408633 :setup: Two standalones in chaining. :steps: 1. Configure chaining between the nodes 2. Do a chaining search (no page) to assert it works 3. Do a paged search through chaining. :expectedresults: 1. Success 2. Success 3. Success """ st1 = topology.ins["standalone1"] st2 = topology.ins["standalone2"] ### We setup so that st1 -> st2 # Clear all the BE in st1 bes1 = Backends(st1) for be in bes1.list(): be.delete() # Setup st1 to chain to st2 chain_plugin_1 = ChainingBackendPlugin(st1) chain_plugin_1.enable() chains = ChainingLinks(st1) chain = chains.create(properties={ 'cn': 'demochain', 'nsslapd-suffix': DEFAULT_SUFFIX, 'nsmultiplexorbinddn': '', 'nsmultiplexorcredentials': '', 'nsfarmserverurl': st2.toLDAPURL(), }) mts = MappingTrees(st1) # Due to a bug in lib389, we need to delete and recreate the mt. for mt in mts.list(): mt.delete() mts.ensure_state(properties={ 'cn': DEFAULT_SUFFIX, 'nsslapd-state': 'backend', 'nsslapd-backend': 'demochain', }) # Restart to enable st1.restart() # Get an anonymous connection. anon = Account(st1, dn='') anon_conn = anon.bind(password='') # Now do a search from st1 -> st2 accs_1 = Accounts(anon_conn, DEFAULT_SUFFIX) assert len(accs_1.list()) > 0 # Allow time to attach lldb if needed. 
# import time # print("🔥🔥🔥") # time.sleep(45) # Now do a *paged* search from st1 -> st2 assert len(accs_1.list(paged_search=2, paged_critical=False)) > 0 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/000077500000000000000000000000001421664411400231335ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/__init__.py000066400000000000000000000017611421664411400252510ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Command Line Utility """ import logging log = logging.getLogger(__name__) def check_value_in_log_and_reset(topology, content_list=None, content_list2=None, check_value=None, check_value_not=None): if content_list2 is not None: log.info('Check if content is present in output') for item in content_list + content_list2: assert topology.logcap.contains(item) if content_list is not None: log.info('Check if content is present in output') for item in content_list: assert topology.logcap.contains(item) if check_value is not None: log.info('Check if value is present in output') assert topology.logcap.contains(check_value) if check_value_not is not None: log.info('Check if value is not present in output') assert not topology.logcap.contains(check_value_not) log.info('Reset the log for next test') topology.logcap.flush() 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/clu_test.py000066400000000000000000000061521421664411400253330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_clu_pwdhash(topology_st): """Test the pwdhash script output and encrypted password length :id: faaafd01-6748-4451-9d2b-f3bd47902447 :setup: Standalone instance :steps: 1. Execute /usr/bin/pwdhash -s ssha testpassword command from command line 2. Check if there is any output 3. Check the length of the generated output :expectedresults: 1. Execution should PASS 2. There should be an output from the command 3. Output length should not be less than 20 """ log.info('Running test_clu_pwdhash...') cmd = '%s -s ssha testpassword' % os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash') p = os.popen(cmd) result = p.readline() p.close() if not result: log.fatal('test_clu_pwdhash: Failed to run pwdhash') assert False if len(result) < 20: log.fatal('test_clu_pwdhash: Encrypted password is too short') assert False log.info('pwdhash generated: ' + result) log.info('test_clu_pwdhash: PASSED') def test_clu_pwdhash_mod(topology_st): """Test the pwdhash script output with -D configdir :id: 874ab5e2-207b-4a95-b4c0-22d97b8ab643 :setup: Standalone instance :steps: 1. Set nsslapd-rootpwstoragescheme & passwordStorageScheme to SSHA256 & SSHA384 respectively 2. Execute /usr/bin/pwdhash -D /etc/dirsrv/slapd-instance_name/ 3. Check if there is any output 4. Check if the command returns the hashed string using the algorithm set in nsslapd-rootpwstoragescheme :expectedresults: 1. nsslapd-rootpwstoragescheme & passwordStorageScheme should set to SSHA256 & SSHA384 respectively 2. Execution should PASS 3. There should be an output from the command 4. 
Command should return the hashed string using the algorithm set in nsslapd-rootpwstoragescheme """ log.info('Running test_clu_pwdhash_mod...') topology_st.standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA256') topology_st.standalone.config.set('passwordStorageScheme', 'SSHA384') cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash'), '-D', '/etc/dirsrv/slapd-standalone1', 'password'] result = subprocess.check_output(cmd) stdout = ensure_str(result) assert result, "Failed to run pwdhash" assert 'SSHA256' in stdout log.info('pwdhash generated: ' + stdout) log.info('returned the hashed string using the algorithm set in nsslapd-rootpwstoragescheme') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dbgen_test.py000066400000000000000000000707731421664411400256410ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.cli_ctl.dbgen import * from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates from lib389.idm.account import Accounts from lib389.idm.group import Groups from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.cli_base import FakeArgs pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/dbgen.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file_and_ldif(topology_st, request): global ldif_file ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) def fin(): log.info('Delete files') os.remove(LOG_FILE) os.remove(ldif_file) request.addfinalizer(fin) def run_offline_import(instance, ldif_file): log.info('Stopping the server and running offline import...') instance.stop() assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=ldif_file) instance.start() def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): LDAP_MOD = '/usr/bin/ldapmodify' log.info('Add entries from ldif file with ldapmodify') result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', instance.host, '-p', str(instance.port), '-af', ldif_file]) if output_to_check is not None: assert output_to_check in ensure_str(result) def check_value_in_log_and_reset(content_list): with open(LOG_FILE, 'r+') as f: file_content = f.read() log.info('Check if content is present in output') for item in content_list: assert item in file_content log.info('Reset log file for next test') f.truncate(0) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), 
reason="Not implemented") def test_dsconf_dbgen_users(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create ldif with users :id: 426b5b94-9923-454d-a736-7e71ca985e98 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with users 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone args = FakeArgs() args.suffix = DEFAULT_SUFFIX args.parent = 'ou=people,dc=example,dc=com' args.number = 1000 args.rdn_cn = False args.generic = True args.start_idx = 50 args.localize = False args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'suffix={}'.format(args.suffix), 'parent={}'.format(args.parent), 'number={}'.format(args.number), 'rdn-cn={}'.format(args.rdn_cn), 'generic={}'.format(args.generic), 'start-idx={}'.format(args.start_idx), 'localize={}'.format(args.localize), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create users ldif') dbgen_create_users(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) run_offline_import(standalone, ldif_file) log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_groups(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create ldif with group :id: 97207413-9a93-4065-a5ec-63aa93801a3f :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with group 3. 
Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'myGroup' args.parent = 'ou=groups,dc=example,dc=com' args.suffix = DEFAULT_SUFFIX args.number = 1 args.num_members = 1000 args.create_members = True args.member_attr = 'uniquemember' args.member_parent = 'ou=people,dc=example,dc=com' args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'number={}'.format(args.number), 'suffix={}'.format(args.suffix), 'num-members={}'.format(args.num_members), 'create-members={}'.format(args.create_members), 'member-parent={}'.format(args.member_parent), 'member-attr={}'.format(args.member_attr), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create group ldif') dbgen_create_groups(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account log.info('Check that group is imported') groups = Groups(standalone, DEFAULT_SUFFIX) assert groups.exists(args.NAME + '-1') new_group = groups.get(args.NAME + '-1') new_group.present('uniquemember', 
'uid=group_entry1-0152,ou=people,dc=example,dc=com') @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a COS definition :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8fd :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with classic COS definition 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.type = 'classic' args.NAME = 'My_Postal_Def' args.parent = 'ou=cos definitions,dc=example,dc=com' args.create_parent = True args.cos_specifier = 'businessCategory' args.cos_attr = ['postalcode', 'telephonenumber'] args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'type={}'.format(args.type), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'cos-specifier={}'.format(args.cos_specifier), 'cos-template={}'.format(args.cos_template), 'cos-attr={}'.format(args.cos_attr), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create COS definition ldif') dbgen_create_cos_def(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that COS definition is imported') cos_def = CosClassicDefinitions(standalone, args.parent) assert cos_def.exists(args.NAME) 
new_cos = cos_def.get(args.NAME) assert new_cos.present('cosTemplateDN', args.cos_template) assert new_cos.present('cosSpecifier', args.cos_specifier) assert new_cos.present('cosAttribute', args.cos_attr[0]) assert new_cos.present('cosAttribute', args.cos_attr[1]) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a COS definition :id: 6b26ca6d-226a-4f93-925e-faf95cc20214 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with pointer COS definition 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.type = 'pointer' args.NAME = 'My_Postal_Def_pointer' args.parent = 'ou=cos pointer definitions,dc=example,dc=com' args.create_parent = True args.cos_specifier = None args.cos_attr = ['postalcode', 'telephonenumber'] args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'type={}'.format(args.type), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'cos-template={}'.format(args.cos_template), 'cos-attr={}'.format(args.cos_attr), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create COS definition ldif') dbgen_create_cos_def(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db 
run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that COS definition is imported') cos_def = CosPointerDefinitions(standalone, args.parent) assert cos_def.exists(args.NAME) new_cos = cos_def.get(args.NAME) assert new_cos.present('cosTemplateDN', args.cos_template) assert new_cos.present('cosAttribute', args.cos_attr[0]) assert new_cos.present('cosAttribute', args.cos_attr[1]) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a COS definition :id: ab4b799e-e801-432a-a61d-badad2628203 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with indirect COS definition 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.type = 'indirect' args.NAME = 'My_Postal_Def_indirect' args.parent = 'ou=cos indirect definitions,dc=example,dc=com' args.create_parent = True args.cos_specifier = 'businessCategory' args.cos_attr = ['postalcode', 'telephonenumber'] args.cos_template = None args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'type={}'.format(args.type), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'cos-specifier={}'.format(args.cos_specifier), 'cos-attr={}'.format(args.cos_attr), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create COS definition ldif') dbgen_create_cos_def(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) 
@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"),
                    reason="Not implemented")
def test_dsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a COS template

    :id: 544017c7-4a82-4e7d-a047-00b68a28e070
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with COS template
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'

    inst = topology_st.standalone

    # Options for a COS template entry.
    args = FakeArgs()
    args.NAME = 'My_Template'
    args.parent = 'ou=cos templates,dc=example,dc=com'
    args.create_parent = True
    args.cos_priority = 1
    args.cos_attr_val = 'postalcode:12345'
    args.ldif_file = ldif_file

    expected_log = [
        'Generating LDIF with the following options:',
        f'NAME={args.NAME}',
        f'parent={args.parent}',
        f'create-parent={args.create_parent}',
        f'cos-priority={args.cos_priority}',
        f'cos-attr-val={args.cos_attr_val}',
        f'ldif-file={args.ldif_file}',
        'Writing LDIF',
        f'Successfully created LDIF file: {args.ldif_file}',
    ]

    log.info('Run ldifgen to create COS template ldif')
    dbgen_create_cos_tmp(inst, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(expected_log)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(inst, ldif_file, LDAP_RESULT)

    log.info('Check that COS template is imported')
    cos_temp = CosTemplates(inst, args.parent)
    assert cos_temp.exists(args.NAME)
    new_cos = cos_temp.get(args.NAME)
    assert new_cos.present('cosPriority', str(args.cos_priority))
    assert new_cos.present('postalcode', '12345')
Success """ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'My_Managed_Role' args.parent = 'ou=managed roles,dc=example,dc=com' args.create_parent = True args.type = 'managed' args.filter = None args.role_dn = None args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'type={}'.format(args.type), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create managed role ldif') dbgen_create_role(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that managed role is imported') roles = ManagedRoles(standalone, DEFAULT_SUFFIX) assert roles.exists(args.NAME) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a filtered role :id: cb3c8ea8-4234-40e2-8810-fb6a25973927 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with filtered role 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'My_Filtered_Role' args.parent = 'ou=filtered roles,dc=example,dc=com' args.create_parent = True args.type = 'filtered' args.filter = '"objectclass=posixAccount"' args.role_dn = None args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'type={}'.format(args.type), 'filter={}'.format(args.filter), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create filtered role ldif') dbgen_create_role(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that filtered role is imported') roles = FilteredRoles(standalone, DEFAULT_SUFFIX) assert roles.exists(args.NAME) new_role = roles.get(args.NAME) assert new_role.present('nsRoleFilter', args.filter) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a nested role :id: 97fff0a8-3103-4adb-be04-2799ff58d8f4 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with nested role 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'My_Nested_Role' args.parent = 'ou=nested roles,dc=example,dc=com' args.create_parent = True args.type = 'nested' args.filter = None args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'type={}'.format(args.type), 'role-dn={}'.format(args.role_dn), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create nested role ldif') dbgen_create_role(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that nested role is imported') roles = NestedRoles(standalone, DEFAULT_SUFFIX) assert roles.exists(args.NAME) new_role = roles.get(args.NAME) assert new_role.present('nsRoleDN', args.role_dn[0]) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create mixed modification ldif :id: 4a2e0901-2b48-452e-a4a0-507735132c8d :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate modification ldif 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone args = FakeArgs() args.parent = DEFAULT_SUFFIX args.create_users = True args.delete_users = True args.create_parent = False args.num_users = 1000 args.add_users = 100 args.del_users = 999 args.modrdn_users = 100 args.mod_users = 10 args.mod_attrs = ['cn', 'uid', 'sn'] args.randomize = False args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'create-users={}'.format(args.create_users), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'delete-users={}'.format(args.delete_users), 'num-users={}'.format(args.num_users), 'add-users={}'.format(args.add_users), 'del-users={}'.format(args.del_users), 'modrdn-users={}'.format(args.modrdn_users), 'mod-users={}'.format(args.mod_users), 'mod-attrs={}'.format(args.mod_attrs), 'randomize={}'.format(args.randomize), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create modification ldif') dbgen_create_mods(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file) log.info('Check that some accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create nested ldif :id: 
9c281c28-4169-45e0-8c07-c5502d9a7585 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate nested ldif 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone args = FakeArgs() args.suffix = DEFAULT_SUFFIX args.node_limit = 100 args.num_users = 600 args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'suffix={}'.format(args.suffix), 'node-limit={}'.format(args.node_limit), 'num-users={}'.format(args.num_users), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] log.info('Run ldifgen to create nested ldif') dbgen_create_nested(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) count_ou = len(accounts.filter('(ou=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file) standalone.restart() log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account assert len(accounts.filter('(ou=*)')) > count_ou if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dbgen_test_usan.py000066400000000000000000000720221421664411400266540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. 
# # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time """ This file contains tests similar to dbgen_test.py except that paramaters that are number are expressed as string (to mimic the parameters parser default behavior which returns an int when parsing "option value" and a string when parsing "option=value" This file has been generated by usign: sed ' 9r z1 s/ test_/ test_usan/ /args.*= [0-9]/s,[0-9]*$,"&", /:id:/s/.$/1/ ' dbgen_test.py > dbgen_test_usan.py ( with z1 file containing this comment ) """ import subprocess import pytest from lib389.cli_ctl.dbgen import * from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates from lib389.idm.account import Accounts from lib389.idm.group import Groups from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.cli_base import FakeArgs pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/dbgen.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file_and_ldif(topology_st, request): global ldif_file ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) def fin(): log.info('Delete files') os.remove(LOG_FILE) os.remove(ldif_file) request.addfinalizer(fin) def run_offline_import(instance, ldif_file): log.info('Stopping the server and running offline import...') instance.stop() assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=ldif_file) instance.start() def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): LDAP_MOD = '/usr/bin/ldapmodify' log.info('Add entries from ldif file with ldapmodify') result = 
subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', instance.host, '-p', str(instance.port), '-af', ldif_file]) if output_to_check is not None: assert output_to_check in ensure_str(result) def check_value_in_log_and_reset(content_list): with open(LOG_FILE, 'r+') as f: file_content = f.read() log.info('Check if content is present in output') for item in content_list: assert item in file_content log.info('Reset log file for next test') f.truncate(0) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create ldif with users :id: 426b5b94-9923-454d-a736-7e71ca985e91 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with users 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone args = FakeArgs() args.suffix = DEFAULT_SUFFIX args.parent = 'ou=people,dc=example,dc=com' args.number = "1000" args.rdn_cn = False args.generic = True args.start_idx = "50" args.localize = False args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'suffix={}'.format(args.suffix), 'parent={}'.format(args.parent), 'number={}'.format(args.number), 'rdn-cn={}'.format(args.rdn_cn), 'generic={}'.format(args.generic), 'start-idx={}'.format(args.start_idx), 'localize={}'.format(args.localize), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create users ldif') dbgen_create_users(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = 
len(accounts.filter('(uid=*)')) run_offline_import(standalone, ldif_file) log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create ldif with group :id: 97207413-9a93-4065-a5ec-63aa93801a31 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with group 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'myGroup' args.parent = 'ou=groups,dc=example,dc=com' args.suffix = DEFAULT_SUFFIX args.number = "1" args.num_members = "1000" args.create_members = True args.member_attr = 'uniquemember' args.member_parent = 'ou=people,dc=example,dc=com' args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'number={}'.format(args.number), 'suffix={}'.format(args.suffix), 'num-members={}'.format(args.num_members), 'create-members={}'.format(args.create_members), 'member-parent={}'.format(args.member_parent), 'member-attr={}'.format(args.member_attr), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create group ldif') dbgen_create_groups(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not 
ldif2db # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account log.info('Check that group is imported') groups = Groups(standalone, DEFAULT_SUFFIX) assert groups.exists(args.NAME + '-1') new_group = groups.get(args.NAME + '-1') new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create a COS definition :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate ldif with classic COS definition 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success
    """
    # Expected ldapmodify output for the classic COS definition entry.
    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.type = 'classic'
    args.NAME = 'My_Postal_Def'
    args.parent = 'ou=cos definitions,dc=example,dc=com'
    args.create_parent = True
    args.cos_specifier = 'businessCategory'
    args.cos_attr = ['postalcode', 'telephonenumber']
    args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
    args.ldif_file = ldif_file

    # Every option echoed by the CLI must show up in the log file.
    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'type={}'.format(args.type),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'cos-specifier={}'.format(args.cos_specifier),
                    'cos-template={}'.format(args.cos_template),
                    'cos-attr={}'.format(args.cos_attr),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create COS definition ldif')
    dbgen_create_cos_def(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that COS definition is imported')
    cos_def = CosClassicDefinitions(standalone, args.parent)
    assert cos_def.exists(args.NAME)
    new_cos = cos_def.get(args.NAME)
    assert new_cos.present('cosTemplateDN', args.cos_template)
    assert new_cos.present('cosSpecifier', args.cos_specifier)
    assert new_cos.present('cosAttribute', args.cos_attr[0])
    assert new_cos.present('cosAttribute', args.cos_attr[1])


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a COS definition

    :id: 6b26ca6d-226a-4f93-925e-faf95cc20211
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with pointer COS definition
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.type = 'pointer'
    args.NAME = 'My_Postal_Def_pointer'
    args.parent = 'ou=cos pointer definitions,dc=example,dc=com'
    args.create_parent = True
    # Pointer COS has no specifier; only a template DN.
    args.cos_specifier = None
    args.cos_attr = ['postalcode', 'telephonenumber']
    args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com'
    args.ldif_file = ldif_file

    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'type={}'.format(args.type),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'cos-template={}'.format(args.cos_template),
                    'cos-attr={}'.format(args.cos_attr),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create COS definition ldif')
    dbgen_create_cos_def(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that COS definition is imported')
    cos_def = CosPointerDefinitions(standalone, args.parent)
    assert cos_def.exists(args.NAME)
    new_cos = cos_def.get(args.NAME)
    assert new_cos.present('cosTemplateDN', args.cos_template)
    assert new_cos.present('cosAttribute', args.cos_attr[0])
    assert new_cos.present('cosAttribute', args.cos_attr[1])


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a COS definition

    :id: ab4b799e-e801-432a-a61d-badad2628201
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with indirect COS definition
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.type = 'indirect'
    args.NAME = 'My_Postal_Def_indirect'
    args.parent = 'ou=cos indirect definitions,dc=example,dc=com'
    args.create_parent = True
    args.cos_specifier = 'businessCategory'
    args.cos_attr = ['postalcode', 'telephonenumber']
    # Indirect COS has no template entry.
    args.cos_template = None
    args.ldif_file = ldif_file

    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'type={}'.format(args.type),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'cos-specifier={}'.format(args.cos_specifier),
                    'cos-attr={}'.format(args.cos_attr),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create COS definition ldif')
    dbgen_create_cos_def(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that COS definition is imported')
    cos_def = CosIndirectDefinitions(standalone, args.parent)
    assert cos_def.exists(args.NAME)
    new_cos = cos_def.get(args.NAME)
    assert new_cos.present('cosIndirectSpecifier', args.cos_specifier)
    assert new_cos.present('cosAttribute', args.cos_attr[0])
    assert new_cos.present('cosAttribute', args.cos_attr[1])


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a COS template

    :id: 544017c7-4a82-4e7d-a047-00b68a28e071
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with COS template
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.NAME = 'My_Template'
    args.parent = 'ou=cos templates,dc=example,dc=com'
    args.create_parent = True
    args.cos_priority = "1"
    args.cos_attr_val = 'postalcode:12345'
    args.ldif_file = ldif_file

    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'cos-priority={}'.format(args.cos_priority),
                    'cos-attr-val={}'.format(args.cos_attr_val),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create COS template ldif')
    dbgen_create_cos_tmp(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that COS template is imported')
    cos_temp = CosTemplates(standalone, args.parent)
    assert cos_temp.exists(args.NAME)
    new_cos = cos_temp.get(args.NAME)
    assert new_cos.present('cosPriority', str(args.cos_priority))
    assert new_cos.present('postalcode', '12345')


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a managed role

    :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with managed role
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.NAME = 'My_Managed_Role'
    args.parent = 'ou=managed roles,dc=example,dc=com'
    args.create_parent = True
    args.type = 'managed'
    # Managed roles take neither a filter nor nested role DNs.
    args.filter = None
    args.role_dn = None
    args.ldif_file = ldif_file

    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'type={}'.format(args.type),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create managed role ldif')
    dbgen_create_role(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that managed role is imported')
    roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
    assert roles.exists(args.NAME)


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a filtered role

    :id: cb3c8ea8-4234-40e2-8810-fb6a25973921
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with filtered role
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4. Success
    """
    LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"'

    standalone = topology_st.standalone

    args = FakeArgs()
    args.NAME = 'My_Filtered_Role'
    args.parent = 'ou=filtered roles,dc=example,dc=com'
    args.create_parent = True
    args.type = 'filtered'
    args.filter = '"objectclass=posixAccount"'
    args.role_dn = None
    args.ldif_file = ldif_file

    content_list = ['Generating LDIF with the following options:',
                    'NAME={}'.format(args.NAME),
                    'parent={}'.format(args.parent),
                    'create-parent={}'.format(args.create_parent),
                    'type={}'.format(args.type),
                    'filter={}'.format(args.filter),
                    'ldif-file={}'.format(args.ldif_file),
                    'Writing LDIF',
                    'Successfully created LDIF file: {}'.format(args.ldif_file)]

    log.info('Run ldifgen to create filtered role ldif')
    dbgen_create_role(standalone, log, args)

    log.info('Check if file exists')
    assert os.path.exists(ldif_file)

    check_value_in_log_and_reset(content_list)

    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)

    log.info('Check that filtered role is imported')
    roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
    assert roles.exists(args.NAME)
    new_role = roles.get(args.NAME)
    assert new_role.present('nsRoleFilter', args.filter)


@pytest.mark.ds50545
@pytest.mark.bz1798394
@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif):
    """Test ldifgen (formerly dbgen) tool to create a nested role

    :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1
    :setup: Standalone instance
    :steps:
         1. Create DS instance
         2. Run ldifgen to generate ldif with nested role
         3. Import generated ldif to database
         4. Check it was properly imported
    :expectedresults:
         1. Success
         2. Success
         3. Success
         4.
Success """ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' standalone = topology_st.standalone args = FakeArgs() args.NAME = 'My_Nested_Role' args.parent = 'ou=nested roles,dc=example,dc=com' args.create_parent = True args.type = 'nested' args.filter = None args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'NAME={}'.format(args.NAME), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'type={}'.format(args.type), 'role-dn={}'.format(args.role_dn), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create nested role ldif') dbgen_create_role(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) log.info('Check that nested role is imported') roles = NestedRoles(standalone, DEFAULT_SUFFIX) assert roles.exists(args.NAME) new_role = roles.get(args.NAME) assert new_role.present('nsRoleDN', args.role_dn[0]) @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create mixed modification ldif :id: 4a2e0901-2b48-452e-a4a0-507735132c81 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate modification ldif 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone args = FakeArgs() args.parent = DEFAULT_SUFFIX args.create_users = True args.delete_users = True args.create_parent = False args.num_users = "1000" args.add_users = "100" args.del_users = "999" args.modrdn_users = "100" args.mod_users = "10" args.mod_attrs = ['cn', 'uid', 'sn'] args.randomize = False args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'create-users={}'.format(args.create_users), 'parent={}'.format(args.parent), 'create-parent={}'.format(args.create_parent), 'delete-users={}'.format(args.delete_users), 'num-users={}'.format(args.num_users), 'add-users={}'.format(args.add_users), 'del-users={}'.format(args.del_users), 'modrdn-users={}'.format(args.modrdn_users), 'mod-users={}'.format(args.mod_users), 'mod-attrs={}'.format(args.mod_attrs), 'randomize={}'.format(args.randomize), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created LDIF file: {}'.format(args.ldif_file)] log.info('Run ldifgen to create modification ldif') dbgen_create_mods(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file) log.info('Check that some accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account @pytest.mark.ds50545 @pytest.mark.bz1798394 @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): """Test ldifgen (formerly dbgen) tool to create nested 
ldif :id: 9c281c28-4169-45e0-8c07-c5502d9a7581 :setup: Standalone instance :steps: 1. Create DS instance 2. Run ldifgen to generate nested ldif 3. Import generated ldif to database 4. Check it was properly imported :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone args = FakeArgs() args.suffix = DEFAULT_SUFFIX args.node_limit = "100" args.num_users = "600" args.ldif_file = ldif_file content_list = ['Generating LDIF with the following options:', 'suffix={}'.format(args.suffix), 'node-limit={}'.format(args.node_limit), 'num-users={}'.format(args.num_users), 'ldif-file={}'.format(args.ldif_file), 'Writing LDIF', 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] log.info('Run ldifgen to create nested ldif') dbgen_create_nested(standalone, log, args) log.info('Check if file exists') assert os.path.exists(ldif_file) check_value_in_log_and_reset(content_list) log.info('Get number of accounts before import') accounts = Accounts(standalone, DEFAULT_SUFFIX) count_account = len(accounts.filter('(uid=*)')) count_ou = len(accounts.filter('(ou=*)')) # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 with pytest.raises(subprocess.CalledProcessError): run_ldapmodify_from_file(standalone, ldif_file) standalone.restart() log.info('Check that accounts are imported') assert len(accounts.filter('(uid=*)')) > count_account assert len(accounts.filter('(ou=*)')) > count_ou if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dbmon_test.py000066400000000000000000000106751421664411400256540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. 
# All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.cli_conf.monitor import db_monitor from lib389.cli_base import FakeArgs pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) OUTPUT_NO_INDEXES = [ 'DB Monitor Report', 'Database Cache:', 'Cache Hit Ratio:', 'Free Space:', 'Free Percentage:', 'RO Page Drops:', 'Pages In:', 'Pages Out:', 'Normalized DN Cache:', 'Cache Hit Ratio:', 'Free Space:', 'Free Percentage:', 'DN Count:', 'Evictions:', 'Backends:', 'dc=example,dc=com (userRoot):', 'Entry Cache Hit Ratio:', 'Entry Cache Count:', 'Entry Cache Free Space:', 'Entry Cache Free Percentage:', 'Entry Cache Average Size:', 'DN Cache Hit Ratio:', 'DN Cache Count:', 'DN Cache Free Space:', 'DN Cache Free Percentage:', 'DN Cache Average Size:' ] OUTPUT_INDEXES = [ 'DB Monitor Report', 'Database Cache:', 'Cache Hit Ratio:', 'Free Space:', 'Free Percentage:', 'RO Page Drops:', 'Pages In:', 'Pages Out:', 'Normalized DN Cache:', 'Cache Hit Ratio:', 'Free Space:', 'Free Percentage:', 'DN Count:', 'Evictions:', 'Backends:', 'dc=example,dc=com (userRoot):', 'Entry Cache Hit Ratio:', 'Entry Cache Count:', 'Entry Cache Free Space:', 'Entry Cache Free Percentage:', 'Entry Cache Average Size:', 'DN Cache Hit Ratio:', 'DN Cache Count:', 'DN Cache Free Space:', 'DN Cache Free Percentage:', 'DN Cache Average Size:', 'Indexes:', 'Index: aci.db', 'Cache Hit:', 'Cache Miss:', 'Page In:', 'Page Out:', 'Index: id2entry.db', 'Index: objectclass.db', 'Index: entryrdn.db' ] JSON_OUTPUT = [ 'date', 'dbcache', 'hit_ratio', 'free', 'free_percentage', 'roevicts', 'pagein', 'pageout', 'ndncache', 'hit_ratio', 'free', 'free_percentage', 'count', 'evictions', 'backends', 'userRoot', '"suffix": 
"dc=example,dc=com"', 'entry_cache_count', 'entry_cache_free', 'entry_cache_free_percentage', 'entry_cache_size', 'entry_cache_hit_ratio', 'dn_cache_count', 'dn_cache_free', 'dn_cache_free_percentage', 'dn_cache_size', 'dn_cache_hit_ratio', 'indexes', 'name', 'objectclass.db', 'cachehit', 'cachemiss', 'pagein', 'pageout', 'entryrdn.db', 'aci.db', 'id2entry.db' ] def clear_log(inst): log.info('Clear the log') inst.logcap.flush() @pytest.mark.ds50545 @pytest.mark.bz1795943 @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsconf_dbmon(topology_st): """Test dbmon tool, that was ported from legacy tools to dsconf :id: 4d584ba9-12a9-4e90-ba9a-7e103affdac5 :setup: Standalone instance :steps: 1. Create DS instance 2. Run dbmon without --indexes 3. Run dbmon with --indexes 4. Run dbmon with --json :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone args = FakeArgs() args.backends = DEFAULT_BENAME args.indexes = False args.json = False log.info('Sanity check for syntax') db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) for item in OUTPUT_NO_INDEXES: assert topology_st.logcap.contains(item) clear_log(topology_st) log.info('Sanity check for --indexes output') args.indexes = True db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) for index_item in OUTPUT_INDEXES: assert topology_st.logcap.contains(index_item) clear_log(topology_st) log.info('Sanity check for --json output') args.json = True db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) for json_item in JSON_OUTPUT: assert topology_st.logcap.contains(json_item) clear_log(topology_st) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dbverify_test.py000066400000000000000000000035571421664411400263700ustar00rootroot00000000000000# --- BEGIN 
COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.cli_ctl.dbtasks import dbtasks_verify from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.cli_base import FakeArgs pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/dbverify.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file(request): fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) def fin(): log.info('Delete log file') os.remove(LOG_FILE) request.addfinalizer(fin) @pytest.mark.ds50545 @pytest.mark.bz1739718 @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsctl_dbverify(topology_st, set_log_file): """Test dbverify tool, that was ported from legacy tools to dsctl :id: 1b22b363-a6e5-4922-ad42-ae80446d69fe :setup: Standalone instance :steps: 1. Create DS instance 2. Run dbverify 3. Check if dbverify was successful :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone message = 'dbverify successful' args = FakeArgs() args.backend = DEFAULT_BENAME log.info('Run dbverify') standalone.stop() dbtasks_verify(standalone, log, args) log.info('Check dbverify was successful') with open(LOG_FILE, 'r+') as f: file_content = f.read() assert message in file_content if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py000066400000000000000000000037121421664411400300260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

import logging
import pytest
import os
import time
from lib389.topologies import topology_st as topo

log = logging.getLogger(__name__)


#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_custom_path(topo):
    """Test that a custom path, backup directory, is correctly used by lib389
    when the server is stopped.

    :id: 8659e209-ee83-477e-8183-1d2f555669ea
    :setup: Standalone Instance
    :steps:
        1. Get the LDIF directory
        2. Change the server's backup directory to the LDIF directory
        3. Stop the server, and perform a backup
        4. Backup was written to LDIF directory
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """

    # Get LDIF dir
    ldif_dir = topo.standalone.get_ldif_dir()
    bak_dir = topo.standalone.get_bak_dir()
    log.info("ldif dir: " + ldif_dir + " items: " + str(len(os.listdir(ldif_dir))))
    log.info("bak dir: " + bak_dir + " items: " + str(len(os.listdir(bak_dir))))

    # Set backup directory to LDIF directory
    topo.standalone.config.replace('nsslapd-bakdir', ldif_dir)
    time.sleep(.5)

    # Stop the server and take a backup
    topo.standalone.stop()
    time.sleep(.5)
    topo.standalone.db2bak(None)
    # Bug, bak dir is being pulled from defaults.inf, and not from config

    # Verify backup was written to LDIF directory
    log.info("AFTER: ldif dir (new bak dir): " + ldif_dir + " items: " + str(len(os.listdir(ldif_dir))))
    log.info("AFTER: bak dir: " + bak_dir + " items: " + str(len(os.listdir(bak_dir))))
    # Backup should have landed in the (re-pointed) ldif dir, making it non-empty.
    assert len(os.listdir(ldif_dir))


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsctl_tls_test.py000066400000000000000000000050451421664411400265430ustar00rootroot00000000000000import logging
import pytest
import os
from lib389.topologies import topology_st as topo
from lib389.nss_ssl import NssSsl

log = logging.getLogger(__name__)


def test_tls_command_returns_error_text(topo):
    """CLI commands that called certutil should return the error text from
    certutil when something goes wrong, and not the system error code number.

    :id: 7f0c28d0-6e13-4ca4-bec2-4586d56b73f6
    :setup: Standalone Instance
    :steps:
        1. Issue invalid "generate key and cert" command, and error text is returned
        2. Issue invalid "delete cert" command, and error text is returned
        3. Issue invalid "import ca cert" command, and error text is returned
        4. Issue invalid "import server cert" command, and error text is returned
        5. Issue invalid "import key and server cert" command, and error text is returned
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """

    # Each probe below triggers a certutil failure and checks the raised
    # ValueError carries certutil's message rather than exit code "255".

    # dsctl localhost tls generate-server-cert-csr -s "bad"
    tls = NssSsl(dirsrv=topo.standalone)
    try:
        tls.create_rsa_key_and_csr([], "bad")
        assert False
    except ValueError as e:
        assert '255' not in str(e)
        assert 'improperly formatted name' in str(e)

    # dsctl localhost tls remove-cert
    try:
        tls.del_cert("bad")
        assert False
    except ValueError as e:
        assert '255' not in str(e)
        assert 'could not find certificate named' in str(e)

    # dsctl localhost tls import-ca
    try:
        invalid_file = topo.standalone.confdir + '/dse.ldif'
        tls.add_cert(nickname="bad", input_file=invalid_file)
        assert False
    except ValueError as e:
        assert '255' not in str(e)
        assert 'error converting ascii to binary' in str(e)

    # dsctl localhost tls import-server-cert
    try:
        invalid_file = topo.standalone.confdir + '/dse.ldif'
        tls.import_rsa_crt(crt=invalid_file)
        assert False
    except ValueError as e:
        assert '255' not in str(e)
        assert 'error converting ascii to binary' in str(e)

    # dsctl localhost tls import-server-key-cert
    try:
        invalid_file = topo.standalone.confdir + '/dse.ldif'
        tls.add_server_key_and_cert(invalid_file, invalid_file)
        assert False
    except ValueError as e:
        assert '255' not in str(e)
        assert 'unable to load private key' in str(e)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE =
os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsidm_account_test.py000066400000000000000000000073611421664411400273670ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest import logging import os from lib389 import DEFAULT_SUFFIX from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \ subtree_status, reset_password, change_password from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.utils import ds_is_older from lib389.idm.user import nsUserAccounts from . import check_value_in_log_and_reset pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_test_user(topology_st, request): log.info('Create test user') users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) test_user = users.create_test_user() def fin(): log.info('Delete test user') if test_user.exists(): test_user.delete() request.addfinalizer(fin) @pytest.mark.bz1862971 @pytest.mark.ds4281 @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): """ Test dsidm account entry-status option with account lock/unlock :id: d911bbf2-3a65-42a4-ad76-df1114caa396 :setup: Standalone instance :steps: 1. Create user account 2. Run dsidm account entry status 3. Run dsidm account lock 4. Run dsidm account entry status 5. Run dsidm account unlock 6. Run dsidm account entry status :expectedresults: 1. Success 2. The state message should be Entry State: activated 3. Success 4. 
The state message should be Entry State: directly locked through nsAccountLock 5. Success 6. The state message should be Entry State: activated """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') entry_list = ['Entry DN: {}'.format(test_user.dn), 'Entry Creation Date', 'Entry Modification Date'] state_lock = 'Entry State: directly locked through nsAccountLock' state_unlock= 'Entry State: activated' lock_msg = 'Entry {} is locked'.format(test_user.dn) unlock_msg = 'Entry {} is unlocked'.format(test_user.dn) args = FakeArgs() args.dn = test_user.dn log.info('Test dsidm account entry-status') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) log.info('Test dsidm account lock') lock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=lock_msg) log.info('Test dsidm account entry-status with locked account') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_lock) log.info('Test dsidm account unlock') unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=unlock_msg) log.info('Test dsidm account entry-status with unlocked account') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsidm_config_test.py000066400000000000000000000165741421664411400272060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. 
# All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import pytest import logging import os from lib389 import DEFAULT_SUFFIX from lib389.cli_idm.client_config import sssd_conf, ldap_conf, display from lib389.plugins import MemberOfPlugin from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.idm.group import Groups from lib389.idm.user import nsUserAccounts from lib389.utils import ds_is_older pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/dsidm.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file(request): fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) def fin(): log.info('Delete log file') os.remove(LOG_FILE) request.addfinalizer(fin) def check_value_in_log_and_reset(content_list, content_list2=None, check_value=None): with open(LOG_FILE, 'r+') as f: file_content = f.read() if content_list2 is not None: log.info('Check if content is present in output') for item in content_list + content_list2: assert item.lower() in file_content.lower() else: log.info('Check if content is present in output') for item in content_list: assert item.lower() in file_content.lower() if check_value is not None: log.info('Check if value is present in output') assert check_value in file_content log.info('Reset log file for next test') f.truncate(0) @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_config_sssd(topology_st, set_log_file): """ Test dsidm creation of sssd.conf content :id: 77812ba6-b133-40f4-91a7-13309618f24d :setup: Standalone instance :steps: 1. Run dsidm client_config sssd.conf 2. Enable MemberOfPlugin 3. Run dsidm client_config sssd.conf with allowed group :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone sssd_content_list = ['Generated by 389 Directory Server - dsidm', 'id_provider = ldap', 'auth_provider = ldap', 'access_provider = ldap', 'chpass_provider = ldap', 'ldap_search_base = ' + DEFAULT_SUFFIX, 'ldap_uri = ' + standalone.ldapuri, 'ldap_user_member_of = memberof', 'ignore_group_members = False', '[sssd]', 'services = nss, pam, ssh, sudo', 'config_file_version = 2', 'domains = ldap', '[nss]', 'homedir_substring = /home'] schema = 'ldap_schema = rfc2307' args = FakeArgs() args.allowed_group = None log.info('Create sssd.conf content') sssd_conf(standalone, DEFAULT_SUFFIX, log, args) log.info('Check if config creation was successful') check_value_in_log_and_reset(sssd_content_list, check_value=schema) log.info('Now we test allowed_group argument') log.info('Enable MemberOf plugin') plugin = MemberOfPlugin(standalone) plugin.enable() standalone.restart() log.info('Create test group') groups = Groups(standalone, DEFAULT_SUFFIX) test_group = groups.create(properties={"cn": "new_group", "description": "testgroup"}) log.info('Create sssd.conf content with allowed group') filter_msg = ['ldap_access_filter = (memberOf={})'.format(test_group.dn), 'ldap_schema = rfc2307bis'] args.allowed_group = test_group.rdn sssd_conf(standalone, DEFAULT_SUFFIX, log, args) log.info('Check if config creation was successful') check_value_in_log_and_reset(sssd_content_list, filter_msg) @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_config_ldap(topology_st, set_log_file): """ Test dsidm creation of ldap.conf content :id: 29ffcc91-9104-4c90-bcdf-0f6a4082322c :setup: Standalone instance :steps: 1. Create instance 2. Run dsidm client_config ldap.conf :expectedresults: 1. Success 2. 
Success """ standalone = topology_st.standalone args = FakeArgs() ldap_content_list = ['OpenLDAP client configuration', 'Generated by 389 Directory Server - dsidm', 'BASE ' + DEFAULT_SUFFIX, 'URI ' + standalone.ldapuri, 'DEREF never', 'TLS_CACERTDIR /etc/openldap/certs'] log.info('Create ldap.conf content') ldap_conf(standalone, DEFAULT_SUFFIX, log, args) log.info('Check if config creation was successful') check_value_in_log_and_reset(ldap_content_list) @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_config_display(topology_st, set_log_file): """ Test dsidm display option :id: 6e888ae2-8835-44d5-846b-e971d76aa461 :setup: Standalone instance :steps: 1. Run dsidm client_config display 2. Enable MemberOfPlugin 3. Run dsidm client_config display with MemberOfPlugin :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) groups = Groups(standalone, DEFAULT_SUFFIX) display_content_list = ['ldap_uri = ' + standalone.ldapuri, 'ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom', 'group_basedn = ' + groups._basedn, 'basedn = ' + DEFAULT_SUFFIX, 'user_basedn = ' + users._basedn, 'user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)' '(objectclass=posixAccount))', 'unique id = nsUniqueId', 'group member attribute = member', 'user rdn = uid', 'user identifier = uid', 'group_filter = (&(objectclass=groupOfNames))', 'group rdn = cn'] schema_type = 'rfc2307' args = FakeArgs() log.info('Test dsidm display option') display(standalone, DEFAULT_SUFFIX, log, args) log.info('Check if display option was successful') check_value_in_log_and_reset(display_content_list, check_value=schema_type) log.info('Enable MemberOf plugin') plugin = MemberOfPlugin(standalone) plugin.enable() standalone.restart() log.info('Test dsidm display option with MemberOf plugin') display(standalone, DEFAULT_SUFFIX, log, args) log.info('Check if display option was 
successful with MemberOf plugin enabled') schema_type = 'rfc2307bis' check_value_in_log_and_reset(display_content_list, check_value=schema_type) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py000066400000000000000000000046501421664411400320110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest import logging import os from lib389 import DEFAULT_SUFFIX from lib389.cli_idm.organizationalunit import get, get_dn, create, modify, delete, list, rename from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.utils import ds_is_older from lib389.idm.organizationalunit import OrganizationalUnits from . import check_value_in_log_and_reset pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_test_ou(topology_st, request): log.info('Create organizational unit') ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) test_ou = ous.create(properties={ 'ou': 'toDelete', 'description': 'Test OU', }) def fin(): log.info('Delete organizational unit') if test_ou.exists(): test_ou.delete() request.addfinalizer(fin) @pytest.mark.bz1866294 @pytest.mark.ds4284 @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") @pytest.mark.xfail(ds_is_older("1.4.3.16"), reason="Might fail because of bz1866294") def test_dsidm_organizational_unit_delete(topology_st, create_test_ou): """ Test dsidm organizationalunit delete :id: 5d35a5ee-85c2-4b83-9101-938ba7732ccd :customerscenario: True :setup: Standalone instance :steps: 1. 
Run dsidm organizationalunit delete 2. Check the ou is deleted :expectedresults: 1. Success 2. Entry is deleted """ standalone = topology_st.standalone ous = OrganizationalUnits(standalone, DEFAULT_SUFFIX) test_ou = ous.get('toDelete') delete_value = 'Successfully deleted {}'.format(test_ou.dn) args = FakeArgs() args.dn = test_ou.dn log.info('Test dsidm organizationalunit delete') delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=delete_value) log.info('Check the entry is deleted') assert not test_ou.exists() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsidm_services_test.py000066400000000000000000000334521421664411400275560ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest import logging import os from lib389 import DEFAULT_SUFFIX from lib389.cli_idm.service import list, get, get_dn, create, delete, modify, rename from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.utils import ds_is_older, ensure_str from lib389.idm.services import ServiceAccounts from . 
import check_value_in_log_and_reset pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_test_service(topology_st, request): service_name = 'test_service' services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) log.info('Create test service') if services.exists(service_name): test_service = services.get(service_name) test_service.delete() else: test_service = services.create_test_service() def fin(): log.info('Delete test service') if test_service.exists(): test_service.delete() request.addfinalizer(fin) @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_list(topology_st, create_test_service): """ Test dsidm service list option :id: 218aa060-51e1-11ec-8a70-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service list option without json 2. Check the output content is correct 3. Run dsidm service list option with json 4. Check the json content is correct 5. Delete the service 6. Check the service is not in the list with json 7. Check the service is not in the list without json :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ standalone = topology_st.standalone args = FakeArgs() args.json = False service_value = 'test_service' json_list = ['type', 'list', 'items'] log.info('Empty the log file to prevent false data to check about service') topology_st.logcap.flush() log.info('Test dsidm service list without json') list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=service_value) log.info('Test dsidm service list with json') args.json = True list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=service_value) log.info('Delete the service') services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) testservice = services.get(service_value) testservice.delete() log.info('Test empty dsidm service list with json') list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=service_value) log.info('Test empty dsidm service list without json') args.json = False list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value_not=service_value) @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_get_rdn(topology_st, create_test_service): """ Test dsidm service get option :id: 294ef774-51e1-11ec-a2c7-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm get option for created service with json 2. Check the output content is correct 3. Run dsidm get option for created service without json 4. Check the json content is correct :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) testservice = services.get('test_service') service_content = [f'dn: {testservice.dn}', f'cn: {testservice.rdn}', 'description: Test Service', 'objectClass: top', 'objectClass: nsAccount', 'objectClass: nsMemberOf'] json_content = ['attrs', 'objectclass', 'top', 'nsAccount', 'nsMemberOf', testservice.rdn, 'cn', 'description', 'creatorsname', 'cn=directory manager', 'modifiersname', 'createtimestamp', 'modifytimestamp', 'nsuniqueid', 'parentid', 'entryid', 'entrydn', testservice.dn] args = FakeArgs() args.json = False args.selector = 'test_service' log.info('Empty the log file to prevent false data to check about service') topology_st.logcap.flush() log.info('Test dsidm service get without json') get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=service_content) log.info('Test dsidm service get with json') args.json = True get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_content) @pytest.mark.bz1893667 @pytest.mark.xfail(reason="Will fail because of bz1893667") @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_get_dn(topology_st, create_test_service): """ Test dsidm service get_dn option :id: 2e4c8f98-51e1-11ec-b472-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service get_dn for created service 2. Check the output content is correct :expectedresults: 1. Success 2. 
Success """ standalone = topology_st.standalone services = ServiceAccounts(standalone, DEFAULT_SUFFIX) test_service = services.get('test_service') args = FakeArgs() args.dn = test_service.dn log.info('Empty the log file to prevent false data to check about service') topology_st.logcap.flush() log.info('Test dsidm service get_dn without json') get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) # check_value_in_log_and_reset(topology_st, content_list=service_content) # The check_value_in_log_and_reset will have to be updated accordinly after bz1893667 is fixed # because now I can't determine the output @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_create(topology_st): """ Test dsidm service create option :id: 338efbc6-51e1-11ec-a83a-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service create 2. Check that a message is provided on creation 3. Check that created service exists :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone service_name = 'new_service' output = f'Successfully created {service_name}' args = FakeArgs() args.cn = service_name args.description = service_name log.info('Test dsidm service create') create(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Check that service is present') services = ServiceAccounts(standalone, DEFAULT_SUFFIX) new_service = services.get(service_name) assert new_service.exists() log.info('Clean up for next test') new_service.delete() @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_delete(topology_st, create_test_service): """ Test dsidm service delete option :id: 3b382a96-51e1-11ec-a1c2-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service delete on created service 2. Check that a message is provided on deletion 3. Check that service does not exist :expectedresults: 1. 
Success 2. Success 3. Success """ standalone = topology_st.standalone services = ServiceAccounts(standalone, DEFAULT_SUFFIX) test_service = services.get('test_service') output = f'Successfully deleted {test_service.dn}' args = FakeArgs() args.dn = test_service.dn log.info('Test dsidm service delete') delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Check that service does not exist') assert not test_service.exists() @pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") def test_dsidm_service_modify(topology_st, create_test_service): """ Test dsidm service modify add, replace, delete option :id: 4023ef22-51e1-11ec-93c5-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service modify replace description value 2. Run dsidm service modify add seeAlso attribute to service 3. Run dsidm service modify delete for seeAlso attribute :expectedresults: 1. description value is replaced with new text 2. seeAlso attribute is present 3. 
seeAlso attribute is deleted """ standalone = topology_st.standalone services = ServiceAccounts(standalone, DEFAULT_SUFFIX) test_service = services.get('test_service') output = f'Successfully modified {test_service.dn}' args = FakeArgs() args.selector = 'test_service' args.changes = ['replace:description:Test Service Modified'] log.info('Test dsidm service modify replace') modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Test dsidm service modify add') args.changes = [f'add:seeAlso:ou=services,{DEFAULT_SUFFIX}'] modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) assert test_service.present('seeAlso', f'ou=services,{DEFAULT_SUFFIX}') log.info('Test dsidm service modify delete') args.changes = [f'delete:seeAlso:ou=services,{DEFAULT_SUFFIX}'] modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) assert not test_service.present('seeAlso', f'ou=services,{DEFAULT_SUFFIX}') @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_service_rename_keep_old_rdn(topology_st, create_test_service): """ Test dsidm service rename option with keep-old-rdn :id: 44cc6b08-51e1-11ec-89e7-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service rename option with keep-old-rdn 2. Check the service does have another cn attribute with the old rdn 3. Check the old service is deleted :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone services = ServiceAccounts(standalone, DEFAULT_SUFFIX) test_service = services.get('test_service') args = FakeArgs() args.selector = test_service.rdn args.new_name = 'my_service' args.keep_old_rdn = True log.info('Test dsidm service rename') rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) my_service = services.get(args.new_name) output = f'Successfully renamed to {my_service.dn}' check_value_in_log_and_reset(topology_st, check_value=output) log.info('my_service should have cn attribute with the old rdn') assert my_service.present('cn', 'test_service') assert my_service.get_attr_val_utf8('cn') == 'test_service' assert my_service.get_attr_val_utf8('description') == 'Test Service' log.info('Old service dn should not exist') assert not test_service.exists() log.info('Clean up') my_service.delete() @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_service_rename(topology_st, create_test_service): """ Test dsidm service rename option :id: 4a13ea64-51e1-11ec-b3ff-3497f624ea11 :setup: Standalone instance :steps: 1. Run dsidm service rename option on created service 2. Check the service does not have another cn attribute with the old rdn 3. Check the old service is deleted :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone services = ServiceAccounts(standalone, DEFAULT_SUFFIX) test_service = services.get('test_service') args = FakeArgs() args.selector = test_service.rdn args.new_name = 'my_service' args.keep_old_rdn = False log.info('Test dsidm service rename') args.new_name = 'my_service' rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) my_service = services.get(args.new_name) output = f'Successfully renamed to {my_service.dn}' check_value_in_log_and_reset(topology_st, check_value=output) log.info('New service should not have cn attribute with the old rdn') assert not my_service.present('cn', 'test_service') assert my_service.get_attr_val_utf8('cn') == 'my_service' assert my_service.get_attr_val_utf8('description') == 'Test Service' log.info('Old service dn should not exist.') assert not test_service.exists() log.info('Clean up') my_service.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsidm_user_test.py000066400000000000000000000340401421664411400267030ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest import logging import os from lib389 import DEFAULT_SUFFIX from lib389.cli_idm.user import list, get, get_dn, create, delete, modify, rename from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.utils import ds_is_older, ensure_str from lib389.idm.user import nsUserAccounts from . 
import check_value_in_log_and_reset pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_test_user(topology_st, request): user_name = 'test_user_1000' users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) log.info('Create test user') if users.exists(user_name): test_user = users.get(user_name) test_user.delete() else: test_user = users.create_test_user() def fin(): log.info('Delete test user') if test_user.exists(): test_user.delete() request.addfinalizer(fin) @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_list(topology_st, create_test_user): """ Test dsidm user list option :id: a7400ac2-b629-4507-bc05-c6402a5b437b :setup: Standalone instance :steps: 1. Run dsidm user list option without json 2. Check the output content is correct 3. Run dsidm user list option with json 4. Check the json content is correct 5. Delete the user 6. Check the user is not in the list with json 7. Check the user is not in the list without json :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ standalone = topology_st.standalone args = FakeArgs() args.json = False user_value = 'test_user_1000' json_list = ['type', 'list', 'items'] log.info('Empty the log file to prevent false data to check about user') topology_st.logcap.flush() log.info('Test dsidm user list without json') list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=user_value) log.info('Test dsidm user list with json') args.json = True list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=user_value) log.info('Delete the user') users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) testuser = users.get(user_value) testuser.delete() log.info('Test empty dsidm user list with json') list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=user_value) log.info('Test empty dsidm user list without json') args.json = False list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value_not=user_value) @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_get_rdn(topology_st, create_test_user): """ Test dsidm user get option :id: 8c7247cd-7588-45d3-817c-ac5a9f135b32 :setup: Standalone instance :steps: 1. Run dsidm get option for created user with json 2. Check the output content is correct 3. Run dsidm get option for created user without json 4. Check the json content is correct :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) testuser = users.get('test_user_1000') user_content = ['dn: {}'.format(testuser.dn), 'cn: {}'.format(testuser.rdn), 'displayName: {}'.format(testuser.rdn), 'gidNumber: 2000', 'homeDirectory: /home/{}'.format(testuser.rdn), 'objectClass: top', 'objectClass: nsPerson', 'objectClass: nsAccount', 'objectClass: nsOrgPerson', 'objectClass: posixAccount', 'uid: {}'.format(testuser.rdn), 'uidNumber: 1000'] json_content = ['attrs', 'objectclass', 'top', 'nsPerson', 'nsAccount', 'nsOrgPerson', 'posixAccount', 'uid', testuser.rdn, 'cn', 'displayname', 'uidnumber', 'gidnumber', '2000', 'homedirectory', '/home/{}'.format(testuser.rdn), 'creatorsname', 'cn=directory manager', 'modifiersname', 'createtimestamp', 'modifytimestamp', 'nsuniqueid', 'parentid', 'entryid', 'entrydn', testuser.dn] args = FakeArgs() args.json = False args.selector = 'test_user_1000' log.info('Empty the log file to prevent false data to check about user') topology_st.logcap.flush() log.info('Test dsidm user get without json') get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=user_content) log.info('Test dsidm user get with json') args.json = True get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=json_content) @pytest.mark.bz1893667 @pytest.mark.xfail(reason="Will fail because of bz1893667") @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_get_dn(topology_st, create_test_user): """ Test dsidm user get_dn option :id: 787bf278-87c3-402e-936e-6161799d098d :setup: Standalone instance :steps: 1. Run dsidm user get_dn for created user 2. Check the output content is correct :expectedresults: 1. Success 2. 
Success """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') args = FakeArgs() args.dn = test_user.dn log.info('Empty the log file to prevent false data to check about user') topology_st.logcap.flush() log.info('Test dsidm user get_dn without json') get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) # check_value_in_log_and_reset(topology_st, content_list=user_content) # The check_value_in_log_and_reset will have to be updated accordinly after bz1893667 is fixed # because now I can't determine the output @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_create(topology_st): """ Test dsidm user create option :id: 862f5875-11fd-4e8e-92c1-397010386eb8 :setup: Standalone instance :steps: 1. Run dsidm user create 2. Check that a message is provided on creation 3. Check that created user exists :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone user_name = 'new_user' output = 'Successfully created {}'.format(user_name) args = FakeArgs() args.uid = user_name args.cn = user_name args.displayName = user_name args.uidNumber = '1030' args.gidNumber = '2030' args.homeDirectory = '/home/{}'.format(user_name) log.info('Test dsidm user create') create(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Check that user is present') users = nsUserAccounts(standalone, DEFAULT_SUFFIX) new_user = users.get(user_name) assert new_user.exists() log.info('Clean up for next test') new_user.delete() @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_delete(topology_st, create_test_user): """ Test dsidm user delete option :id: 3704dc3a-9787-4f74-aaa8-45f38e4a6a52 :setup: Standalone instance :steps: 1. Run dsidm user delete on created user 2. Check that a message is provided on deletion 3. 
Check that user does not exist :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') output = 'Successfully deleted {}'.format(test_user.dn) args = FakeArgs() args.dn = test_user.dn log.info('Test dsidm user delete') delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Check that user does not exist') assert not test_user.exists() @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_modify(topology_st, create_test_user): """ Test dsidm user modify add, replace, delete option :id: 7a27be19-1a63-44d0-b11b-f877e06e1a9b :setup: Standalone instance :steps: 1. Run dsidm user modify replace cn value 2. Run dsidm user modify add telephoneNumber attribute to user 3. Run dsidm user modify delete for telephoneNumber attribute :expectedresults: 1. cn value is replaced with new name 2. telephoneNumber attribute is present 3. 
telephoneNumber attribute is deleted """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') output = 'Successfully modified {}'.format(test_user.dn) args = FakeArgs() args.selector = 'test_user_1000' args.changes = ['replace:cn:test'] log.info('Test dsidm user modify replace') modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) log.info('Test dsidm user modify add') args.changes = ['add:telephoneNumber:1234567890'] modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) assert test_user.present('telephoneNumber', '1234567890') log.info('Test dsidm user modify delete') args.changes = ['delete:telephoneNumber:1234567890'] modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) check_value_in_log_and_reset(topology_st, check_value=output) assert not test_user.present('telephoneNumber', '1234567890') @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_rename_keep_old_rdn(topology_st, create_test_user): """ Test dsidm user rename option with keep-old-rdn :id: 3fd0827c-ab5e-4586-9493-55bc5076a887 :setup: Standalone instance :steps: 1. Run dsidm user rename option with keep-old-rdn 2. Check the user does have another uid attribute with the old rdn 3. Check the old user is deleted :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') args = FakeArgs() args.selector = test_user.rdn args.new_name = 'my_user' args.keep_old_rdn = True log.info('Test dsidm user rename') rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) my_user = users.get(args.new_name) output = 'Successfully renamed to {}'.format(my_user.dn) check_value_in_log_and_reset(topology_st, check_value=output) log.info('my_user should have uid attribute with the old rdn') assert my_user.present('uid', 'test_user_1000') assert my_user.get_attr_val_utf8('cn') == 'test_user_1000' assert my_user.get_attr_val_utf8('displayName') == 'test_user_1000' log.info('Old user dn should not exist') assert not test_user.exists() log.info('Clean up') my_user.delete() @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_user_rename(topology_st, create_test_user): """ Test dsidm user rename option :id: fa569966-3954-465f-92b0-331a3a088b1b :setup: Standalone instance :steps: 1. Run dsidm user rename option on created user 2. Check the user does not have another uid attribute with the old rdn 3. Check the old user is deleted :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('test_user_1000') args = FakeArgs() args.selector = test_user.rdn args.new_name = 'my_user' args.keep_old_rdn = False log.info('Test dsidm user rename') args.new_name = 'my_user' rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) my_user = users.get(args.new_name) output = 'Successfully renamed to {}'.format(my_user.dn) check_value_in_log_and_reset(topology_st, check_value=output) log.info('New user should not have uid attribute with the old rdn') assert not my_user.present('uid', 'test_user_1000') assert my_user.get_attr_val_utf8('cn') == 'test_user_1000' assert my_user.get_attr_val_utf8('displayName') == 'test_user_1000' log.info('Old user dn should not exist.') assert not test_user.exists() log.info('Clean up') my_user.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/dsrc_test.py000066400000000000000000000076611421664411400255110ustar00rootroot00000000000000import logging import pytest import os from os.path import expanduser from lib389.cli_base import FakeArgs from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc from lib389._constants import DEFAULT_SUFFIX, DN_DM from lib389.topologies import topology_st as topo log = logging.getLogger(__name__) @pytest.fixture(scope="function") def setup(topo, request): """Preserve any existing .dsrc file""" dsrc_file = f'{expanduser("~")}/.dsrc' backup_file = dsrc_file + ".original" if os.path.exists(dsrc_file): os.rename(dsrc_file, backup_file) def fin(): if os.path.exists(backup_file): os.rename(backup_file, dsrc_file) request.addfinalizer(fin) def test_dsrc(topo, setup): """Test "dsctl dsrc" command :id: 0610de6c-e167-4761-bdab-3e677b2d44bb :setup: Standalone Instance :steps: 1. Test creation works 2. 
Test creating duplicate section 3. Test adding an additional inst config works 4. Test removing an instance works 5. Test modify works 6. Test delete works 7. Test display fails when no file is present :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ inst = topo.standalone serverid = inst.serverid second_inst_name = "Second" second_inst_basedn = "o=second" different_suffix = "o=different" # Setup our args args = FakeArgs() args.basedn = DEFAULT_SUFFIX args.binddn = DN_DM args.json = None args.uri = None args.saslmech = None args.tls_cacertdir = None args.tls_cert = None args.tls_key = None args.tls_reqcert = None args.starttls = None args.cancel_starttls = None args.pwdfile = None args.do_it = True # Create a dsrc configuration entry create_dsrc(inst, log, args) display_dsrc(inst, topo.logcap.log, args) assert topo.logcap.contains("basedn = " + args.basedn) assert topo.logcap.contains("binddn = " + args.binddn) assert topo.logcap.contains("[" + serverid + "]") topo.logcap.flush() # Attempt to add duplicate instance section with pytest.raises(ValueError): create_dsrc(inst, log, args) # Test adding a second instance works correctly inst.serverid = second_inst_name args.basedn = second_inst_basedn create_dsrc(inst, log, args) display_dsrc(inst, topo.logcap.log, args) assert topo.logcap.contains("basedn = " + args.basedn) assert topo.logcap.contains("[" + second_inst_name + "]") topo.logcap.flush() # Delete second instance delete_dsrc(inst, log, args) inst.serverid = serverid # Restore original instance name display_dsrc(inst, topo.logcap.log, args) assert not topo.logcap.contains("[" + second_inst_name + "]") assert not topo.logcap.contains("basedn = " + args.basedn) # Make sure first instance config is still present assert topo.logcap.contains("[" + serverid + "]") assert topo.logcap.contains("binddn = " + args.binddn) topo.logcap.flush() # Modify the config args.basedn = different_suffix modify_dsrc(inst, log, args) 
display_dsrc(inst, topo.logcap.log, args) assert topo.logcap.contains(different_suffix) topo.logcap.flush() # Remove an arg from the config args.basedn = "" modify_dsrc(inst, log, args) display_dsrc(inst, topo.logcap.log, args) assert not topo.logcap.contains(different_suffix) topo.logcap.flush() # Remove the last entry, which should delete the file delete_dsrc(inst, log, args) dsrc_file = f'{expanduser("~")}/.dsrc' assert not os.path.exists(dsrc_file) # Make sure display fails with pytest.raises(ValueError): display_dsrc(inst, log, args) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/fixup_test.py000066400000000000000000000054531421664411400257060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.plugins import POSIXWinsyncPlugin from lib389.cli_conf.plugins.posix_winsync import do_fixup pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/fixup.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file_and_ldif(topology_st, request): MYLDIF = 'example1k_posix.ldif' global ldif_file fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR) ldif_file = f"{data_dir_path}ticket48212/{MYLDIF}" ldif_dir = topology_st.standalone.get_ldif_dir() shutil.copy(ldif_file, ldif_dir) ldif_file = ldif_dir + '/' + MYLDIF def fin(): log.info('Delete files') os.remove(LOG_FILE) os.remove(ldif_file) request.addfinalizer(fin) @pytest.mark.ds50545 @pytest.mark.bz1739718 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_posix_winsync_fixup(topology_st, set_log_file_and_ldif): """Test posix-winsync fixup that was ported from legacy tools :id: ce691017-cbd2-49ed-ac2d-8c3ea78050f6 :setup: Standalone instance :steps: 1. Create DS instance 2. Enable PosixWinsync plugin 3. Run fixup task 4. Check log for output :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone output_list = ['Attempting to add task entry', 'Successfully added task entry'] log.info('Enable POSIXWinsyncPlugin') posix = POSIXWinsyncPlugin(standalone) posix.enable() log.info('Stopping the server and importing posix accounts') standalone.stop() assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=ldif_file) standalone.start() args = FakeArgs() args.DN = DEFAULT_SUFFIX args.filter = None log.info('Run Fixup task') do_fixup(standalone, DEFAULT_SUFFIX, log, args) log.info('Check log if fixup task was successful') with open(LOG_FILE, 'r') as f: file_content = f.read() for item in output_list: assert item in file_content if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/repl_monitor_test.py000066400000000000000000000237701421664411400272660ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest import re from lib389.cli_conf.replication import get_repl_monitor_info from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389.cli_base import FakeArgs from lib389.cli_base.dsrc import dsrc_arg_concat from lib389.cli_base import connect_instance from lib389.replica import Replicas pytestmark = pytest.mark.tier0 LOG_FILE = '/tmp/monitor.log' logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def set_log_file(request): fh = logging.FileHandler(LOG_FILE) fh.setLevel(logging.DEBUG) log.addHandler(fh) def fin(): log.info('Delete files') os.remove(LOG_FILE) config = os.path.expanduser(DSRC_HOME) if os.path.exists(config): os.remove(config) request.addfinalizer(fin) def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): with open(LOG_FILE, 'r+') as f: file_content = f.read() for item in content_list: log.info('Check that "{}" is present'.format(item)) assert item in file_content if second_list is not None: log.info('Check for "{}"'.format(second_list)) for item in second_list: assert item in file_content if single_value is not None: log.info('Check for "{}"'.format(single_value)) assert single_value in file_content if error_list is not None: log.info('Check that "{}" is not present'.format(error_list)) for item in error_list: assert item not in file_content log.info('Reset log file') f.truncate(0) def get_hostnames_from_log(port1, port2): # Get the supplier host names as displayed in replication monitor output with open(LOG_FILE, 'r') as logfile: logtext = logfile.read() # search for Supplier :hostname:port # and use \D to insure there is no more number is after # the matched port (i.e that 10 is not matching 101) regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' match=re.search(regexp, logtext) host_m1 = 
'localhost.localdomain' if (match is not None): host_m1 = match.group(2) # Same for supplier 2 regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' match=re.search(regexp, logtext) host_m2 = 'localhost.localdomain' if (match is not None): host_m2 = match.group(2) return (host_m1, host_m2) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.ds50545 @pytest.mark.bz1739718 @pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") def test_dsconf_replication_monitor(topology_m2, set_log_file): """Test replication monitor that was ported from legacy tools :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 :setup: 2 MM topology :steps: 1. Create DS instance 2. Run replication monitor with connections option 3. Run replication monitor with aliases option 4. Run replication monitor with --json option 5. Run replication monitor with .dsrc file created 6. Run replication monitor with connections option as if using dsconf CLI :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success """ m1 = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] # Enable ldapi if not already done. for inst in [topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]]: if not inst.can_autobind(): # Update ns-slapd instance inst.config.set('nsslapd-ldapilisten', 'on') inst.config.set('nsslapd-ldapiautobind', 'on') inst.restart() # Ensure that updates have been sent both ways. 
replicas = Replicas(m1) replica = replicas.get(DEFAULT_SUFFIX) replica.test_replication([m2]) replicas = Replicas(m2) replica = replicas.get(DEFAULT_SUFFIX) replica.test_replication([m1]) alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) content_list = ['Replica Root: dc=example,dc=com', 'Replica ID: 1', 'Replica Status: Online', 'Max CSN', 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', 'Replica Enabled: on', 'Update In Progress: FALSE', 'Last Update Start:', 'Last Update End:', 'Number Of Changes Sent:', 'Number Of Changes Skipped: None', 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', 'Last Init Start:', 'Last Init End:', 'Last Init Status:', 'Reap Active: 0', 'Replication Status: In Synchronization', 'Replication Lag Time:', 'Supplier: ', m2.host + ':' + str(m2.port), 'Replica Root: dc=example,dc=com', 'Replica ID: 2', 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] error_list = ['consumer (Unavailable)', 'Failed to retrieve database RUV entry from consumer'] json_list = ['type', 'list', 'items', 'name', m1.host + ':' + str(m1.port), 'data', '"replica_id": "1"', '"replica_root": "dc=example,dc=com"', '"replica_status": "Online"', 'maxcsn', 'agmts_status', 'agmt-name', '002', 'replica', m2.host + ':' + str(m2.port), 'replica-enabled', 'update-in-progress', 'last-update-start', 'last-update-end', 'number-changes-sent', 'number-changes-skipped', 'last-update-status', 'Error (0) Replica acquired successfully: Incremental update succeeded', 'last-init-start', 'last-init-end', 'last-init-status', 'reap-active', 'replication-status', 'In Synchronization', 'replication-lag-time', '"replica_id": "2"', '001', m1.host + ':' + str(m1.port)] connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, m2.host + ':' + str(m2.port) + ':' + 
DN_DM + ':' + PW_DM] args = FakeArgs() args.connections = connections args.aliases = None args.json = False log.info('Run replication monitor with connections option') get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) # Prepare the data for next tests aliases = ['M1=' + host_m1 + ':' + str(m1.port), 'M2=' + host_m2 + ':' + str(m2.port)] alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] dsrc_content = '[repl-monitor-connections]\n' \ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ '\n' \ '[repl-monitor-aliases]\n' \ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ 'M2 = ' + host_m2 + ':' + str(m2.port) log.info('Run replication monitor with aliases option') args.aliases = aliases get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) check_value_in_log_and_reset(content_list, alias_content) log.info('Run replication monitor with --json option') args.aliases = None args.json = True get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) check_value_in_log_and_reset(json_list) with open(os.path.expanduser(DSRC_HOME), 'w+') as f: f.write(dsrc_content) args.connections = None args.aliases = None args.json = False log.info('Run replication monitor when .dsrc file is present with content') get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) check_value_in_log_and_reset(content_list, alias_content) os.remove(os.path.expanduser(DSRC_HOME)) log.info('Run replication monitor with connections option as if using dsconf CLI') # Perform same test than steps 2 test but without using directly the topology instance. 
# but with an instance similar to those than dsconf cli generates: # step 2 args args.connections = connections args.aliases = None args.json = False # args needed to generate an instance with dsrc_arg_concat args.instance = 'supplier1' args.basedn = None args.binddn = None args.bindpw = None args.pwdfile = None args.prompt = False args.starttls = False dsrc_inst = dsrc_arg_concat(args, None) inst = connect_instance(dsrc_inst, True, args) get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/clu/schema_test.py000066400000000000000000000053201421664411400260040ustar00rootroot00000000000000import logging import pytest import os from lib389.topologies import topology_st as topo from lib389.schema import Schema pytestmark = pytest.mark.tier0 log = logging.getLogger(__name__) def test_origins_with_extra_parenthesis(topo): """Test the custom schema with extra parenthesis in X-ORIGIN can be parsed into JSON :id: 4230f83b-0dc3-4bc4-a7a8-5ab0826a4f05 :setup: Standalone Instance :steps: 1. Add attribute with X-ORIGIN that contains extra parenthesis 2. Querying for that attribute with JSON flag :expectedresults: 1. Success 2. 
Success """ ATTR_NAME = 'testAttribute' X_ORG_VAL = 'test (TEST)' schema = Schema(topo.standalone) # Add new attribute parameters = { 'names': [ATTR_NAME], 'oid': '1.1.1.1.1.1.1.22222', 'desc': 'Test extra parenthesis in X-ORIGIN', 'x_origin': [X_ORG_VAL], 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', 'syntax_len': None, 'x_ordered': None, 'collective': None, 'obsolete': None, 'single_value': None, 'no_user_mod': None, 'equality': None, 'substr': None, 'ordering': None, 'usage': None, 'sup': None } schema.add_attributetype(parameters) # Search for attribute with JSON option attr_result = schema.query_attributetype(ATTR_NAME, json=True) # Verify the x-origin value is correct assert attr_result['at']['x_origin'][0] == X_ORG_VAL schema_params = [ ['attr1', '99999.1', None], ['attr2', '99999.2', 'test-str'], ['attr3', '99999.3', ['test-list']], ['attr4', '99999.4', ('test-tuple')], ] @pytest.mark.parametrize("name, oid, xorg", schema_params) def test_origins(topo, name, oid, xorg): """Test the various possibilities of x-origin :id: 3229f6f8-67c1-4558-9be5-71434283086a :setup: Standalone Instance :steps: 1. Add an attribute with different x-origin values/types :expectedresults: 1. 
Success """ schema = Schema(topo.standalone) # Add new attribute parameters = { 'names': [name], 'oid': oid, 'desc': 'Test X-ORIGIN', 'x_origin': xorg, 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', 'syntax_len': None, 'x_ordered': None, 'collective': None, 'obsolete': None, 'single_value': None, 'no_user_mod': None, 'equality': None, 'substr': None, 'ordering': None, 'usage': None, 'sup': None } schema.add_attributetype(parameters) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/000077500000000000000000000000001421664411400236155ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/__init__.py000066400000000000000000000001061421664411400257230ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Configurations """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/autotuning_test.py000066400000000000000000000403711421664411400274300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389._mapped_object import DSLdapObject from lib389.utils import * from lib389.topologies import topology_st as topo from lib389._constants import DN_CONFIG_LDBM, DN_CONFIG_LDBM_BDB, DN_USERROOT_LDBM, DEFAULT_SUFFIX pytestmark = pytest.mark.tier0 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_threads_basic(topo): """Check that a number of threads are able to be autotuned :id: 371fb9c4-9607-4a4b-a4a2-6f00809d6257 :setup: Standalone instance :steps: 1. Set nsslapd-threadnumber to -1 2. 
Check that number of threads is positive :expectedresults: 1. nsslapd-threadnumber should be successfully set 2. nsslapd-threadnumber is positive """ log.info("Set nsslapd-threadnumber: -1 to enable autotuning") topo.standalone.config.set("nsslapd-threadnumber", "-1") log.info("Assert nsslapd-threadnumber is equal to the documented expected value") assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0 def test_threads_warning(topo): """Check that we log a warning if the thread number is too high or low :id: db92412b-2812-49de-84b0-00f452cd254f :setup: Standalone Instance :steps: 1. Get autotuned thread number 2. Set threads way higher than hw threads, and find a warning in the log 3. Set threads way lower than hw threads, and find a warning in the log :expectedresults: 1. Success 2. Success 3. Success """ topo.standalone.config.set("nsslapd-threadnumber", "-1") autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4)) time.sleep(.5) assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*') if int(autotuned_value) > 1: # If autotuned is 1, there isn't anything to test here topo.standalone.config.set("nsslapd-threadnumber", "1") time.sleep(.5) assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*') @pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid')) def test_threads_invalid_value(topo, invalid_value): """Check nsslapd-threadnumber for an invalid values :id: 1979eddf-8222-4c9d-809d-269c26de636e :parametrized: yes :setup: Standalone instance :steps: 1. Set nsslapd-threadnumber to -2, 0, invalid_str :expectedresults: 1. The operation should fail """ log.info("Set nsslapd-threadnumber: {}. 
Operation should fail".format(invalid_value)) with pytest.raises(ldap.OPERATIONS_ERROR): topo.standalone.config.set("nsslapd-threadnumber", invalid_value) def test_threads_back_from_manual_value(topo): """Check that thread autotuning works after manual tuning :id: 4b674016-e5ca-426b-a9c0-a94745a7dd25 :setup: Standalone instance :steps: 1. Set nsslapd-threadnumber to -1 and save the autotuned value 2. Decrease nsslapd-threadnumber by 2 3. Set nsslapd-threadnumber to -1 4. Check that nsslapd-threadnumber is back to autotuned value :expectedresults: 1. nsslapd-threadnumber should be successfully set 2. nsslapd-threadnumber should be successfully decreased 3. nsslapd-threadnumber should be successfully set 4. nsslapd-threadnumber is set back to the autotuned value """ log.info("Set nsslapd-threadnumber: -1 to enable autotuning and save the new value") topo.standalone.config.set("nsslapd-threadnumber", "-1") autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") log.info("Set nsslapd-threadnumber to the autotuned value decreased by 2") new_value = str(int(autotuned_value) - 2) topo.standalone.config.set("nsslapd-threadnumber", new_value) assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == new_value log.info("Set nsslapd-threadnumber: -1 to enable autotuning") topo.standalone.config.set("nsslapd-threadnumber", "-1") log.info("Assert nsslapd-threadnumber is back to the autotuned value") assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == autotuned_value @pytest.mark.parametrize("autosize,autosize_split", (('', ''), ('', '0'), ('10', '40'), ('', '40'), ('10', ''), ('10', '40'), ('10', '0'))) def test_cache_autosize_non_zero(topo, autosize, autosize_split): """Check that autosizing works works properly in different combinations :id: 83fa099c-a6c9-457a-82db-0982b67e8598 :parametrized: yes :setup: Standalone instance :steps: 1. 
Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: ('', ''), ('', '0'), ('10', '40'), ('', '40'), ('10', ''), ('10', '40'), ('10', '0') '' - for deleting the value (set to default) 2. Try to modify nsslapd-dbcachesize and nsslapd-cachememsize to some real value, it should be rejected 3. Restart the instance 4. Check nsslapd-dbcachesize and nsslapd-cachememsize :expectedresults: 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set 2. Modify operation should be rejected 3. The instance should be successfully restarted 4. nsslapd-dbcachesize and nsslapd-cachememsize should set to value greater than 512KB """ config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) cachesize = '33333333' if ds_is_older('1.4.2'): dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') else: dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test") log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) log.info("nsslapd-cache-autosize == {}".format(autosize_val)) log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) if autosize: log.info("Set 
nsslapd-cache-autosize to {}".format(autosize)) config_ldbm.set('nsslapd-cache-autosize', autosize) else: log.info("Delete nsslapd-cache-autosize") try: config_ldbm.remove('nsslapd-cache-autosize', autosize_val) except ValueError: log.info("nsslapd-cache-autosize wasn't found") if autosize_split: log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) else: log.info("Delete nsslapd-cache-autosize-split") try: config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) except ValueError: log.info("nsslapd-cache-autosize-split wasn't found") log.info("Trying to set nsslapd-cachememsize to {}".format(cachesize)) with pytest.raises(ldap.UNWILLING_TO_PERFORM): userroot_ldbm.set('nsslapd-cachememsize', cachesize) log.info("Trying to set nsslapd-dbcachesize to {}".format(cachesize)) with pytest.raises(ldap.UNWILLING_TO_PERFORM): config_ldbm.set('nsslapd-dbcachesize ', cachesize) topo.standalone.restart() if ds_is_older('1.4.2'): dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') else: dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) log.info("nsslapd-cache-autosize == {}".format(autosize_val)) log.info("nsslapd-cache-autosize-split == 
{}".format(autosize_split_val)) assert int(dbcachesize_val) >= 512000 assert int(cachenensize_val) >= 512000 assert int(dncachenensize_val) >= 512000 @pytest.mark.parametrize("autosize_split", ('0', '', '40')) def test_cache_autosize_basic_sane(topo, autosize_split): """Check that autotuning cachesizes works properly with different values :id: 9dc363ef-f551-446d-8b83-8ac45dabb8df :parametrized: yes :setup: Standalone instance :steps: 1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: ('0', '0'), ('0', ''), ('0', '40') '' - for deleting the value (set to default) 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-dbcachesize: 0 and some same value 3. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: nsslapd-cachememsize: 0 and some same value 4. Restart the instance 5. Check nsslapd-dbcachesize and nsslapd-cachememsize :expectedresults: 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set 2. nsslapd-dbcachesize are successfully set 3. nsslapd-cachememsize are successfully set 4. The instance should be successfully restarted 5. 
nsslapd-dbcachesize and nsslapd-cachememsize should set to value greater than 512KB """ config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) config_ldbm.set('nsslapd-cache-autosize', '0') # Test with caches with both real values and 0 for cachesize in ('0', '33333333'): if ds_is_older('1.4.2'): dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') else: dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test") log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) log.info("nsslapd-cache-autosize == {}".format(autosize_val)) log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) if autosize_split: log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) else: log.info("Delete nsslapd-cache-autosize-split") try: config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) except ValueError: log.info("nsslapd-cache-autosize-split wasn't found") log.info("Set nsslapd-dbcachesize to {}".format(cachesize)) config_ldbm.set('nsslapd-dbcachesize', cachesize) log.info("Set nsslapd-cachememsize to {}".format(cachesize)) userroot_ldbm.set('nsslapd-cachememsize', cachesize) topo.standalone.restart() if ds_is_older('1.4.2'): 
dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') else: dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) log.info("nsslapd-cache-autosize == {}".format(autosize_val)) log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) assert int(dbcachesize_val) >= 512000 assert int(cachenensize_val) >= 512000 assert int(dncachenensize_val) >= 512000 @pytest.mark.parametrize("invalid_value", ('-2', '102', 'invalid')) def test_cache_autosize_invalid_values(topo, invalid_value): """Check that we can't set invalid values to autosize attributes :id: 2f0d01b5-ca91-4dc2-97bc-ad0ac8d08633 :parametrized: yes :setup: Standalone instance :steps: 1. Stop the instance 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-cache-autosize and nsslapd-cache-autosize-split to invalid values like (-2, 102, invalid_str) 3. Try to start the instance :expectedresults: 1. The instance should stop successfully 2. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set 3. 
Starting the instance should fail """ config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) if ds_is_older('1.4.2'): autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') else: autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value)) with pytest.raises(ldap.UNWILLING_TO_PERFORM): config_ldbm.set('nsslapd-cache-autosize-split', invalid_value) topo.standalone.restart() config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) log.info("Set nsslapd-cache-autosize to {}".format(invalid_value)) with pytest.raises(ldap.UNWILLING_TO_PERFORM): config_ldbm.set('nsslapd-cache-autosize', invalid_value) topo.standalone.restart() config_ldbm.remove('nsslapd-cache-autosize', autosize_val) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/compact_test.py000066400000000000000000000043441421664411400266610ustar00rootroot00000000000000import logging import pytest import os import time from lib389.tasks import DBCompactTask from lib389.backend import DatabaseConfig from lib389.topologies import topology_m1 as topo log = logging.getLogger(__name__) def test_compact_db_task(topo): """Test creation of dbcompact task is successful :id: 1b3222ef-a336-4259-be21-6a52f76e1859 :setup: Standalone Instance :steps: 1. Create task 2. Check task was successful 3. Check errors log to show task was run 4. Create task just for replication :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ inst = topo.ms["supplier1"] task = DBCompactTask(inst) task.create() task.wait() assert task.get_exit_code() == 0 # Check errors log to make sure task actually compacted db assert inst.searchErrorsLog("Compacting databases") inst.deleteErrorLogs() # Create new task that only compacts changelog task = DBCompactTask(inst) task_properties = {'justChangelog': 'yes'} task.create(properties=task_properties) task.wait() assert task.get_exit_code() == 0 # Check errors log to make sure task only performed changelog compaction assert inst.searchErrorsLog("Compacting DB") == False assert inst.searchErrorsLog("Compacting Replication Changelog") inst.deleteErrorLogs(restart=False) def test_compaction_interval_and_time(topo): """Test dbcompact is successful when nsslapd-db-compactdb-interval and nsslapd-db-compactdb-time is set :id: f361bee9-d7e7-4569-9255-d7b60dd9d92e :setup: Supplier Instance :steps: 1. Configure compact interval and time 2. Check compaction occurs as expected :expectedresults: 1. Success 2. Success """ inst = topo.ms["supplier1"] config = DatabaseConfig(inst) config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')]) inst.deleteErrorLogs() time.sleep(6) assert inst.searchErrorsLog("Compacting databases") inst.deleteErrorLogs(restart=False) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/config_delete_attr_test.py000066400000000000000000000115141421664411400310510ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.utils import os, logging, ds_is_older, ldap from lib389.topologies import topology_st logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] @pytest.mark.ds48961 def test_delete_storagescheme(topology_st): """ Test that deletion of passwordStorageScheme is rejected :id: 53ab2dbf-e37c-4d30-8cce-0d5f44ed204a :setup: Standalone instance :steps: 1. Create instance 2. Modify passwordStorageScheme attribute 3. Remove passwordStorageScheme attribute 4. Check exception message :expectedresults: 1. Success 2. Success 3. Removal should be rejected 4. Message should be about rejected change """ standalone = topology_st.standalone log.info('Check we can modify passwordStorageScheme') standalone.config.set('passwordStorageScheme', 'CLEAR') assert standalone.config.get_attr_val_utf8('passwordStorageScheme') == 'CLEAR' log.info('Check removal of passwordStorageScheme is rejected') with pytest.raises(ldap.OPERATIONS_ERROR) as excinfo: standalone.config.remove('passwordStorageScheme', None) assert "deleting the value is not allowed" in str(excinfo.value) @pytest.mark.ds48961 def test_reset_attributes(topology_st): """ Test that we can reset some attributes while others are rejected :id: 5f78088f-36d3-4a0b-8c1b-4abc161e996f :setup: Standalone instance :steps: 1. Create instance 2. Check attributes from attr_to_test can be reset 3. Check value of that attribute is empty 4. Check reset of attributes from attr_to_fail is rejected :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topology_st.standalone # These attributes should not be able to reset attr_to_fail = [ 'nsslapd-localuser', 'nsslapd-defaultnamingcontext', 'nsslapd-accesslog', 'nsslapd-auditlog', 'nsslapd-errorlog', 'nsslapd-tmpdir', 'nsslapd-rundir', 'nsslapd-bakdir', 'nsslapd-certdir', 'nsslapd-instancedir', 'nsslapd-ldifdir', 'nsslapd-lockdir', 'nsslapd-schemadir', 'nsslapd-workingdir', 'nsslapd-localhost', 'nsslapd-certmap-basedn', 'nsslapd-port', 'nsslapd-secureport', 'nsslapd-conntablesize', 'nsslapd-rootpw', 'nsslapd-hash-filters', 'nsslapd-requiresrestart', 'nsslapd-plugin', 'nsslapd-privatenamespaces', 'nsslapd-allowed-to-delete-attrs', 'nsslapd-accesslog-list', 'nsslapd-auditfaillog-list', 'nsslapd-auditlog-list', 'nsslapd-errorlog-list', 'nsslapd-config', 'nsslapd-versionstring', 'objectclass', 'cn', 'nsslapd-backendconfig', 'nsslapd-betype', 'nsslapd-connection-buffer', 'nsslapd-malloc-mmap-threshold', 'nsslapd-malloc-mxfast', 'nsslapd-malloc-trim-threshold', 'nsslapd-referralmode', 'nsslapd-saslpath', 'passwordadmindn' ] attr_to_test = { 'nsslapd-listenhost': 'localhost', 'nsslapd-securelistenhost': 'localhost', 'nsslapd-allowed-sasl-mechanisms': 'GSSAPI', 'nsslapd-svrtab': 'Some data' } for attr in attr_to_test: newval = attr_to_test[attr] log.info("Change %s value to --> %s" % (attr, newval)) standalone.config.set(attr, newval) assert standalone.config.get_attr_val_utf8(attr) == newval log.info('Now reset the attribute') standalone.config.reset(attr) assert standalone.config.get_attr_val_utf8(attr) == '' log.info("%s is reset to None" % attr) for attr in attr_to_fail: log.info("Resetting %s" % attr) try: standalone.config.reset(attr) # Shouldn't reach here, the reset should fail! 
log.info('Attribute deletion should fail => test failed!') assert False except (ldap.UNWILLING_TO_PERFORM, ldap.OPERATIONS_ERROR, ldap.OBJECT_CLASS_VIOLATION): log.info('Change was rejected, test passed') pass except ldap.NO_SUCH_ATTRIBUTE: log.info("This attribute isn't part of cn=config, so is already default!") pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/config_test.py000066400000000000000000000612421421664411400265000ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_m2, topology_st as topo from lib389.utils import * from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.group import Groups from lib389.backend import * from lib389.config import LDBMConfig, BDB_LDBMConfig from lib389.cos import CosPointerDefinitions, CosTemplates from lib389.backend import Backends from lib389.monitor import MonitorLDBM from lib389.plugins import ReferentialIntegrityPlugin pytestmark = pytest.mark.tier0 USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def big_file(): TEMP_BIG_FILE = '' # 1024*1024=1048576 # B for 1 MiB # Big for 3 MiB for x in range(1048576): TEMP_BIG_FILE += '+' return TEMP_BIG_FILE @pytest.mark.bz1897248 @pytest.mark.ds4315 @pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") def test_nagle_default_value(topo): """Test that nsslapd-nagle attribute is off by default :id: 
00361f5d-d638-4d39-8231-66fa52637203 :setup: Standalone instance :steps: 1. Create instance 2. Check the value of nsslapd-nagle :expectedresults: 1. Success 2. The value of nsslapd-nagle should be off """ log.info('Check the value of nsslapd-nagle attribute is off by default') assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' def test_maxbersize_repl(topology_m2, big_file): """maxbersize is ignored in the replicated operations. :id: ad57de60-7d56-4323-bbca-5556e5cdb126 :setup: MMR with two suppliers, test user, 1 MiB big value for any attribute :steps: 1. Set maxbersize attribute to a small value (20KiB) on supplier2 2. Add the big value to supplier2 3. Add the big value to supplier1 4. Check if the big value was successfully replicated to supplier2 :expectedresults: 1. maxbersize should be successfully set 2. Adding the big value to supplier2 failed 3. Adding the big value to supplier1 succeed 4. The big value is successfully replicated to supplier2 """ users_m1 = UserAccounts(topology_m2.ms["supplier1"], DEFAULT_SUFFIX) users_m2 = UserAccounts(topology_m2.ms["supplier2"], DEFAULT_SUFFIX) user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) time.sleep(2) user_m2 = users_m2.get(dn=user_m1.dn) log.info("Set nsslapd-maxbersize: 20K to supplier2") topology_m2.ms["supplier2"].config.set('nsslapd-maxbersize', '20480') topology_m2.ms["supplier2"].restart() log.info('Try to add attribute with a big value to supplier2 - expect to FAIL') with pytest.raises(ldap.SERVER_DOWN): user_m2.add('jpegphoto', big_file) topology_m2.ms["supplier2"].restart() topology_m2.ms["supplier1"].restart() log.info('Try to add attribute with a big value to supplier1 - expect to PASS') user_m1.add('jpegphoto', big_file) time.sleep(2) log.info('Check if a big value was successfully added to supplier1') photo_m1 = user_m1.get_attr_vals('jpegphoto') log.info('Check if a big value was successfully replicated to supplier2') photo_m2 = user_m2.get_attr_vals('jpegphoto') 
assert photo_m2 == photo_m1 def test_config_listen_backport_size(topology_m2): """Check that nsslapd-listen-backlog-size acted as expected :id: a4385d58-a6ab-491e-a604-6df0e8ed91cd :setup: MMR with two suppliers :steps: 1. Search for nsslapd-listen-backlog-size 2. Set nsslapd-listen-backlog-size to a positive value 3. Set nsslapd-listen-backlog-size to a negative value 4. Set nsslapd-listen-backlog-size to an invalid value 5. Set nsslapd-listen-backlog-size back to a default value :expectedresults: 1. Search should be successful 2. nsslapd-listen-backlog-size should be successfully set 3. nsslapd-listen-backlog-size should be successfully set 4. Modification with an invalid value should throw an error 5. nsslapd-listen-backlog-size should be successfully set """ default_val = topology_m2.ms["supplier1"].config.get_attr_val_bytes('nsslapd-listen-backlog-size') topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', '256') topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', '-1') with pytest.raises(ldap.LDAPError): topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', 'ZZ') topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', default_val) def test_config_deadlock_policy(topology_m2): """Check that nsslapd-db-deadlock-policy acted as expected :id: a24e25fd-bc15-47fa-b018-372f6a2ec59c :setup: MMR with two suppliers :steps: 1. Search for nsslapd-db-deadlock-policy and check if it contains a default value 2. Set nsslapd-db-deadlock-policy to a positive value 3. Set nsslapd-db-deadlock-policy to a negative value 4. Set nsslapd-db-deadlock-policy to an invalid value 5. Set nsslapd-db-deadlock-policy back to a default value :expectedresults: 1. Search should be a successful and should contain a default value 2. nsslapd-db-deadlock-policy should be successfully set 3. nsslapd-db-deadlock-policy should be successfully set 4. Modification with an invalid value should throw an error 5. 
nsslapd-db-deadlock-policy should be successfully set """ default_val = b'9' ldbmconfig = LDBMConfig(topology_m2.ms["supplier1"]) bdbconfig = BDB_LDBMConfig(topology_m2.ms["supplier1"]) if ds_is_older('1.4.2'): deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') else: deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') assert deadlock_policy == default_val # Try a range of valid values for val in (b'0', b'5', b'9'): ldbmconfig.replace('nsslapd-db-deadlock-policy', val) if ds_is_older('1.4.2'): deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') else: deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') assert deadlock_policy == val # Try a range of invalid values for val in ('-1', '10'): with pytest.raises(ldap.LDAPError): ldbmconfig.replace('nsslapd-db-deadlock-policy', val) # Cleanup - undo what we've done ldbmconfig.replace('nsslapd-db-deadlock-policy', deadlock_policy) @pytest.mark.bz766322 @pytest.mark.ds26 def test_defaultnamingcontext(topo): """Tests configuration attribute defaultNamingContext in the rootdse :id: de9a21d3-00f9-4c6d-bb40-56aa1ba36578 :setup: Standalone instance :steps: 1. Check the attribute nsslapd-defaultnamingcontext is present in cn=config 2. Delete nsslapd-defaultnamingcontext attribute 3. Add new valid Suffix and modify nsslapd-defaultnamingcontext with new suffix 4. Add new invalid value at runtime to nsslapd-defaultnamingcontext 5. Modify nsslapd-defaultnamingcontext with blank value 6. Add new suffix when nsslapd-defaultnamingcontext is empty 7. Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix 8. Adding new suffix when nsslapd-defaultnamingcontext is not empty 9. Check the value of the nsslapd-defaultnamingcontext has not changed 10. Remove the newly added suffix and check the values of the attribute is not changed 11. Remove the original suffix which is currently nsslapd-defaultnamingcontext 12. 
Check nsslapd-defaultnamingcontext become empty. :expectedresults: 1. This should be successful 2. It should give 'server unwilling to perform' error 3. It should be successful 4. It should give 'no such object' error 5. It should be successful 6. Add should be successful 7. nsslapd-defaultnamingcontext should have new suffix 8. Add should be successful 9. defaultnamingcontext should not change 10. Remove should be successful and defaultnamingcontext should not change 11. Removal should be successful 12. nsslapd-defaultnamingcontext should be empty """ backends = Backends(topo.standalone) test_suffix1 = 'dc=test1,dc=com' test_db1 = 'test1_db' test_suffix2 = 'dc=test2,dc=com' test_db2 = 'test2_db' test_suffix3 = 'dc=test3,dc=com' test_db3 = 'test3_db' log.info("Check the attribute nsslapd-defaultnamingcontext is present in cn=config") assert topo.standalone.config.present('nsslapd-defaultnamingcontext') log.info("Delete nsslapd-defaultnamingcontext attribute") with pytest.raises(ldap.UNWILLING_TO_PERFORM): topo.standalone.config.remove_all('nsslapd-defaultnamingcontext') b1 = backends.create(properties={'cn': test_db1, 'nsslapd-suffix': test_suffix1}) log.info("modify nsslapd-defaultnamingcontext with new suffix") topo.standalone.config.replace('nsslapd-defaultnamingcontext', test_suffix1) log.info("Add new invalid value at runtime to nsslapd-defaultnamingcontext") with pytest.raises(ldap.NO_SUCH_OBJECT): topo.standalone.config.replace('nsslapd-defaultnamingcontext', 'some_invalid_value') log.info("Modify nsslapd-defaultnamingcontext with blank value") topo.standalone.config.replace('nsslapd-defaultnamingcontext', ' ') log.info("Add new suffix when nsslapd-defaultnamingcontext is empty") b2 = backends.create(properties={'cn': test_db2, 'nsslapd-suffix': test_suffix2}) log.info("Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix") assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 
log.info("Adding new suffix when nsslapd-defaultnamingcontext is not empty") b3 = backends.create(properties={'cn': test_db3, 'nsslapd-suffix': test_suffix3}) log.info("Check the value of the nsslapd-defaultnamingcontext has not changed") assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 log.info("Remove the newly added suffix and check the values of the attribute is not changed") b3.delete() assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 log.info("Remove all the suffix at the end") b1.delete() b2.delete() @pytest.mark.xfail(reason="This may fail due to bug 1610234") def test_defaultnamingcontext_1(topo): """This test case should be part of function test_defaultnamingcontext Please move it back after we have a fix for bug 1610234 """ log.info("Remove the original suffix which is currently nsslapd-defaultnamingcontext" "and check nsslapd-defaultnamingcontext become empty.") """ Please remove these declarations after moving the test to function test_defaultnamingcontext """ backends = Backends(topo.standalone) test_db2 = 'test2_db' test_suffix2 = 'dc=test2,dc=com' b2 = backends.create(properties={'cn': test_db2, 'nsslapd-suffix': test_suffix2}) b2.delete() assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == ' ' @pytest.mark.bz602456 def test_allow_add_delete_config_attributes(topo): """Tests configuration attributes are allowed to add and delete :id: d9a3f264-4111-406b-9900-a70e5403458a :setup: Standalone instance :steps: 1. Add a new valid attribute at runtime to cn=config 2. Check if the new valid attribute is present 3. Delete nsslapd-listenhost to restore the default value 4. Restart the server 5. Check nsslapd-listenhost is present with default value 6. Add new invalid attribute at runtime to cn=config 7. Make sure the invalid attribute is not added :expectedresults: 1. This should be successful 2. This should be successful 3. 
This should be successful 4. This should be successful 5. This should be successful 6. It should give 'server unwilling to perform' error 7. Invalid attribute should not be added """ default_listenhost = topo.standalone.config.get_attr_val_utf8('nsslapd-listenhost') log.info("Add a new valid attribute at runtime to cn=config") topo.standalone.config.add('nsslapd-listenhost', 'localhost') assert topo.standalone.config.present('nsslapd-listenhost', 'localhost') log.info("Delete nsslapd-listenhost to restore the default value") topo.standalone.config.remove('nsslapd-listenhost', 'localhost') topo.standalone.restart() assert topo.standalone.config.present('nsslapd-listenhost', default_listenhost) log.info("Add new invalid attribute at runtime to cn=config") with pytest.raises(ldap.UNWILLING_TO_PERFORM): topo.standalone.config.add('invalid-attribute', 'invalid-value') log.info("Make sure the invalid attribute is not added") assert not topo.standalone.config.present('invalid-attribute', 'invalid-value') @pytest.mark.bz918705 @pytest.mark.ds511 def test_ignore_virtual_attrs(topo): """Test nsslapd-ignore-virtual-attrs configuration attribute :id: 9915d71b-2c71-4ac0-91d7-92655d53541b :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs 4. Set invalid value for attribute nsslapd-ignore-virtual-attrs 5. Set nsslapd-ignore-virtual-attrs=off 6. Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code 7. Test if virtual attribute i.e. postal code shown in test entry while nsslapd-ignore-virtual-attrs: off 8. Set nsslapd-ignore-virtual-attrs=on 9. Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on :expectedresults: 1. This should be successful 2. This should be successful 3. 
This should be successful 4. This should fail 5. This should be successful 6. This should be successful 7. Postal code should be present 8. This should be successful 9. Postal code should not be present """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs") for attribute_value in ['on', 'off', 'ON', 'OFF']: topo.standalone.config.set('nsslapd-ignore-virtual-attrs', attribute_value) assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', attribute_value) log.info("Set invalid value for attribute nsslapd-ignore-virtual-attrs") with pytest.raises(ldap.OPERATIONS_ERROR): topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'invalid_value') cos_template_properties = { 'cn': 'cosTemplateExample', 'postalcode': '117' } cos_templates = CosTemplates(topo.standalone, DEFAULT_SUFFIX, 'ou=People') test_cos_template = cos_templates.create(properties=cos_template_properties) log.info("Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code") cos_pointer_properties = { 'cn': 'cosPointer', 'description': 'cosPointer example', 'cosTemplateDn': 'cn=cosTemplateExample,ou=People,dc=example,dc=com', 'cosAttribute': 'postalcode', } cos_pointer_definitions = CosPointerDefinitions(topo.standalone, DEFAULT_SUFFIX, 'ou=People') test_cos_pointer_definition = cos_pointer_definitions.create(properties=cos_pointer_properties) test_users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) test_user = test_users.create(properties=TEST_USER_PROPERTIES) log.info("Test if virtual attribute i.e. 
postal code shown in test entry while nsslapd-ignore-virtual-attrs: off") assert test_user.present('postalcode', '117') log.info("Set nsslapd-ignore-virtual-attrs=on") topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on") assert not test_user.present('postalcode', '117') def test_ignore_virtual_attrs_after_restart(topo): """Test nsslapd-ignore-virtual-attrs configuration attribute The attribute is ON by default. If it set to OFF, it keeps its value on restart :id: ac368649-4fda-473c-9ef8-e0c728b162af :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Set nsslapd-ignore-virtual-attrs=off 4. restart the instance 5. Check the attribute nsslapd-ignore-virtual-attrs is OFF :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. This should be successful """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" log.info("Set nsslapd-ignore-virtual-attrs = off") topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off') topo.standalone.restart() log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') @pytest.mark.bz918694 @pytest.mark.ds408 def test_ndn_cache_enabled(topo): """Test nsslapd-ignore-virtual-attrs configuration attribute :id: 2caa3ec0-cd05-458e-9e21-3b73cf4697ff :setup: Standalone instance :steps: 1. 
Check the attribute nsslapd-ndn-cache-enabled is present in cn=config 2. Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON 3. Check the attribute nsslapd-ndn-cache-max-size is present in cn=config 4. Check the backend monitor output for Normalized DN cache statistics while nsslapd-ndn-cache-enabled is OFF 5. Set nsslapd-ndn-cache-enabled ON and check the backend monitor output for Normalized DN cache statistics 6. Set invalid value for nsslapd-ndn-cache-enabled 7. Set invalid value for nsslapd-ndn-cache-max-size :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. Backend monitor output should not have NDN cache statistics 5. Backend monitor output should have NDN cache statistics 6. This should fail 7. This should fail """ log.info("Check the attribute nsslapd-ndn-cache-enabled is present in cn=config") assert topo.standalone.config.present('nsslapd-ndn-cache-enabled') log.info("Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ndn-cache-enabled') == 'on' log.info("Check the attribute nsslapd-ndn-cache-max-size is present in cn=config") assert topo.standalone.config.present('nsslapd-ndn-cache-max-size') backends = Backends(topo.standalone) backend = backends.get(DEFAULT_BENAME) log.info("Ticket#49593 : NDN cache stats should be under the global stats - Implemented in 1.4") log.info("Fetch the monitor value according to the ds version") if ds_is_older('1.4'): monitor = backend.get_monitor() else: monitor = MonitorLDBM(topo.standalone) log.info("Check the backend monitor output for Normalized DN cache statistics, " "while nsslapd-ndn-cache-enabled is off") topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'off') topo.standalone.restart() assert not monitor.present('normalizedDnCacheHits') log.info("Check the backend monitor output for Normalized DN cache statistics, " "while 
nsslapd-ndn-cache-enabled is on") topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'on') topo.standalone.restart() assert monitor.present('normalizedDnCacheHits') log.info("Set invalid value for nsslapd-ndn-cache-enabled") with pytest.raises(ldap.OPERATIONS_ERROR): topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'invalid_value') log.info("Set invalid value for nsslapd-ndn-cache-max-size") with pytest.raises(ldap.OPERATIONS_ERROR): topo.standalone.config.set('nsslapd-ndn-cache-max-size', 'invalid_value') def test_require_index(topo): """Test nsslapd-ignore-virtual-attrs configuration attribute :id: fb6e31f2-acc2-4e75-a195-5c356faeb803 :setup: Standalone instance :steps: 1. Set "nsslapd-require-index" to "on" 2. Test an unindexed search is rejected :expectedresults: 1. Success 2. Success """ # Set the config be_insts = Backends(topo.standalone).list() for be in be_insts: if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX: be.set('nsslapd-require-index', 'on') db_cfg = DatabaseConfig(topo.standalone) db_cfg.set([('nsslapd-idlistscanlimit', '100')]) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(101): users.create_test_user(uid=i) # Issue unindexed search,a nd make sure it is rejected raw_objects = DSLdapObjects(topo.standalone, basedn=DEFAULT_SUFFIX) with pytest.raises(ldap.UNWILLING_TO_PERFORM): raw_objects.filter("(description=test*)") @pytest.mark.skipif(ds_is_older('1.4.2'), reason="The config setting only exists in 1.4.2 and higher") def test_require_internal_index(topo): """Test nsslapd-ignore-virtual-attrs configuration attribute :id: 22b94f30-59e3-4f27-89a1-c4f4be036f7f :setup: Standalone instance :steps: 1. Set "nsslapd-require-internalop-index" to "on" 2. Enable RI plugin, and configure it to use an attribute that is not indexed 3. Create a user and add it a group 4. Deleting user should be rejected as the RI plugin issues an unindexed internal search :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ # Set the config be_insts = Backends(topo.standalone).list() for be in be_insts: if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX: be.set('nsslapd-require-index', 'off') be.set('nsslapd-require-internalop-index', 'on') # Configure RI plugin rip = ReferentialIntegrityPlugin(topo.standalone) rip.set('referint-membership-attr', 'description') rip.enable() # Create a bunch of users db_cfg = DatabaseConfig(topo.standalone) db_cfg.set([('nsslapd-idlistscanlimit', '100')]) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(102, 202): users.create_test_user(uid=i) # Create user and group user = users.create(properties={ 'uid': 'indexuser', 'cn' : 'indexuser', 'sn' : 'user', 'uidNumber' : '1010', 'gidNumber' : '2010', 'homeDirectory' : '/home/indexuser' }) groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group', 'member': user.dn}) # Restart the server topo.standalone.restart() # Deletion of user should be rejected with pytest.raises(ldap.UNWILLING_TO_PERFORM): user.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/regression_test.py000066400000000000000000000077371421664411400274240ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.utils import * from lib389.dseldif import DSEldif from lib389.config import LDBMConfig from lib389.backend import Backends from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) CUSTOM_MEM = '9100100100' # Function to return value of available memory in kb def get_available_memory(): with open('/proc/meminfo') as file: for line in file: if 'MemAvailable' in line: free_mem_in_kb = line.split()[1] return int(free_mem_in_kb) @pytest.mark.skipif(get_available_memory() < (int(CUSTOM_MEM)/1024), reason="available memory is too low") @pytest.mark.bz1627512 @pytest.mark.ds49618 def test_set_cachememsize_to_custom_value(topo): """Test if value nsslapd-cachememsize remains set at the custom setting of value above 3805132804 bytes after changing the value to 9100100100 bytes :id: 8a3efc00-65a9-4ee7-b8ee-e35840991ea9 :setup: Standalone Instance :steps: 1. Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-cache-autosize by setting it to 0 2. Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: nsslapd-cache-autosize-split by setting it to 0 3. Restart the instance 4. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: nsslapd-cachememsize: CUSTOM_MEM :expectedresults: 1. nsslapd-cache-autosize is successfully disabled 2. nsslapd-cache-autosize-split is successfully disabled 3. The instance should be successfully restarted 4. 
nsslapd-cachememsize is successfully set """ config_ldbm = LDBMConfig(topo.standalone) backends = Backends(topo.standalone) userroot_ldbm = backends.get("userroot") log.info("Disabling nsslapd-cache-autosize by setting it to 0") assert config_ldbm.set('nsslapd-cache-autosize', '0') log.info("Disabling nsslapd-cache-autosize-split by setting it to 0") assert config_ldbm.set('nsslapd-cache-autosize-split', '0') log.info("Restarting instance") topo.standalone.restart() log.info("Instance restarted successfully") log.info("Set nsslapd-cachememsize to value {}".format(CUSTOM_MEM)) assert userroot_ldbm.set('nsslapd-cachememsize', CUSTOM_MEM) def test_maxbersize_repl(topo): """Check that instance starts when nsslapd-errorlog-maxlogsize nsslapd-errorlog-logmaxdiskspace are set in certain order :id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0 :setup: MMR with two suppliers :steps: 1. Stop the instance 2. Set nsslapd-errorlog-maxlogsize before/after nsslapd-errorlog-logmaxdiskspace 3. Start the instance 4. Check the error log for errors :expectedresults: 1. Success 2. Success 3. Success 4. 
The error log should contain no errors """ inst = topo.standalone dse_ldif = DSEldif(inst) inst.stop() log.info("Set nsslapd-errorlog-maxlogsize before nsslapd-errorlog-logmaxdiskspace") dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') inst.start() log.info("Assert no init_dse_file errors in the error log") assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') inst.stop() log.info("Set nsslapd-errorlog-maxlogsize after nsslapd-errorlog-logmaxdiskspace") dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') inst.start() log.info("Assert no init_dse_file errors in the error log") assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/config/removed_config_49298_test.py000066400000000000000000000050111421664411400307700ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os import logging import subprocess from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier0 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_restore_config(topo): """ Check that if a dse.ldif and backup are removed, that the server still starts. :id: e1c38fa7-30bc-46f2-a934-f8336f387581 :setup: Standalone instance :steps: 1. Stop the instance 2. Delete 'dse.ldif' 3. Start the instance :expectedresults: 1. Steps 1 and 2 succeed. 2. Server will succeed to start with restored cfg. 
""" topo.standalone.stop() dse_path = topo.standalone.get_config_dir() log.info(dse_path) for i in ('dse.ldif', 'dse.ldif.startOK'): p = os.path.join(dse_path, i) d = os.path.join(dse_path, i + '.49298') os.rename(p, d) # This will pass. topo.standalone.start() def test_removed_config(topo): """ Check that if a dse.ldif and backup are removed, that the server exits better than "segfault". :id: b45272d1-c197-473e-872f-07257fcb2ec0 :setup: Standalone instance :steps: 1. Stop the instance 2. Delete 'dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK' 3. Start the instance :expectedresults: 1. Steps 1 and 2 succeed. 2. Server will fail to start, but will not crash. """ topo.standalone.stop() dse_path = topo.standalone.get_config_dir() log.info(dse_path) for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): p = os.path.join(dse_path, i) d = os.path.join(dse_path, i + '.49298') os.rename(p, d) # We actually can't check the log output, because it can't read dse.ldif, # don't know where to write it yet! All we want is the server fail to # start here, rather than infinite run + segfault. with pytest.raises(subprocess.CalledProcessError): topo.standalone.start() # Restore the files so that setup-ds.l can work for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): p = os.path.join(dse_path, i) d = os.path.join(dse_path, i + '.49298') os.rename(d, p) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/cos/000077500000000000000000000000001421664411400231345ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/cos/__init__.py000066400000000000000000000000671421664411400252500ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Class of Service """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/cos/cos_test.py000066400000000000000000000141121421664411400253300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
# See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import time import pytest, os, ldap from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st as topo from lib389.idm.role import FilteredRoles from lib389.idm.nscontainer import nsContainer from lib389.idm.user import UserAccount logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) pytestmark = pytest.mark.tier1 @pytest.fixture(scope="function") def reset_ignore_vattr(topo, request): default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') def fin(): topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value) request.addfinalizer(fin) def test_positive(topo, reset_ignore_vattr): """CoS positive tests :id: a5a74235-597f-4fe8-8c38-826860927472 :setup: server :steps: 1. Add filter role entry 2. Add ns container 3. Add cos template 4. Add CosClassic Definition 5. Cos entries should be added and searchable 6. employeeType attribute should be there in user entry as per the cos plugin property :expectedresults: 1. Operation should success 2. Operation should success 3. Operation should success 4. Operation should success 5. Operation should success 6. 
Operation should success """ # Adding ns filter role roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) # adding ns container nsContainer(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ .create(properties={'cn': 'cosTemplates'}) # creating cos template properties = {'employeeType': 'EngType', 'cn': '"cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,dc=example,dc=com' } CosTemplate(topo.standalone, 'cn="cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ .create(properties=properties) # creating CosClassicDefinition properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), 'cosAttribute': 'employeeType', 'cosSpecifier': 'nsrole', 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ .create(properties=properties) # Adding User entry properties = { 'uid': 'enguser1', 'cn': 'enguser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'enguser1' } user = UserAccount(topo.standalone, 'cn=enguser1,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) # Asserting Cos should be added and searchable cosdef = CosClassicDefinitions(topo.standalone, DEFAULT_SUFFIX).get('cosClassicGenerateEmployeeTypeUsingnsrole') assert cosdef.dn == 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,dc=example,dc=com' assert cosdef.get_attr_val_utf8('cn') == 'cosClassicGenerateEmployeeTypeUsingnsrole' # CoS definition entry's cosSpecifier attribute specifies the employeeType attribute assert user.present('employeeType') cosdef.delete() def test_vattr_on_cos_definition(topo, reset_ignore_vattr): """Test nsslapd-ignore-virtual-attrs configuration 
attribute The attribute is ON by default. If a cos definition is added it is moved to OFF :id: e7ef5254-386f-4362-bbb4-9409f3f51b08 :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Create a cos definition for employeeType 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing) 5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. This should be successful """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" # creating CosClassicDefinition log.info("Create a cos definition") properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), 'cosAttribute': 'employeeType', 'cosSpecifier': 'nsrole', 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ .create(properties=properties) log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") time.sleep(2) assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') topo.standalone.stop() assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'") topo.standalone.start() cosdef.delete() if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) 
pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/cos/indirect_cos_test.py000066400000000000000000000141271421664411400272170ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import pytest import os import ldap import time import subprocess from lib389 import Entry from lib389.idm.user import UserAccounts from lib389.idm.domain import Domain from lib389.topologies import topology_st as topo from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD, HOST_STANDALONE, SERVERID_STANDALONE, PORT_STANDALONE) pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) TEST_USER_DN = "uid=test_user,ou=people,dc=example,dc=com" OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' def check_user(inst): """Search the test user and make sure it has the expected attrs """ try: entries = inst.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, "uid=test_user") log.debug('user: \n' + str(entries[0])) assert entries[0].hasAttr('ou'), "Entry is missing ou cos attribute" assert entries[0].hasAttr('x-department'), "Entry is missing description cos attribute" assert entries[0].hasAttr('x-en-ou'), "Entry is missing givenname cos attribute" except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) raise e def setup_subtree_policy(topo): """Set up subtree password policy """ topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) try: 
subprocess.call(['%s/dsconf' % topo.standalone.get_sbin_dir(), 'slapd-standalone1', 'localpwp', 'addsubtree', OU_PEOPLE]) except subprocess.CalledProcessError as e: log.error('Failed to create pw policy policy for {}: error {}'.format( OU_PEOPLE, e.message['desc'])) raise e domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.replace('pwdpolicysubentry', PW_POLICY_CONT_PEOPLE) time.sleep(1) def setup_indirect_cos(topo): """Setup indirect COS definition and template """ cosDef = Entry(('cn=cosDefinition,dc=example,dc=com', {'objectclass': ['top', 'ldapsubentry', 'cossuperdefinition', 'cosIndirectDefinition'], 'cosAttribute': ['ou merge-schemes', 'x-department merge-schemes', 'x-en-ou merge-schemes'], 'cosIndirectSpecifier': 'seeAlso', 'cn': 'cosDefinition'})) cosTemplate = Entry(('cn=cosTemplate,dc=example,dc=com', {'objectclass': ['top', 'extensibleObject', 'cosTemplate'], 'ou': 'My COS Org', 'x-department': 'My COS x-department', 'x-en-ou': 'my COS x-en-ou', 'cn': 'cosTemplate'})) try: topo.standalone.add_s(cosDef) topo.standalone.add_s(cosTemplate) except ldap.LDAPError as e: log.fatal('Failed to add cos: error ' + str(e)) raise e time.sleep(1) @pytest.fixture(scope="module") def setup(topo, request): """Add schema, and test user """ log.info('Add custom schema...') try: ATTR_1 = (b"( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") ATTR_2 = (b"( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") OC = (b"( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY ( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") topo.standalone.modify_s("cn=schema", [(ldap.MOD_ADD, 'attributeTypes', ATTR_1), (ldap.MOD_ADD, 'attributeTypes', ATTR_2), (ldap.MOD_ADD, 'objectClasses', OC)]) except ldap.LDAPError as e: log.fatal('Failed to add custom schema') raise e time.sleep(1) log.info('Add test user...') users = UserAccounts(topo.standalone, 
DEFAULT_SUFFIX) user_properties = { 'uid': 'test_user', 'cn': 'test user', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/test_user', 'seeAlso': 'cn=cosTemplate,dc=example,dc=com' } user = users.create(properties=user_properties) user.add('objectClass', 'xPerson') # Setup COS log.info("Setup indirect COS...") setup_indirect_cos(topo) def test_indirect_cos(topo, setup): """Test indirect cos :id: 890d5929-7d52-4a56-956e-129611b4649a :setup: standalone :steps: 1. Test cos is working for test user 2. Add subtree password policy 3. Test cos is working for test user :expectedresults: 1. User has expected cos attrs 2. Substree password policy setup is successful 3. User still has expected cos attrs """ # Step 1 - Search user and see if the COS attrs are included log.info('Checking user...') check_user(topo.standalone) # Step 2 - Add subtree password policy (Second COS - operational attribute) setup_subtree_policy(topo) # Step 3 - Check user again now hat we have a mix of vattrs log.info('Checking user...') check_user(topo.standalone) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/disk_monitoring/000077500000000000000000000000001421664411400255475ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/disk_monitoring/__init__.py000066400000000000000000000000661421664411400276620ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Disk Monitoring """ disk_monitoring_divide_test.py000066400000000000000000000070241421664411400336270ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/disk_monitoring# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._mapped_object import DSLdapObjects pytestmark = pytest.mark.tier2 disk_monitoring_ack = pytest.mark.skipif(not os.environ.get('DISK_MONITORING_ACK', False), reason="Disk monitoring tests may damage system configuration.") logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_dummy_mount(topology_st, request): cmds = ['setenforce 0', 'mkdir /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid), 'mount -t tmpfs tmpfs /var/log/dirsrv/slapd-{}/tmp -o size=0'.format(topology_st.standalone.serverid), 'chown dirsrv: /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid)] log.info('Create dummy mount') for cmd in cmds: log.info('Command used : %s' % cmd) subprocess.Popen(cmd, shell=True) def fin(): cmds = ['umount /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid), 'setenforce 1'] for cmd in cmds: log.info('Command used : %s' % cmds) subprocess.Popen(cmd, shell=True) request.addfinalizer(fin) @pytest.fixture(scope="function") def change_config(topology_st): topology_st.standalone.config.set('nsslapd-disk-monitoring', 'on') topology_st.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') @pytest.mark.ds4414 @pytest.mark.bz1890118 @pytest.mark.skipif(ds_is_older("1.4.3.16"), reason="Might fail because of bz1890118") @disk_monitoring_ack def test_produce_division_by_zero(topology_st, create_dummy_mount, change_config): """Test dirsrv will not crash when division by zero occurs :id: 51b11093-8851-41bd-86cb-217b1a3339c7 :customerscenario: True :setup: Standalone :steps: 1. Turn on disk monitoring 2. Go below the threshold 3. Check DS is up and not entering shutdown mode :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone log.info('Check search works before changing the nsslapd-auditlog attribute') try: DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) except ldap.SERVER_DOWN as e: log.info('Test failed - dirsrv crashed') assert False log.info('Change location of nsslapd-auditlog') standalone.config.set('nsslapd-auditlog', '/var/log/dirsrv/slapd-{}/tmp/audit'.format(standalone.serverid)) log.info('Check search will not fail') try: DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) except ldap.SERVER_DOWN as e: log.info('Test failed - dirsrv crashed') assert False log.info('If passed, run search again just in case') try: DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) except ldap.SERVER_DOWN as e: log.info('Test failed - dirsrv crashed') assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py000066400000000000000000001063451421664411400323700ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import os import subprocess import re import time import pytest from lib389.tasks import * from lib389._constants import * from lib389.utils import ensure_bytes from lib389.backend import Backends from lib389.topologies import topology_st as topo from lib389.paths import * from lib389.idm.user import UserAccounts pytestmark = pytest.mark.tier2 disk_monitoring_ack = pytest.mark.skipif(not os.environ.get('DISK_MONITORING_ACK', False), reason="Disk monitoring tests may damage system configuration.") THRESHOLD = '30' THRESHOLD_BYTES = '30000000' def _withouterrorlog(topo, condition, maxtimesleep): timecount = 0 while eval(condition): time.sleep(1) timecount += 1 if timecount >= maxtimesleep: break assert not eval(condition) def _witherrorlog(topo, condition, maxtimesleep): timecount = 0 with open(topo.standalone.errlog, 'r') as study: study = study.read() while condition not in study: time.sleep(1) timecount += 1 with open(topo.standalone.errlog, 'r') as study: study = study.read() if timecount >= maxtimesleep: break assert condition in study def presetup(topo): """ This is function is part of fixture function setup , will setup the environment for this test. """ topo.standalone.stop() if os.path.exists(topo.standalone.ds_paths.log_dir): subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) else: os.mkdir(topo.standalone.ds_paths.log_dir) subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) subprocess.call('chown {}: -R {}'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) subprocess.call('chown {}: -R {}/*'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) subprocess.call('restorecon -FvvR {}'.format(topo.standalone.ds_paths.log_dir), shell=True) topo.standalone.start() def setupthesystem(topo): """ This function is part of fixture function setup , will setup the environment for this test. 
""" global TOTAL_SIZE, USED_SIZE, AVAIL_SIZE, HALF_THR_FILL_SIZE, FULL_THR_FILL_SIZE topo.standalone.start() topo.standalone.config.set('nsslapd-disk-monitoring-grace-period', '1') topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) TOTAL_SIZE = int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[2])*4096/1024/1024 AVAIL_SIZE = round(int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[3]) * 4096 / 1024 / 1024) USED_SIZE = TOTAL_SIZE - AVAIL_SIZE HALF_THR_FILL_SIZE = TOTAL_SIZE - float(THRESHOLD) + 5 - USED_SIZE FULL_THR_FILL_SIZE = TOTAL_SIZE - 0.5 * float(THRESHOLD) + 5 - USED_SIZE HALF_THR_FILL_SIZE = round(HALF_THR_FILL_SIZE) FULL_THR_FILL_SIZE = round(FULL_THR_FILL_SIZE) topo.standalone.restart() @pytest.fixture(scope="module") def setup(request, topo): """ This is the fixture function , will run before running every test case. """ presetup(topo) setupthesystem(topo) def fin(): topo.standalone.stop() subprocess.call(['umount', '-fl', topo.standalone.ds_paths.log_dir]) topo.standalone.start() request.addfinalizer(fin) @pytest.fixture(scope="function") def reset_logs(topo): """ Reset the errors log file before the test """ open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() @disk_monitoring_ack def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): """Verify operation when Disk monitoring is off :id: 73a97536-fe9e-11e8-ba9f-8c16451d917b :setup: Standalone :steps: 1. Turn off disk monitoring 2. Go below the threshold 3. Check DS is up and not entering shutdown mode :expectedresults: 1. Should Success 2. Should Success 3. 
Should Success """ try: # Turn off disk monitoring topo.standalone.config.set('nsslapd-disk-monitoring', 'off') topo.standalone.restart() # go below the threshold subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) # Wait for disk monitoring plugin thread to wake up _withouterrorlog(topo, 'topo.standalone.status() != True', 10) # Check DS is up and not entering shutdown mode assert topo.standalone.status() == True finally: os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) @disk_monitoring_ack def test_enable_external_libs_debug_log(topo, setup, reset_logs): """Check that OpenLDAP logs are successfully enabled and disabled when disk threshold is reached :id: 121b2b24-ecba-48e2-9ee2-312d929dc8c6 :setup: Standalone instance :steps: 1. Set nsslapd-external-libs-debug-enabled to "on" 2. Go straight below 1/2 of the threshold 3. Verify that the external libs debug setting is disabled 4. Go back above 1/2 of the threshold 5. Verify that the external libs debug setting is enabled back :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ try: # Verify that verbose logging was set to default level assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) # Verify that logging is disabled _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'off'", 31) finally: os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'on'", 31) assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') @disk_monitoring_ack def test_free_up_the_disk_space_and_change_ds_config(topo, setup, reset_logs): """Free up the disk space and change DS config :id: 7be4d560-fe9e-11e8-a307-8c16451d917b :setup: Standalone :steps: 1. Enabling Disk Monitoring plugin and setting disk monitoring logging to critical 2. Verify no message about loglevel is present in the error log 3. Verify no message about disabling logging is present in the error log 4. Verify no message about removing rotated logs is present in the error log :expectedresults: 1. Should Success 2. Should Success 3. Should Success 4. 
Should Success """ # Enabling Disk Monitoring plugin and setting disk monitoring logging to critical assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() # Verify no message about loglevel is present in the error log # Verify no message about disabling logging is present in the error log # Verify no message about removing rotated logs is present in the error log with open(topo.standalone.errlog, 'r') as study: study = study.read() assert 'temporarily setting error loglevel to zero' not in study assert 'disabling access and audit logging' not in study assert 'deleting rotated logs' not in study @disk_monitoring_ack def test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): """Verify operation with "nsslapd-disk-monitoring-logging-critical: off :id: 82363bca-fe9e-11e8-9ae7-8c16451d917b :setup: Standalone :steps: 1. Verify that verbose logging was set to default level 2. Verify that logging is disabled 3. Verify that rotated logs were not removed :expectedresults: 1. Should Success 2. Should Success 3. 
Should Success """ try: # Verify that verbose logging was set to default level assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[ 0].split(' ')[1]) # Verify that logging is disabled _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 10) assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off' # Verify that rotated logs were not removed with open(topo.standalone.errlog, 'r') as study: study = study.read() assert 'disabling access and audit logging' in study _witherrorlog(topo, 'deleting rotated logs', 11) study = open(topo.standalone.errlog).read() assert "Unable to remove file: {}".format(topo.standalone.ds_paths.log_dir) not in study assert 'is too far below the threshold' not in study finally: os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) @disk_monitoring_ack def test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold(topo, setup, reset_logs): """Verify operation with \"nsslapd-disk-monitoring-logging-critical: on\" below 1/2 of the threshold Verify recovery :id: 8940c502-fe9e-11e8-bcc0-8c16451d917b :setup: Standalone :steps: 1. Verify that DS goes into shutdown mode 2. Verify that DS exited shutdown mode :expectedresults: 1. Should Success 2. 
Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') topo.standalone.restart() # Verify that DS goes into shutdown mode if float(THRESHOLD) > FULL_THR_FILL_SIZE: FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) else: subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) _witherrorlog(topo, 'is too far below the threshold', 20) os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) # Verify that DS exited shutdown mode _witherrorlog(topo, 'Available disk space is now acceptable', 25) @disk_monitoring_ack def test_setting_nsslapd_disk_monitoring_logging_critical_to_off(topo, setup, reset_logs): """Setting nsslapd-disk-monitoring-logging-critical to "off" :id: 93265ec4-fe9e-11e8-af93-8c16451d917b :setup: Standalone :steps: 1. Setting nsslapd-disk-monitoring-logging-critical to "off" :expectedresults: 1. Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() assert topo.standalone.status() == True @disk_monitoring_ack def test_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): """Verify operation with nsslapd-disk-monitoring-logging-critical: off :id: 97985a52-fe9e-11e8-9914-8c16451d917b :setup: Standalone :steps: 1. Verify that logging is disabled 2. Verify that rotated logs were removed 3. Verify that verbose logging was set to default level 4. Verify that logging is disabled 5. 
Verify that rotated logs were removed :expectedresults: 1. Should Success 2. Should Success 3. Should Success 4. Should Success 5. Should Success """ # Verify that logging is disabled try: assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') assert topo.standalone.config.set('nsslapd-accesslog-level', '772') topo.standalone.restart() # Verify that rotated logs were removed users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(10): user_properties = { 'uid': 'cn=anuj{}'.format(i), 'cn': 'cn=anuj{}'.format(i), 'sn': 'cn=anuj{}'.format(i), 'userPassword': "Itsme123", 'uidNumber': '1{}'.format(i), 'gidNumber': '2{}'.format(i), 'homeDirectory': '/home/{}'.format(i) } users.create(properties=user_properties) for j in range(100): for i in [i for i in users.list()]: i.bind('Itsme123') assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) topo.standalone.bind_s(DN_DM, PW_DM) assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') assert topo.standalone.config.set('nsslapd-accesslog-level', '256') topo.standalone.restart() subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo2'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) # Verify that verbose logging was set to default level _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 10) assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[0].split(' ')[1]) # Verify that logging 
is disabled _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 20) with open(topo.standalone.errlog, 'r') as study: study = study.read() assert 'disabling access and audit logging' in study # Verify that rotated logs were removed _witherrorlog(topo, 'deleting rotated logs', 10) with open(topo.standalone.errlog, 'r') as study:study = study.read() assert 'Unable to remove file:' not in study assert 'is too far below the threshold' not in study for i in [i for i in users.list()]: i.delete() finally: os.remove('{}/foo2'.format(topo.standalone.ds_paths.log_dir)) @disk_monitoring_ack def test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold(topo, setup, reset_logs): """Verify operation with nsslapd-disk-monitoring-logging-critical: off below 1/2 of the threshold Verify shutdown Recovery and setup :id: 9d4c7d48-fe9e-11e8-b5d6-8c16451d917b :setup: Standalone :steps: 1. Verify that DS goes into shutdown mode 2. Verifying that DS has been shut down after the grace period 3. Verify logging enabled 4. Create rotated logfile 5. Enable verbose logging :expectedresults: 1. Should Success 2. Should Success 3. Should Success 4. Should Success 5. 
Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') topo.standalone.restart() # Verify that DS goes into shutdown mode if float(THRESHOLD) > FULL_THR_FILL_SIZE: FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) else: subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) # Increased sleep to avoid failure _witherrorlog(topo, 'is too far below the threshold', 100) _witherrorlog(topo, 'Signaling slapd for shutdown', 90) # Verifying that DS has been shut down after the grace period time.sleep(2) assert topo.standalone.status() == False # free_space os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() # StartSlapd topo.standalone.start() # verify logging enabled assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'on' assert topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-logging-enabled') == 'on' with open(topo.standalone.errlog, 'r') as study: study = study.read() assert 'disabling access and audit logging' not in study assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') assert topo.standalone.config.set('nsslapd-accesslog-level', '772') topo.standalone.restart() # create rotated logfile users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(10): user_properties = { 'uid': 'cn=anuj{}'.format(i), 'cn': 'cn=anuj{}'.format(i), 'sn': 'cn=anuj{}'.format(i), 'userPassword': "Itsme123", 'uidNumber': '1{}'.format(i), 'gidNumber': '2{}'.format(i), 
'homeDirectory': '/home/{}'.format(i) } users.create(properties=user_properties) for j in range(100): for i in [i for i in users.list()]: i.bind('Itsme123') assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) topo.standalone.bind_s(DN_DM, PW_DM) # enable verbose logging assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') assert topo.standalone.config.set('nsslapd-accesslog-level', '256') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() for i in [i for i in users.list()]: i.delete() @disk_monitoring_ack def test_go_straight_below_half_of_the_threshold(topo, setup, reset_logs): """Go straight below 1/2 of the threshold Recovery and setup :id: a2a0664c-fe9e-11e8-b220-8c16451d917b :setup: Standalone :steps: 1. Go straight below 1/2 of the threshold 2. Verify that verbose logging was set to default level 3. Verify that logging is disabled 4. Verify DS is in shutdown mode 5. Verify DS has recovered from shutdown :expectedresults: 1. Should Success 2. Should Success 3. Should Success 4. Should Success 5. 
Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() if float(THRESHOLD) > FULL_THR_FILL_SIZE: FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) else: subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) # Verify that verbose logging was set to default level assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str(topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])) )[0].split(' ')[1]) # Verify that logging is disabled _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) # Verify that rotated logs were removed _witherrorlog(topo, 'disabling access and audit logging', 2) _witherrorlog(topo, 'deleting rotated logs', 11) with open(topo.standalone.errlog, 'r') as study:study = study.read() assert 'Unable to remove file:' not in study # Verify DS is in shutdown mode _withouterrorlog(topo, 'topo.standalone.status() != False', 90) _witherrorlog(topo, 'is too far below the threshold', 2) # Verify DS has recovered from shutdown os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() topo.standalone.start() _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'on'", 20) with open(topo.standalone.errlog, 'r') as study: study = study.read() assert 'disabling 
access and audit logging' not in study @disk_monitoring_ack def test_readonly_on_threshold(topo, setup, reset_logs): """Verify that nsslapd-disk-monitoring-readonly-on-threshold switches the server to read-only mode :id: 06814c19-ef3c-4800-93c9-c7c6e76fcbb9 :customerscenario: True :setup: Standalone :steps: 1. Verify that the backend is in read-only mode 2. Go back above the threshold 3. Verify that the backend is in read-write mode :expectedresults: 1. Should Success 2. Should Success 3. Should Success """ file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) backends = Backends(topo.standalone) backend_name = backends.list()[0].rdn # Verify that verbose logging was set to default level topo.standalone.deleteErrorLogs() assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') topo.standalone.restart() try: subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={HALF_THR_FILL_SIZE}']) _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) try: user = users.create_test_user() user.delete() except ldap.UNWILLING_TO_PERFORM as e: if 'database is read-only' not in str(e): raise os.remove(file_path) _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 11) user = users.create_test_user() assert user.exists() user.delete() finally: if os.path.exists(file_path): os.remove(file_path) @disk_monitoring_ack def test_readonly_on_threshold_below_half_of_the_threshold(topo, setup, reset_logs): """Go below 1/2 of the threshold when readonly on threshold is enabled :id: 10262663-b41f-420e-a2d0-9532dd54fa7c :customerscenario: True :setup: Standalone :steps: :expectedresults: 1. Go straight below 1/2 of the threshold 2. Verify that the backend is in read-only mode 3. Go back above the threshold 4. 
Verify that the backend is in read-write mode :expectedresults: 1. Should Success 2. Should Success 3. Should Success 4. Should Success """ file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) backends = Backends(topo.standalone) backend_name = backends.list()[0].rdn topo.standalone.deleteErrorLogs() assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') topo.standalone.restart() try: if float(THRESHOLD) > FULL_THR_FILL_SIZE: FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) else: subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) try: user = users.create_test_user() user.delete() except ldap.UNWILLING_TO_PERFORM as e: if 'database is read-only' not in str(e): raise _witherrorlog(topo, 'is too far below the threshold', 51) # Verify DS has recovered from shutdown os.remove(file_path) _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 51) user = users.create_test_user() assert user.exists() user.delete() finally: if os.path.exists(file_path): os.remove(file_path) @disk_monitoring_ack def test_below_half_of_the_threshold_not_starting_after_shutdown(topo, setup, reset_logs): """Test that the instance won't start if we are below 1/2 of the threshold :id: cceeaefd-9fa4-45c5-9ac6-9887a0671ef8 :customerscenario: True :setup: Standalone :steps: 1. Go straight below 1/2 of the threshold 2. Try to start the instance 3. Go back above the threshold 4. Try to start the instance :expectedresults: 1. Should Success 2. Should Fail 3. Should Success 4. 
Should Success """ file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) topo.standalone.deleteErrorLogs() assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') topo.standalone.restart() try: if float(THRESHOLD) > FULL_THR_FILL_SIZE: FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) else: subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) _withouterrorlog(topo, 'topo.standalone.status() == True', 120) try: topo.standalone.start() except (ValueError, subprocess.CalledProcessError): topo.standalone.log.info("Instance start up has failed as expected") _witherrorlog(topo, f'is too far below the threshold({THRESHOLD_BYTES} bytes). Exiting now', 2) # Verify DS has recovered from shutdown os.remove(file_path) topo.standalone.start() finally: if os.path.exists(file_path): os.remove(file_path) @disk_monitoring_ack def test_go_straight_below_4kb(topo, setup, reset_logs): """Go straight below 4KB :id: a855115a-fe9e-11e8-8e91-8c16451d917b :setup: Standalone :steps: 1. Go straight below 4KB 2. Clean space :expectedresults: 1. Should Success 2. 
Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') topo.standalone.restart() subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) _withouterrorlog(topo, 'topo.standalone.status() != False', 11) os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) topo.standalone.start() assert topo.standalone.status() == True @disk_monitoring_ack @pytest.mark.bz982325 def test_threshold_to_overflow_value(topo, setup, reset_logs): """Overflow in nsslapd-disk-monitoring-threshold :id: ad60ab3c-fe9e-11e8-88dc-8c16451d917b :setup: Standalone :steps: 1. Setting nsslapd-disk-monitoring-threshold to overflow_value :expectedresults: 1. Should Success """ overflow_value = '3000000000' # Setting nsslapd-disk-monitoring-threshold to overflow_value assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value)) assert overflow_value == re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str( topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring-threshold'])))[0].split(' ')[1] @disk_monitoring_ack @pytest.mark.bz970995 def test_threshold_is_reached_to_half(topo, setup, reset_logs): """RHDS not shutting down when disk monitoring threshold is reached to half. :id: b2d3665e-fe9e-11e8-b9c0-8c16451d917b :setup: Standalone :steps: Standalone 1. Verify that there is not endless loop of error messages :expectedresults: 1. 
Should Success """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) topo.standalone.restart() subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) # Verify that there is not endless loop of error messages _witherrorlog(topo, "temporarily setting error loglevel to the default level", 10) with open(topo.standalone.errlog, 'r') as study:study = study.read() assert len(re.findall("temporarily setting error loglevel to the default level", study)) == 1 os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) @disk_monitoring_ack @pytest.mark.parametrize("test_input,expected", [ ("nsslapd-disk-monitoring-threshold", '-2'), ("nsslapd-disk-monitoring-threshold", '9223372036854775808'), ("nsslapd-disk-monitoring-threshold", '2047'), ("nsslapd-disk-monitoring-threshold", '0'), ("nsslapd-disk-monitoring-threshold", '-1294967296'), ("nsslapd-disk-monitoring-threshold", 'invalid'), ("nsslapd-disk-monitoring", 'invalid'), ("nsslapd-disk-monitoring", '1'), ("nsslapd-disk-monitoring-grace-period", '0'), ("nsslapd-disk-monitoring-grace-period", '525 948'), ("nsslapd-disk-monitoring-grace-period", '-1'), ("nsslapd-disk-monitoring-logging-critical", 'oninvalid'), ("nsslapd-disk-monitoring-grace-period", '-1'), ("nsslapd-disk-monitoring-grace-period", '0'), ]) def test_negagtive_parameterize(topo, setup, reset_logs, test_input, expected): """Verify that invalid operations are not permitted :id: b88efbf8-fe9e-11e8-8499-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Verify that invalid operations are not permitted. :expectedresults: 1. Should not success. 
""" with pytest.raises(Exception): topo.standalone.config.set(test_input, ensure_bytes(expected)) @disk_monitoring_ack def test_valid_operations_are_permitted(topo, setup, reset_logs): """Verify that valid operations are permitted :id: bd4f83f6-fe9e-11e8-88f4-8c16451d917b :setup: Standalone :steps: 1. Verify that valid operations are permitted :expectedresults: 1. Should Success. """ assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') assert topo.standalone.config.set('nsslapd-errorlog-level', '8') topo.standalone.restart() # Trying to delete nsslapd-disk-monitoring-threshold assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')]) # Trying to add another value to nsslapd-disk-monitoring-threshold (check that it is not multivalued) topo.standalone.config.add('nsslapd-disk-monitoring-threshold', '2000001') # Trying to delete nsslapd-disk-monitoring assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring', ensure_bytes(str( topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring'])[ 0]).split(' ')[2].split('\n\n')[0]))]) # Trying to add another value to nsslapd-disk-monitoring topo.standalone.config.add('nsslapd-disk-monitoring', 'off') # Trying to delete nsslapd-disk-monitoring-grace-period assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')]) # Trying to add another value to nsslapd-disk-monitoring-grace-period topo.standalone.config.add('nsslapd-disk-monitoring-grace-period', '61') # Trying to delete nsslapd-disk-monitoring-logging-critical assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical', ensure_bytes(str( topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', [ 
'nsslapd-disk-monitoring-logging-critical'])[ 0]).split(' ')[2].split('\n\n')[0]))]) # Trying to add another value to nsslapd-disk-monitoring-logging-critical assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py000066400000000000000000000027701421664411400312730ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import pytest from lib389.monitor import MonitorDiskSpace from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 def test_basic(topo): """Test that the cn=disk space,cn=monitor gives at least one value :id: f1962762-2c6c-4e50-97af-a00012a7486d :setup: Standalone :steps: 1. Get cn=disk space,cn=monitor entry 2. Check it has at least one dsDisk attribute 3. Check dsDisk attribute has the partition and sizes 4. Check the numbers are valid integers :expectedresults: 1. It should succeed 2. It should succeed 3. It should succeed 4. 
It should succeed """ inst = topo.standalone # Turn off disk monitoring disk_space_mon = MonitorDiskSpace(inst) disk_str = disk_space_mon.get_disks()[0] inst.log.info('Check that "partition", "size", "used", "available", "use%" words are present in the string') words = ["partition", "size", "used", "available", "use%"] assert all(map(lambda word: word in disk_str, words)) inst.log.info("Check that the sizes are numbers") for word in words[1:]: number = disk_str.split(f'{word}="')[1].split('"')[0] try: int(number) except ValueError: raise ValueError(f'A "{word}" value is not a number') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_logs/000077500000000000000000000000001421664411400240025ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_logs/__init__.py000066400000000000000000000000741421664411400261140ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Logs """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py000066400000000000000000001431001421664411400270440ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # from decimal import * import os import logging import pytest import subprocess from lib389._mapped_object import DSLdapObject from lib389.topologies import topology_st from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD from lib389.utils import ds_is_older, ds_is_newer from lib389.config import RSA import ldap import glob import re pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled' PLUGIN_LOGGING = 'nsslapd-plugin-logging' USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX def add_users(topology_st, users_num): users = UserAccounts(topology_st, DEFAULT_SUFFIX) log.info('Adding %d users' % users_num) for i in range(0, users_num): uid = 1000 + i users.create(properties={ 'uid': 'testuser%d' % uid, 'cn': 'testuser%d' % uid, 'sn': 'user', 'uidNumber': '%d' % uid, 'gidNumber': '%d' % uid, 'homeDirectory': '/home/testuser%d' % uid }) def search_users(topology_st): users = UserAccounts(topology_st, DEFAULT_SUFFIX) entries = users.list() # We just assert we got some data ... 
assert len(entries) > 0 def delete_obj(obj): if obj.exists(): obj.delete() def add_group_and_perform_user_operations(topology_st): topo = topology_st.standalone # Add the automember group groups = Groups(topo, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group'}) ous = OrganizationalUnits(topo, DEFAULT_SUFFIX) branch1 = ous.create(properties={'ou': 'branch1'}) # Add the automember config entry am_configs = AutoMembershipDefinitions(topo) am_config = am_configs.create(properties={'cn': 'config', 'autoMemberScope': branch1.dn, 'autoMemberFilter': 'objectclass=top', 'autoMemberDefaultGroup': group.dn, 'autoMemberGroupingAttr': 'member:dn'}) # Add a user that should get added to the group users = UserAccounts(topo, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn)) test_user = users.create_test_user(uid=777) # Check if created user is group member assert test_user.dn in group.list_members() log.info('Renaming user') test_user.rename('uid=new_test_user_777', newsuperior=DEFAULT_SUFFIX) log.info('Delete the user') delete_obj(test_user) log.info('Delete automember entry, org. 
unit and group for the next test') delete_obj(am_config) delete_obj(branch1) delete_obj(group) @pytest.fixture(scope="module") def enable_plugins(topology_st): topo = topology_st.standalone log.info("Enable automember plugin") plugin = AutoMembershipPlugin(topo) plugin.enable() log.info('Enable Referential Integrity plugin') plugin = ReferentialIntegrityPlugin(topo) plugin.enable() log.info('Set nsslapd-plugin-logging to on') topo.config.set(PLUGIN_LOGGING, 'ON') log.info('Restart the server') topo.restart() def add_user_log_level(topology_st, loglevel, request): topo = topology_st.standalone default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) log.info(f'Configure access log level to {loglevel}') topo.config.set(LOG_ACCESS_LEVEL, str(loglevel)) add_group_and_perform_user_operations(topology_st) def fin(): topo.config.set(LOG_ACCESS_LEVEL, default_log_level) log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() request.addfinalizer(fin) @pytest.fixture(scope="function") def add_user_log_level_260(topology_st, enable_plugins, request): access_log_level = 4 + 256 add_user_log_level(topology_st, access_log_level, request) @pytest.fixture(scope="function") def add_user_log_level_516(topology_st, enable_plugins, request): access_log_level = 4 + 512 add_user_log_level(topology_st, access_log_level, request) @pytest.fixture(scope="function") def add_user_log_level_131076(topology_st, enable_plugins, request): access_log_level = 4 + 131072 add_user_log_level(topology_st, access_log_level, request) @pytest.fixture(scope="function") def clean_access_logs(topology_st, request): def _clean_access_logs(): topo = topology_st.standalone log.info("Stopping the instance") topo.stop() log.info("Deleting the access logs") topo.deleteAccessLogs() log.info("Starting the instance") topo.start() request.addfinalizer(_clean_access_logs) return clean_access_logs @pytest.fixture(scope="function") def remove_users(topology_st, request): def 
_remove_users(): topo = topology_st.standalone users = UserAccounts(topo, DEFAULT_SUFFIX) entries = users.list() assert len(entries) > 0 log.info("Removing all added users") for entry in entries: delete_obj(entry) request.addfinalizer(_remove_users) def set_audit_log_config_values(topology_st, request, enabled, logsize): topo = topology_st.standalone topo.config.set('nsslapd-auditlog-logging-enabled', enabled) topo.config.set('nsslapd-auditlog-maxlogsize', logsize) def fin(): topo.start() log.info('Setting audit log config back to default values') topo.config.set('nsslapd-auditlog-logging-enabled', 'off') topo.config.set('nsslapd-auditlog-maxlogsize', '100') request.addfinalizer(fin) @pytest.fixture(scope="function") def set_audit_log_config_values_to_rotate(topology_st, request): set_audit_log_config_values(topology_st, request, 'on', '1') @pytest.fixture(scope="function") def disable_access_log_buffering(topology_st, request): log.info('Disable access log buffering') topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') def fin(): log.info('Enable access log buffering') topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'on') request.addfinalizer(fin) return disable_access_log_buffering @pytest.mark.bz1273549 def test_check_default(topology_st): """Check the default value of nsslapd-logging-hr-timestamps-enabled, it should be ON :id: 2d15002e-9ed3-4796-b0bb-bf04e4e59bd3 :setup: Standalone instance :steps: 1. Fetch the value of nsslapd-logging-hr-timestamps-enabled attribute 2. Test that the attribute value should be "ON" by default :expectedresults: 1. Value should be fetched successfully 2. 
Value should be "ON" by default """ # Get the default value of nsslapd-logging-hr-timestamps-enabled attribute default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP) # Now check it should be ON by default assert default == "on" log.debug(default) @pytest.mark.bz1273549 def test_plugin_set_invalid(topology_st): """Try to set some invalid values for nsslapd-logging-hr-timestamps-enabled attribute :id: c60a68d2-703a-42bf-a5c2-4040736d511a :setup: Standalone instance :steps: 1. Set some "JUNK" value of nsslapd-logging-hr-timestamps-enabled attribute :expectedresults: 1. There should be an operation error """ log.info('test_plugin_set_invalid - Expect to fail with junk value') with pytest.raises(ldap.OPERATIONS_ERROR): topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') @pytest.mark.bz1273549 def test_log_plugin_on(topology_st, remove_users): """Check access logs for millisecond, when nsslapd-logging-hr-timestamps-enabled=ON :id: 65ae4e2a-295f-4222-8d69-12124bc7a872 :setup: Standalone instance :steps: 1. To generate big logs, add 100 test users 2. Search users to generate more access logs 3. Restart server 4. Parse the logs to check the milliseconds got recorded in logs :expectedresults: 1. Add operation should be successful 2. Search operation should be successful 3. Server should be restarted successfully 4. 
There should be milliseconds added in the access logs """ log.info('Bug 1273549 - Check access logs for millisecond, when attribute is ON') log.info('perform any ldap operation, which will trigger the logs') add_users(topology_st.standalone, 10) search_users(topology_st.standalone) log.info('Restart the server to flush the logs') topology_st.standalone.restart(timeout=10) log.info('parse the access logs') access_log_lines = topology_st.standalone.ds_access_log.readlines() assert len(access_log_lines) > 0 assert topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') @pytest.mark.bz1273549 def test_log_plugin_off(topology_st, remove_users): """Milliseconds should be absent from access logs when nsslapd-logging-hr-timestamps-enabled=OFF :id: b3400e46-d940-4574-b399-e3f4b49bc4b5 :setup: Standalone instance :steps: 1. Set nsslapd-logging-hr-timestamps-enabled=OFF 2. Restart the server 3. Delete old access logs 4. Do search operations to generate fresh access logs 5. Restart the server 6. Check access logs :expectedresults: 1. Attribute nsslapd-logging-hr-timestamps-enabled should be set to "OFF" 2. Server should restart 3. Access logs should be deleted 4. Search operation should PASS 5. Server should restart 6. 
There should not be any milliseconds added in the access logs """ log.info('Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF') log.info('test_log_plugin_off - set the configuration attribute to OFF') topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'OFF') log.info('Restart the server to flush the logs') topology_st.standalone.restart(timeout=10) log.info('test_log_plugin_off - delete the previous access logs') topology_st.standalone.deleteAccessLogs() # Now generate some fresh logs add_users(topology_st.standalone, 10) search_users(topology_st.standalone) log.info('Restart the server to flush the logs') topology_st.standalone.restart(timeout=10) log.info('check access log that microseconds are not present') access_log_lines = topology_st.standalone.ds_access_log.readlines() assert len(access_log_lines) > 0 assert not topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering): """Tests server-initiated internal operations :id: 798d06fe-92e8-4648-af66-21349c20638e :setup: Standalone instance :steps: 1. Set nsslapd-plugin-logging to on 2. Configure access log level to only 0 3. Check the access logs. :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. 
Access log should not contain internal operations log formats """ topo = topology_st.standalone default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) log.info('Set nsslapd-plugin-logging to on') topo.config.set(PLUGIN_LOGGING, 'ON') log.info('Configure access log level to 0') access_log_level = '0' topo.config.set(LOG_ACCESS_LEVEL, access_log_level) log.info('Restart the server to flush the logs') topo.restart() # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check if access log does not contain internal log of MOD operation") # (Internal) op=2(2)(1) SRCH base="cn=config assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') log.info("Check if the other internal operations are not present") # conn=Internal(0) op=0 assert not topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') topo.config.set(LOG_ACCESS_LEVEL, default_log_level) @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_access_log_buffering): """Tests server-initiated internal operations :id: a3500e47-d941-4575-b399-e3f4b49bc4b6 :setup: Standalone instance :steps: 1. Set nsslapd-plugin-logging to on 2. Configure access log level to only 4 3. Check the access logs, it should contain info about MOD operation of cn=config and other internal operations should have the conn field set to Internal and all values inside parenthesis set to 0. :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. 
Access log should contain correct internal log formats with cn=config modification: "(Internal) op=2(1)(1)" "conn=Internal(0)" """ topo = topology_st.standalone default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) log.info('Set nsslapd-plugin-logging to on') topo.config.set(PLUGIN_LOGGING, 'ON') log.info('Configure access log level to 4') access_log_level = '4' topo.config.set(LOG_ACCESS_LEVEL, access_log_level) log.info('Restart the server to flush the logs') topo.restart() try: # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check if access log contains internal MOD operation in correct format") # (Internal) op=2(2)(1) SRCH base="cn=config assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries= assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=.*') log.info("Check if the other internal operations have the correct format") # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') finally: topo.config.set(LOG_ACCESS_LEVEL, default_log_level) @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_level_260(topology_st, add_user_log_level_260, disable_access_log_buffering): """Tests client initiated operations when automember plugin is enabled :id: e68a303e-c037-42b2-a5a0-fbea27c338a9 :setup: Standalone instance with internal operation logging on and nsslapd-plugin-logging to on :steps: 1. Configure access log level to 260 (4 + 256) 2. Set nsslapd-plugin-logging to on 3. Enable Referential Integrity and automember plugins 4. Restart the server 5. Add a test group 6. Add a test user and add it as member of the test group 7. Rename the test user 8. Delete the test user 9. 
Check the access logs for nested internal operation logs :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. Operation should be successful 7. Operation should be successful 8. Operation should be successful 9. Access log should contain internal info about operations of the user """ topo = topology_st.standalone log.info('Restart the server to flush the logs') topo.restart() # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check the access logs for ADD operation of the user") # op=10 ADD dn="uid=test_user_777,ou=topology_st, branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=group,' r'ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') # (Internal) op=10(1)(1) RESULT err=0 tag=48 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') # op=10 RESULT err=0 tag=105 assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') log.info("Check the access logs for MOD operation of the user") # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" assert topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN 
dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' 'ou=branch1,dc=example,dc=com".*') # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=12 RESULT err=0 tag=109 assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') log.info("Check the access logs for DEL operation of the user") # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" assert topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' 'dc=example,dc=com".*') # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=15 RESULT err=0 tag=107 assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') log.info("Check if the other internal operations have the correct format") # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_level_131076(topology_st, add_user_log_level_131076, disable_access_log_buffering): """Tests client-initiated operations while referential integrity plugin is enabled :id: 44836ac9-dabd-4a8c-abd5-ecd7c2509739 :setup: Standalone instance 
Configure access log level to - 131072 + 4 Set nsslapd-plugin-logging to on :steps: 1. Configure access log level to 131076 2. Set nsslapd-plugin-logging to on 3. Enable Referential Integrity and automember plugins 4. Restart the server 5. Add a test group 6. Add a test user and add it as member of the test group 7. Rename the test user 8. Delete the test user 9. Check the access logs for nested internal operation logs :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. Operation should be successful 7. Operation should be successful 8. Operation should be successful 9. Access log should contain internal info about operations of the user """ topo = topology_st.standalone log.info('Restart the server to flush the logs') topo.restart() # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check the access logs for ADD operation of the user") # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') # (Internal) op=10(1)(1) RESULT err=0 tag=48 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') # op=10 RESULT err=0 tag=105 assert not 
topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') log.info("Check the access logs for MOD operation of the user") # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' 'ou=branch1,dc=example,dc=com".*') # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=12 RESULT err=0 tag=109 assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') log.info("Check the access logs for DEL operation of the user") # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' 'dc=example,dc=com".*') # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=15 RESULT err=0 tag=107 assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') log.info("Check if the other internal operations have the correct format") # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') @pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x 
because of bug 1358706") @pytest.mark.bz1358706 @pytest.mark.ds49029 def test_internal_log_level_516(topology_st, add_user_log_level_516, disable_access_log_buffering): """Tests client initiated operations when referential integrity plugin is enabled :id: bee1d681-763d-4fa5-aca2-569cf93f8b71 :setup: Standalone instance Configure access log level to - 512+4 Set nsslapd-plugin-logging to on :steps: 1. Configure access log level to 516 2. Set nsslapd-plugin-logging to on 3. Enable Referential Integrity and automember plugins 4. Restart the server 5. Add a test group 6. Add a test user and add it as member of the test group 7. Rename the test user 8. Delete the test user 9. Check the access logs for nested internal operation logs :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. Operation should be successful 7. Operation should be successful 8. Operation should be successful 9. 
Access log should contain internal info about operations of the user """ topo = topology_st.standalone log.info('Restart the server to flush the logs') topo.restart() # These comments contain lines we are trying to find without regex (the op numbers are just examples) log.info("Check the access logs for ADD operation of the user") # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) ENTRY dn="cn=group,ou=Groups,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' r'ENTRY dn="cn=group,ou=groups,dc=example,dc=com".*') # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') # (Internal) op=10(1)(1) RESULT err=0 tag=48 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') log.info("Check the access logs for MOD operation of the user") # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) 
op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' 'ou=branch1,dc=example,dc=com".*') # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' 'ou=branch1,dc=example,dc=com".*') # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=12 RESULT err=0 tag=48 assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=48.*') log.info("Check the access logs for DEL operation of the user") # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') if ds_is_older(('1.4.3.9', '1.4.4.3')): # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' 'dc=example,dc=com".*') # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' 'dc=example,dc=com".*') # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') # op=15 RESULT err=0 tag=107 assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') log.info("Check if the other internal operations have the correct format") # conn=Internal(0) op=0 assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') @pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Not implemented") @pytest.mark.bz1358706 @pytest.mark.ds49232 def test_access_log_truncated_search_message(topology_st, clean_access_logs): """Tests that the access log message is properly 
truncated when the message is too long :id: 0a9af37d-3311-4a2f-ac0a-9a1c631aaf27 :setup: Standalone instance :steps: 1. Make a search with a 2048+ characters basedn, filter and attribute list 2. Check the access log has the message and it's truncated :expectedresults: 1. Operation should be successful 2. Access log should contain truncated basedn, filter and attribute list """ topo = topology_st.standalone large_str_base = "".join("cn=test," for _ in range(512)) large_str_filter = "".join("(cn=test)" for _ in range(512)) users = UserAccounts(topo, f'{large_str_base}dc=ending') users._list_attrlist = [f'cn{i}' for i in range(512)] log.info("Make a search") users.filter(f'(|(objectclass=tester){large_str_filter}(cn=ending))') log.info('Restart the server to flush the logs') topo.restart() assert topo.ds_access_log.match(r'.*cn=test,cn=test,.*') assert topo.ds_access_log.match(r'.*objectClass=tester.*') assert topo.ds_access_log.match(r'.*cn10.*') assert not topo.ds_access_log.match(r'.*dc=ending.*') assert not topo.ds_access_log.match(r'.*cn=ending.*') assert not topo.ds_access_log.match(r'.*cn500.*') @pytest.mark.skipif(ds_is_newer("1.4.3"), reason="rsearch was removed") @pytest.mark.xfail(ds_is_older('1.4.2.0'), reason="May fail because of bug 1732053") @pytest.mark.bz1732053 @pytest.mark.ds50510 def test_etime_at_border_of_second(topology_st, clean_access_logs): topo = topology_st.standalone prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') cmd = [prog] # base search cmd.extend(['-s', DN_CONFIG]) # scope of the search cmd.extend(['-S', '0']) # host / port cmd.extend(['-h', HOST_STANDALONE]) cmd.extend(['-p', str(PORT_STANDALONE)]) # bound as DM to make it faster cmd.extend(['-D', DN_DM]) cmd.extend(['-w', PASSWORD]) # filter cmd.extend(['-f', "(cn=config)"]) # 2 samples SRCH cmd.extend(['-C', "2"]) output = subprocess.check_output(cmd) topo.stop() # No etime with 0.199xxx (everything should be few ms) invalid_etime = 
topo.ds_access_log.match(r'.*etime=0\.19.*') if invalid_etime: for i in range(len(invalid_etime)): log.error('It remains invalid or weird etime: %s' % invalid_etime[i]) assert not invalid_etime @pytest.mark.skipif(ds_is_older('1.3.10.1', '1.4.1'), reason="Fail because of bug 1749236") @pytest.mark.bz1749236 def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): """Test that the etime reported in the access log has a correct order of magnitude :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6 :setup: Standalone instance :steps: 1. Unset log buffering for the access log 2. Delete potential existing access logs 3. Add users 4. Search users 5. Restart the server to flush the logs 6. Parse the access log looking for the SRCH operation log 7. From the SRCH string get the start time and op number of the operation 8. From the op num find the associated RESULT string in the access log 9. From the RESULT string get the end time and the etime for the operation 10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime :expectedresults: 1. access log buffering is off 2. Previously existing access logs are deleted 3. Users are successfully added 4. Search operation is successful 5. Server is restarted and logs are flushed 6. SRCH operation log string is catched 7. start time and op number are collected 8. RESULT string is catched from the access log 9. end time and etime are collected 10. 
ratio between calculated elapsed time and logged etime is less or equal to 1 """ DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) log.info('add_users') add_users(topology_st.standalone, 30) log.info ('search users') search_users(topology_st.standalone) log.info('parse the access logs to get the SRCH string') # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] assert len(search_str) > 0 # the search_str returned looks like : # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" log.info('get the operation start time from the SRCH string') # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above start_time = (search_str.split()[0]).split(':')[3] log.info('get the OP number from the SRCH string') # Here we are getting the op number, 'op=93' in the above example op_num = search_str.split()[3] log.info('get the RESULT string matching the SRCH OP number') # Here we are looking at the RESULT string for the above search op, 'op=93' in this example result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] assert len(result_str) > 0 # The result_str returned looks like : # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017 # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077 log.info('get the operation end time from the RESULT string') # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example end_time = 
(result_str.split()[0]).split(':')[3] log.info('get the logged etime for the operation from the RESULT string') # Here we are getting the etime value, '0.005723017' in the example above if ds_is_older('1.4.3.8'): etime = result_str.split()[8].split('=')[1][:-3] else: etime = result_str.split()[10].split('=')[1][:-3] log.info('Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1') etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime) assert etime_ratio <= 1 @pytest.mark.skipif(ds_is_older('1.4.3.8'), reason="Fail because of bug 1850275") @pytest.mark.bz1850275 @pytest.mark.bz1924848 def test_optime_and_wtime_keywords(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): """Test that the new optime and wtime keywords are present in the access log and have correct values :id: dfb4a49d-1cfc-400e-ba43-c107f58d62cf :customerscenario: True :setup: Standalone instance :steps: 1. Unset log buffering for the access log 2. Delete potential existing access logs 3. Add users 4. Search users 5. Parse the access log looking for the SRCH operation log 6. From the SRCH string get the op number of the operation 7. From the op num find the associated RESULT string in the access log 8. Search for the wtime optime keywords in the RESULT string 9. From the RESULT string get the wtime, optime and etime values for the operation 10. Check that optime + wtime is approximatively etime :expectedresults: 1. access log buffering is off 2. Previously existing access logs are deleted 3. Users are successfully added 4. Search operation is successful 5. SRCH operation log string is catched 6. op number is collected 7. RESULT string is catched from the access log 8. wtime and optime keywords are collected 9. wtime, optime and etime values are collected 10. 
(optime + wtime) =~ etime """ log.info('add_users') add_users(topology_st.standalone, 30) log.info ('search users') search_users(topology_st.standalone) log.info('parse the access logs to get the SRCH string') # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] assert len(search_str) > 0 # the search_str returned looks like : # [22/Oct/2020:09:47:11.951316798 -0400] conn=1 op=96 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" log.info('get the OP number from the SRCH string') # Here we are getting the op number, 'op=96' in the above example op_num = search_str.split()[3] log.info('get the RESULT string matching the SRCH op number') # Here we are looking at the RESULT string for the above search op, 'op=96' in this example result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] assert len(result_str) > 0 # The result_str returned looks like : # [22/Oct/2020:09:47:11.963276018 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000180294 optime=0.011966632 etime=0.012141311 log.info('Search for the wtime keyword in the RESULT string') assert re.search('wtime', result_str) log.info('get the wtime value from the RESULT string') wtime_value = result_str.split()[8].split('=')[1][:-3] log.info('Search for the optime keyword in the RESULT string') assert re.search('optime', result_str) log.info('get the optime value from the RESULT string') optime_value = result_str.split()[9].split('=')[1][:-3] log.info('get the etime value from the RESULT string') etime_value = result_str.split()[10].split('=')[1][:-3] log.info('Check that (wtime + optime) is approximately equal to etime i.e. 
their ratio is 1') etime_ratio = (Decimal(wtime_value) + Decimal(optime_value)) // Decimal(etime_value) assert etime_ratio == 1 log.info('Perform a compare operation') topology_st.standalone.compare_s('uid=testuser1000,ou=people,dc=example,dc=com','uid', 'testuser1000') ops = topology_st.standalone.ds_access_log.match('.*CMP dn="uid=testuser1000,ou=people,dc=example,dc=com"') log.info('get the wtime and optime values from the RESULT string') ops_value = topology_st.standalone.ds_access_log.parse_line(ops[0]) value = topology_st.standalone.ds_access_log.match(f'.*op={ops_value["op"]} RESULT') time_value = topology_st.standalone.ds_access_log.parse_line(value[0]) wtime = time_value['rem'].split()[3].split('=')[1] optime = time_value['rem'].split()[4].split('=')[1] log.info('Check that compare operation is not generating negative values for wtime and optime') if (Decimal(wtime) > 0) and (Decimal(optime) > 0): assert True else: log.info('wtime and optime values are negatives') assert False @pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461") @pytest.mark.bz1662461 @pytest.mark.ds50428 @pytest.mark.ds49969 def test_log_base_dn_when_invalid_attr_request(topology_st, disable_access_log_buffering): """Test that DS correctly logs the base dn when a search with invalid attribute request is performed :id: 859de962-c261-4ffb-8705-97bceab1ba2c :setup: Standalone instance :steps: 1. Disable the accesslog-logbuffering config parameter 2. Delete the previous access log 3. Perform a base search on the DEFAULT_SUFFIX, using ten empty attribute requests 4. Check the access log file for 'invalid attribute request' 5. Check the access log file for 'SRCH base="\(null\)"' 6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"' :expectedresults: 1. Operations are visible in the access log in real time 2. Fresh new access log is created 3. The search operation raises a Protocol error 4. 
The access log should have an 'invalid attribute request' message 5. The access log should not have "\(null\)" as value for the Search base dn 6. The access log should have the value of DEFAULT_SUFFIX as Search base dn """ entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) log.info('delete the previous access logs to get a fresh new one') topology_st.standalone.deleteAccessLogs() log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request") log.info("A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028") # A ldap.PROTOCOL_ERROR exception is expected after 10 empty values with pytest.raises(ldap.PROTOCOL_ERROR): assert entry.get_attrs_vals_utf8(['', '', '', '', '', '', '', '', '', '', '']) # Search for appropriate messages in the access log log.info('Check the access logs for correct messages') # We should find the 'invalid attribute request' information assert topology_st.standalone.ds_access_log.match(r'.*invalid attribute request.*') # We should not find a "(null)" base dn mention assert not topology_st.standalone.ds_access_log.match(r'.*SRCH base="\(null\)".*') # We should find the base dn for the search assert topology_st.standalone.ds_access_log.match(r'.*SRCH base="{}".*'.format(DEFAULT_SUFFIX)) @pytest.mark.xfail(ds_is_older('1.3.8', '1.4.2'), reason="May fail because of bug 1676948") @pytest.mark.bz1676948 @pytest.mark.ds50536 def test_audit_log_rotate_and_check_string(topology_st, clean_access_logs, set_audit_log_config_values_to_rotate): """Version string should be logged only once at the top of audit log after it is rotated. :id: 14dffb22-2f9c-11e9-8a03-54e1ad30572c :customerscenario: True :setup: Standalone instance :steps: 1. Set nsslapd-auditlog-logging-enabled: on 2. Set nsslapd-auditlog-maxlogsize: 1 3. Do modifications to the entry, until audit log file is rotated 4. Check audit logs :expectedresults: 1. Attribute nsslapd-auditlog-logging-enabled should be set to on 2. 
Attribute nsslapd-auditlog-maxlogsize should be set to 1 3. Audit file should grow till 1MB and then should be rotated 4. Audit file log should contain version string only once at the top """ standalone = topology_st.standalone search_ds = '389-Directory' users = UserAccounts(standalone, DEFAULT_SUFFIX) user = users.create(properties={ 'uid': 'test_audit_log', 'cn': 'test', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '1000', 'homeDirectory': '/home/test', }) log.info('Doing modifications to rotate audit log') audit_log = standalone.ds_paths.audit_log while len(glob.glob(audit_log + '*')) == 2: user.replace('description', 'test'*100) log.info('Doing one more modification just in case') user.replace('description', 'test2'*100) standalone.stop() count = 0 with open(audit_log) as f: log.info('Check that DS string is present on first line') assert search_ds in f.readline() f.seek(0) log.info('Check that DS string is present only once') for line in f.readlines(): if search_ds in line: count += 1 assert count == 1 def test_enable_external_libs_debug_log(topology_st): """Check that OpenLDAP logs are successfully enabled and disabled :id: b04646e3-9a5e-45ae-ad81-2882c1daf23e :setup: Standalone instance :steps: 1. Create a user to bind on 2. Set nsslapd-external-libs-debug-enabled to "on" 3. Clean the error log 4. Bind as the user to generate OpenLDAP output 5. Restart the servers to flush the logs 6. Check the error log for OpenLDAP debug log 7. Set nsslapd-external-libs-debug-enabled to "on" 8. Clean the error log 9. Bind as the user to generate OpenLDAP output 10. Restart the servers to flush the logs 11. Check the error log for OpenLDAP debug log :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Logs are present 7. Success 8. Success 9. Success 10. Success 11. 
No logs are present """ standalone = topology_st.standalone log.info('Create a user to bind on') users = UserAccounts(standalone, DEFAULT_SUFFIX) user = users.ensure_state(properties={ 'uid': 'test_audit_log', 'cn': 'test', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '1000', 'homeDirectory': '/home/test', 'userPassword': PASSWORD }) log.info('Set nsslapd-external-libs-debug-enabled to "on"') standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') log.info('Clean the error log') standalone.deleteErrorLogs() log.info('Bind as the user to generate OpenLDAP output') user.bind(PASSWORD) log.info('Restart the servers to flush the logs') standalone.restart() log.info('Check the error log for OpenLDAP debug log') assert standalone.ds_error_log.match('.*libldap/libber.*') log.info('Set nsslapd-external-libs-debug-enabled to "off"') standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') log.info('Clean the error log') standalone.deleteErrorLogs() log.info('Bind as the user to generate OpenLDAP output') user.bind(PASSWORD) log.info('Restart the servers to flush the logs') standalone.restart() log.info('Check the error log for OpenLDAP debug log') assert not standalone.ds_error_log.match('.*libldap/libber.*') @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Might fail because of bug 1895460") @pytest.mark.bz1895460 @pytest.mark.ds4593 def test_cert_personality_log_help(topology_st): """Test changing the nsSSLPersonalitySSL attribute will raise help message in log :id: d6f17f64-d784-438e-89b6-8595bdf6defb :customerscenario: True :setup: Standalone :steps: 1. Create instance 2. Change nsSSLPersonalitySSL to wrong certificate nickname 3. Check there is a help message in error log :expectedresults: 1. Success 2. Success 3. 
Success """ WRONG_NICK = 'otherNick' standalone = topology_st.standalone standalone.enable_tls() log.info('Change nsSSLPersonalitySSL to wrong certificate nickname') config_RSA = RSA(standalone) config_RSA.set('nsSSLPersonalitySSL', WRONG_NICK) with pytest.raises(subprocess.CalledProcessError): standalone.restart() assert standalone.ds_error_log.match(r".*Please, make sure that nsSSLPersonalitySSL value " r"is correctly set to the certificate from NSS database " r"\(currently, nsSSLPersonalitySSL attribute " r"is set to '{}'\)\..*".format(WRONG_NICK)) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_logs/regression_test.py000066400000000000000000000052751421664411400276040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.dseldif import DSEldif from lib389._constants import DN_CONFIG, LOG_REPLICA, LOG_DEFAULT, LOG_TRACE, LOG_ACL from lib389.utils import os, logging from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.mark.bz1460718 @pytest.mark.parametrize("log_level", [(LOG_REPLICA + LOG_DEFAULT), (LOG_ACL + LOG_DEFAULT), (LOG_TRACE + LOG_DEFAULT)]) def test_default_loglevel_stripped(topo, log_level): """The default log level 16384 is stripped from the log level returned to a client :id: c300f8f1-aa11-4621-b124-e2be51930a6b :parametrized: yes :setup: Standalone instance :steps: 1. Change the error log level to the default and custom value. 2. 
Check if the server returns the new value. :expectedresults: 1. Changing the error log level should be successful. 2. Server should return the new log level. """ assert topo.standalone.config.set('nsslapd-errorlog-level', str(log_level)) assert topo.standalone.config.get_attr_val_int('nsslapd-errorlog-level') == log_level @pytest.mark.bz1460718 def test_dse_config_loglevel_error(topo): """Manually setting nsslapd-errorlog-level to 64 in dse.ldif throws error :id: 0eeefa17-ec1c-4208-8e7b-44d8fbc38f10 :setup: Standalone instance :steps: 1. Stop the server, edit dse.ldif file and change nsslapd-errorlog-level value to 64 2. Start the server and observe the error logs. :expectedresults: 1. Server should be successfully stopped and nsslapd-errorlog-level value should be changed. 2. Server should be successfully started without any errors being reported in the logs. """ topo.standalone.stop(timeout=10) dse_ldif = DSEldif(topo.standalone) try: dse_ldif.replace(DN_CONFIG, 'nsslapd-errorlog-level', 64) except: log.error('Failed to replace cn=config values of nsslapd-errorlog-level') raise topo.standalone.start(timeout=10) assert not topo.standalone.ds_error_log.match( '.*nsslapd-errorlog-level: ignoring 64 \\(since -d 266354688 was given on the command line\\).*') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_tools/000077500000000000000000000000001421664411400241765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_tools/__init__.py000066400000000000000000000000761421664411400263120ustar00rootroot00000000000000 """ :Requirement: 389-ds-base: Directory Server Tools """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_tools/logpipe_test.py000066400000000000000000000047651421664411400272620ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, 
Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import subprocess from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) SYS_TEST_USER = 'dirsrv_testuser' @pytest.fixture(scope="module") def sys_test_user(request): """Creates and deletes a system test user""" cmd = ['/usr/sbin/useradd', SYS_TEST_USER] log.info('Add system test user - {}'.format(SYS_TEST_USER)) try: subprocess.call(cmd) except subprocess.CalledProcessError as e: log.exception('Failed to add user {} error {}'.format(SYS_TEST_USER, e.output)) def fin(): cmd = ['/usr/sbin/userdel', SYS_TEST_USER] log.info('Delete system test user - {}'.format(SYS_TEST_USER)) try: subprocess.call(cmd) except subprocess.CalledProcessError as e: log.exception('Failed to delete user {} error {}'.format(SYS_TEST_USER, e.output)) request.addfinalizer(fin) def test_user_permissions(topo, sys_test_user): """Check permissions for usual user operations in log dir :id: 4e423cd5-300c-4df0-ab40-aec7e51c3be8 :feature: ds-logpipe :setup: Standalone instance :steps: 1. Add a new user to the system 2. Try to create a logpipe in the log directory with '-u' option specifying the user 3. 
Delete the user :expectedresults: Permission denied error happens """ ds_logpipe_path = os.path.join(topo.standalone.ds_paths.bin_dir, 'ds-logpipe.py') fakelogpipe_path = os.path.join(topo.standalone.ds_paths.log_dir, 'fakelog.pipe') # I think we need to add a function for this to lib389, when we will port the full test suite cmd = [ds_logpipe_path, fakelogpipe_path, '-u', SYS_TEST_USER] log.info('Try to create a logpipe in the log directory with "-u" option specifying the user') with pytest.raises(subprocess.CalledProcessError) as cp: result = subprocess.check_output(cmd) assert 'Permission denied' in result if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ds_tools/replcheck_test.py000066400000000000000000000527141421664411400275600ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import subprocess from lib389.utils import * from lib389.replica import Replicas, Replica, ReplicationManager from lib389._constants import * from lib389.config import CertmapLegacy from lib389.idm.nscontainer import nsContainers from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.services import ServiceAccounts from lib389.topologies import topology_m2 as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _create_container(inst, dn, name): """Creates container entry""" conts = nsContainers(inst, dn) cont = conts.create(properties={'cn': name}) time.sleep(1) return cont def _delete_container(cont): """Deletes container entry""" cont.delete() time.sleep(1) @pytest.fixture(scope="module") def topo_tls_ldapi(topo): """Enable TLS on both suppliers and reconfigure both agreements to use TLS Client auth. Also, setup ldapi and export DB """ m1 = topo.ms["supplier1"] m2 = topo.ms["supplier2"] # Create the certmap before we restart for enable_tls cm_m1 = CertmapLegacy(m1) cm_m2 = CertmapLegacy(m2) # We need to configure the same maps for both .... certmaps = cm_m1.list() certmaps['default']['DNComps'] = None certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' cm_m1.set(certmaps) cm_m2.set(certmaps) [i.enable_tls() for i in topo] # Create the replication dns services = ServiceAccounts(m1, DEFAULT_SUFFIX) repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) # Check the replication is "done". 
repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(m1, m2) # Now change the auth type replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) agmt_m1 = replica_m1.get_agreements().list()[0] agmt_m1.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', '%s' % m2.sslport), ) agmt_m1.remove_all('nsDS5ReplicaBindDN') replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) agmt_m2 = replica_m2.get_agreements().list()[0] agmt_m2.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', '%s' % m1.sslport), ) agmt_m2.remove_all('nsDS5ReplicaBindDN') log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck") os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir() for inst in topo: inst.config.set('nsslapd-ldapilisten', 'on') inst.config.set('nsslapd-ldapifilepath', '/var/run/slapd-{}.socket'.format(inst.serverid)) inst.restart() repl.test_replication(m1, m2) repl.test_replication(m2, m1) return topo def replcheck_cmd_list(topo_tls_ldapi): """Check ds-replcheck tool through ldap, ldaps, ldap with StartTLS, ldapi and compare exported ldif files """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] for inst in topo_tls_ldapi: inst.stop() inst.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, repl_data=True, outputfile='/tmp/export_{}.ldif'.format(inst.serverid)) inst.start() ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') if ds_is_newer("1.4.1.2"): replcheck_cmd = [[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], 
[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], [ds_replcheck_path, 'offline', '-b', DEFAULT_SUFFIX, '--conflicts', '--rid', '1', '-m', '/tmp/export_{}.ldif'.format(m1.serverid), '-r', '/tmp/export_{}.ldif'.format(m2.serverid)]] else: replcheck_cmd = [[ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '--conflicts', '-M', '/tmp/export_{}.ldif'.format(m1.serverid), '-R', '/tmp/export_{}.ldif'.format(m2.serverid)]] return replcheck_cmd @pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") def test_state(topo_tls_ldapi): """Check "state" report :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac178 :customerscenario: True :setup: Two supplier replication :steps: 1. Get the replication state value 2. 
The state value is as expected :expectedresults: 1. It should be successful 2. It should be successful """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') tool_cmd = [ds_replcheck_path, 'state', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] result = subprocess.check_output(tool_cmd, encoding='utf-8') assert (result.rstrip() == "Replication State: Supplier and Replica are in perfect synchronization") def test_check_ruv(topo_tls_ldapi): """Check that the report has RUV :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac179 :customerscenario: True :setup: Two supplier replication :steps: 1. Get RUV from supplier and replica 2. Generate the report 3. Check that the RUV is mentioned in the report :expectedresults: 1. It should be successful 2. It should be successful 3. The RUV should be mentioned in the report """ m1 = topo_tls_ldapi.ms["supplier1"] replicas_m1 = Replica(m1, DEFAULT_SUFFIX) ruv_entries = replicas_m1.get_attr_vals_utf8('nsds50ruv') for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8') assert all([ruv_entry in result for ruv_entry in ruv_entries]) def test_missing_entries(topo_tls_ldapi): """Check that the report has missing entries :id: f91b6798-6e6e-420a-ad2f-3222bb908b7d :customerscenario: True :setup: Two supplier replication :steps: 1. Pause replication between supplier and replica 2. Add two entries to supplier and two entries to replica 3. Generate the report 4. Check that the entries DN are mentioned in the report :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. 
The entries DN should be mentioned in the report """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] try: topo_tls_ldapi.pause_all_replicas() users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) user0 = users_m1.create_test_user(1000) user1 = users_m1.create_test_user(1001) users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) user2 = users_m2.create_test_user(1002) user3 = users_m2.create_test_user(1003) for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() assert user0.dn.lower() in result assert user1.dn.lower() in result finally: user0.delete() user1.delete() user2.delete() user3.delete() topo_tls_ldapi.resume_all_replicas() def test_tombstones(topo_tls_ldapi): """Check that the report mentions right number of tombstones :id: bd27de78-0046-431c-8240-a93052df1cdc :customerscenario: True :setup: Two supplier replication :steps: 1. Add an entry to supplier and wait for replication 2. Pause replication between supplier and replica 3. Delete the entry from supplier 4. Generate the report 5. Check that we have different number of tombstones in the report :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. It should be successful 5. It should be successful """ m1 = topo_tls_ldapi.ms["supplier1"] try: users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) time.sleep(1) topo_tls_ldapi.pause_all_replicas() user_m1.delete() time.sleep(2) for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() log.debug(result) finally: topo_tls_ldapi.resume_all_replicas() def test_conflict_entries(topo_tls_ldapi): """Check that the report has conflict entries :id: 4eda0c5d-0824-4cfd-896e-845faf49ddaf :customerscenario: True :setup: Two supplier replication :steps: 1. Pause replication between supplier and replica 2. 
Add two entries to supplier and two entries to replica 3. Delete first entry from supplier 4. Add a child to the first entry 5. Resume replication between supplier and replica 6. Generate the report 7. Check that the entries DN are mentioned in the report :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. It should be successful 5. It should be successful 6. It should be successful 7. The entries DN should be mentioned in the report """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] topo_tls_ldapi.pause_all_replicas() _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent0') _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent0') cont_p_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1') cont_p_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1') _delete_container(cont_p_m1) _create_container(m2, cont_p_m2.dn, 'conflict_child0') topo_tls_ldapi.resume_all_replicas() time.sleep(5) for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8') assert 'conflict_parent1' in result def test_inconsistencies(topo_tls_ldapi): """Check that the report mentions inconsistencies with attributes :id: c8fe3e84-b346-4969-8f5d-3462b643a1d2 :customerscenario: True :setup: Two supplier replication :steps: 1. Add an entry to supplier and wait for replication 2. Pause replication between supplier and replica 3. Set different description attr values to supplier and replica 4. Add telephoneNumber attribute to supplier and not to replica 5. Generate the report 6. Check that attribute values are mentioned in the report 7. Generate the report with -i option to ignore some attributes 8. Check that attribute values are mentioned in the report :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. It should be successful 5. It should be successful 6. 
The attribute values should be mentioned in the report 7. It should be successful 8. The attribute values should not be mentioned in the report """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] attr_m1 = "m1_inconsistency" attr_m2 = "m2_inconsistency" attr_first = "first ordered valued" attr_second = "second ordered valued" attr_m1_only = "123123123" try: users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) time.sleep(1) user_m2 = users_m2.get(user_m1.rdn) topo_tls_ldapi.pause_all_replicas() user_m1.set("description", attr_m1) user_m2.set("description", attr_m2) user_m1.set("telephonenumber", attr_m1_only) # Add the same multi-valued attrs, but out of order user_m1.set("cn", [attr_first, attr_second]) user_m2.set("cn", [attr_second, attr_first]) time.sleep(2) for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() assert attr_m1 in result assert attr_m2 in result assert attr_m1_only in result if ds_is_newer("1.3.9.1", "1.4.1.2"): assert attr_first not in result assert attr_second not in result # Ignore some attributes and check the output tool_cmd.extend(['-i', '{},{}'.format('description', 'telephonenumber')]) result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() assert attr_m1 not in result assert attr_m2 not in result assert attr_m1_only not in result if ds_is_newer("1.3.9.1", "1.4.1.2"): assert attr_first not in result assert attr_second not in result finally: topo_tls_ldapi.resume_all_replicas() user_m1.delete() def test_suffix_exists(topo_tls_ldapi): """Check if wrong suffix is provided, server is giving Error: Failed to validate suffix. :id: ce75debc-c07f-4e72-8787-8f99cbfaf1e2 :customerscenario: True :setup: Two supplier replication :steps: 1. Run ds-replcheck with wrong suffix (Non Existing) :expectedresults: 1. 
It should be unsuccessful """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') if ds_is_newer("1.4.1.2"): tool_cmd = [ds_replcheck_path, 'online', '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] else: tool_cmd = [ds_replcheck_path, '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] result1 = subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') result = result1.communicate() assert "Failed to validate suffix" in result[0] def test_check_missing_tombstones(topo_tls_ldapi): """Check missing tombstone entries is not reported. :id: 93067a5a-416e-4243-9418-c4dfcf42e093 :customerscenario: True :setup: Two supplier replication :steps: 1. Pause replication between supplier and replica 2. Add and delete an entry on the supplier 3. Run ds-replcheck 4. Verify there are NO complaints about missing entries/tombstones :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. It should be successful """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] try: topo_tls_ldapi.pause_all_replicas() users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) user0 = users_m1.create_test_user(1000) user0.delete() for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() assert "entries missing on replica" not in result finally: topo_tls_ldapi.resume_all_replicas() def test_dsreplcheck_with_password_file(topo_tls_ldapi, tmpdir): """Check ds-replcheck works if password file is provided with -y option. :id: 0d847ec7-6eaf-4cb5-a9c6-e4a5a1778f93 :customerscenario: True :setup: Two supplier replication :steps: 1. 
Create a password file with the default password of the server. 2. Run ds-replcheck with -y option (used to pass password file) :expectedresults: 1. It should be successful 2. It should be successful """ m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') f = tmpdir.mkdir("my_dir").join("password_file.txt") f.write(PW_DM) if ds_is_newer("1.4.1.2"): tool_cmd = [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] else: tool_cmd = [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') @pytest.mark.ds51102 @pytest.mark.bz1836428 @pytest.mark.skipif(ds_is_older('1.4.1'), reason='Not implemented') def test_dsreplcheck_timeout_connection_mechanisms(topo_tls_ldapi): """Check that ds-replcheck timeout option works with various connection mechanisms :id: aeeb99c9-09e2-45dc-bd75-9f95409babe7 :customerscenario: True :setup: Two supplier replication :steps: 1. Create two suppliers with various connection mechanisms configured 2. Run ds-replcheck with -t option :expectedresults: 1. Success 2. 
Success """ OUTPUT = 'Supplier and Replica are in perfect synchronization' m1 = topo_tls_ldapi.ms["supplier1"] m2 = topo_tls_ldapi.ms["supplier2"] ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') replcheck_cmd = [[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '-t', '120'], [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport), '-t', '120'], [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts', '-t', '120'], [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid), '-t', '120']] log.info('Run ds-replcheck with -t option') for connection in replcheck_cmd: result = subprocess.check_output(connection) assert OUTPUT in ensure_str(result) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/dynamic_plugins/000077500000000000000000000000001421664411400255355ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/dynamic_plugins/__init__.py000066400000000000000000000000661421664411400276500ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Dynamic Plugins """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py000066400000000000000000000404201421664411400323330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # 
Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Dec 09, 2014 @author: mreynolds ''' import logging import ldap.sasl import pytest from lib389.tasks import * from lib389.replica import ReplicationManager from lib389.config import LDBMConfig from lib389._constants import * from lib389.topologies import topology_m2 from ..plugins import acceptance_test from . import stress_tests pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) def check_replicas(topology_m2): """Check that replication is in sync and working""" m1 = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] log.info('Checking if replication is in sync...') repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication_topology(topology_m2) # # Verify the databases are identical. There should not be any "user, entry, employee" entries # log.info('Checking if the data is the same between the replicas...') # Check the supplier try: entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(uid=person*)(uid=entry*)(uid=employee*))") if len(entries) > 0: log.error('Supplier database has incorrect data set!\n') assert False except ldap.LDAPError as e: log.fatal('Unable to search db on supplier: ' + e.message['desc']) assert False # Check the consumer try: entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(uid=person*)(uid=entry*)(uid=employee*))") if len(entries) > 0: log.error('Consumer database in not consistent with supplier database') assert False except ldap.LDAPError as e: log.fatal('Unable to search db on consumer: ' + e.message['desc']) assert False log.info('Data is consistent across the replicas.\n') #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_acceptance(topology_m2): """Exercise each plugin and its main features, while changing the configuration without restarting the server. 
:id: 96136538-0151-4b09-9933-0e0cbf2c786c :setup: 2 Supplier Instances :steps: 1. Pause all replication 2. Set nsslapd-dynamic-plugins to on 3. Try to update LDBM config entry 4. Go through all plugin basic functionality 5. Resume replication 6. Go through all plugin basic functionality again 7. Check that data in sync and replication is working :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ m1 = topology_m2.ms["supplier1"] msg = ' (no replication)' replication_run = False # First part of the test should be without replication topology_m2.pause_all_replicas() # First enable dynamic plugins m1.config.replace('nsslapd-dynamic-plugins', 'on') # Test that critical plugins can be updated even though the change might not be applied ldbm_config = LDBMConfig(m1) ldbm_config.replace('description', 'test') while True: # First run the tests with replication disabled, then rerun them with replication set up ############################################################################ # Test plugin functionality ############################################################################ log.info('####################################################################') log.info('Testing Dynamic Plugins Functionality' + msg + '...') log.info('####################################################################\n') acceptance_test.check_all_plugins(topology_m2) log.info('####################################################################') log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.') log.info('####################################################################\n') if replication_run: # We're done. 
break else: log.info('Resume replication and run everything one more time') topology_m2.resume_all_replicas() replication_run = True msg = ' (replication enabled)' time.sleep(1) ############################################################################ # Check replication, and data are in sync ############################################################################ check_replicas(topology_m2) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_memory_corruption(topology_m2): """Check the plugins for memory corruption issues while dynamic plugins option is enabled :id: 96136538-0151-4b09-9933-0e0cbf2c7862 :setup: 2 Supplier Instances :steps: 1. Pause all replication 2. Set nsslapd-dynamic-plugins to on 3. Try to update LDBM config entry 4. Restart the plugin many times in a linked list fashion restarting previous and preprevious plugins in the list of all plugins 5. Run the functional test 6. Repeat 4 and 5 steps for all plugins 7. Resume replication 8. Go through 4-6 steps once more 9. Check that data in sync and replication is working :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. 
Success """ m1 = topology_m2.ms["supplier1"] msg = ' (no replication)' replication_run = False # First part of the test should be without replication topology_m2.pause_all_replicas() # First enable dynamic plugins m1.config.replace('nsslapd-dynamic-plugins', 'on') # Test that critical plugins can be updated even though the change might not be applied ldbm_config = LDBMConfig(m1) ldbm_config.replace('description', 'test') while True: # First run the tests with replication disabled, then rerun them with replication set up ############################################################################ # Test the stability by exercising the internal lists, callabcks, and task handlers ############################################################################ log.info('####################################################################') log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...') log.info('####################################################################\n') prev_plugin_test = None prev_prev_plugin_test = None for plugin_test in acceptance_test.func_tests: # # Restart the plugin several times (and prev plugins) - work that linked list # plugin_test(topology_m2, "restart") if prev_prev_plugin_test: prev_prev_plugin_test(topology_m2, "restart") plugin_test(topology_m2, "restart") if prev_plugin_test: prev_plugin_test(topology_m2, "restart") plugin_test(topology_m2, "restart") # Now run the functional test plugin_test(topology_m2, "dynamic") # Set the previous tests if prev_plugin_test: prev_prev_plugin_test = prev_plugin_test prev_plugin_test = plugin_test log.info('####################################################################') log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.') log.info('####################################################################\n') if replication_run: # We're done. 
break else: log.info('Resume replication and run everything one more time') topology_m2.resume_all_replicas() replication_run = True msg = ' (replication enabled)' time.sleep(1) ############################################################################ # Check replication, and data are in sync ############################################################################ check_replicas(topology_m2) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.tier2 def test_stress(topology_m2): """Test plugins while under a big load. Perform the test 5 times :id: 96136538-0151-4b09-9933-0e0cbf2c7863 :setup: 2 Supplier Instances :steps: 1. Pause all replication 2. Set nsslapd-dynamic-plugins to on 3. Try to update LDBM config entry 4. Do one run through all tests 5. Enable Referential integrity and MemberOf plugins 6. Launch three new threads to add a bunch of users 7. While we are adding users restart the MemberOf and Linked Attributes plugins many times 8. Wait for the 'adding' threads to complete 9. Now launch three threads to delete the users 10. Restart both the MemberOf, Referential integrity and Linked Attributes plugins during these deletes 11. Wait for the 'deleting' threads to complete 12. Now make sure both the MemberOf and Referential integrity plugins still work correctly 13. Cleanup the stress tests (delete the group entry) 14. Perform 4-13 steps five times 15. Resume replication 16. Go through 4-14 steps once more 17. Check that data in sync and replication is working :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. 
Success """ m1 = topology_m2.ms["supplier1"] msg = ' (no replication)' replication_run = False stress_max_runs = 5 # First part of the test should be without replication topology_m2.pause_all_replicas() # First enable dynamic plugins m1.config.replace('nsslapd-dynamic-plugins', 'on') # Test that critical plugins can be updated even though the change might not be applied ldbm_config = LDBMConfig(m1) ldbm_config.replace('description', 'test') while True: # First run the tests with replication disabled, then rerun them with replication set up log.info('Do one run through all tests ' + msg + '...') acceptance_test.check_all_plugins(topology_m2) log.info('####################################################################') log.info('Stressing Dynamic Plugins' + msg + '...') log.info('####################################################################\n') stress_tests.configureMO(m1) stress_tests.configureRI(m1) stress_count = 0 while stress_count < stress_max_runs: log.info('####################################################################') log.info('Running stress test' + msg + '. Run (%d/%d)...' 
% (stress_count + 1, stress_max_runs)) log.info('####################################################################\n') # Launch three new threads to add a bunch of users add_users = stress_tests.AddUsers(m1, 'employee', True) add_users.start() add_users2 = stress_tests.AddUsers(m1, 'entry', True) add_users2.start() add_users3 = stress_tests.AddUsers(m1, 'person', True) add_users3.start() time.sleep(1) # While we are adding users restart the MO plugin and an idle plugin m1.plugins.disable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.disable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_MEMBER_OF) m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) time.sleep(1) m1.plugins.disable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_MEMBER_OF) time.sleep(2) m1.plugins.disable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_MEMBER_OF) m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) m1.plugins.disable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_MEMBER_OF) m1.plugins.disable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_MEMBER_OF) # Wait for the 'adding' threads to complete add_users.join() add_users2.join() add_users3.join() # Now launch three threads to delete the users del_users = stress_tests.DelUsers(m1, 'employee') del_users.start() del_users2 = stress_tests.DelUsers(m1, 'entry') del_users2.start() del_users3 = stress_tests.DelUsers(m1, 'person') del_users3.start() time.sleep(1) # Restart both the MO, RI plugins during these deletes, and an idle plugin m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) m1.plugins.disable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) time.sleep(1) m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) time.sleep(1) m1.plugins.disable(name=PLUGIN_MEMBER_OF) time.sleep(1) 
m1.plugins.enable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) m1.plugins.disable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_MEMBER_OF) m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) time.sleep(2) m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) time.sleep(1) m1.plugins.disable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_MEMBER_OF) time.sleep(1) m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) # Wait for the 'deleting' threads to complete del_users.join() del_users2.join() del_users3.join() # Now make sure both the MO and RI plugins still work correctly acceptance_test.func_tests[8](topology_m2, "dynamic") # RI plugin acceptance_test.func_tests[5](topology_m2, "dynamic") # MO plugin # Cleanup the stress tests stress_tests.cleanup(m1) stress_count += 1 log.info('####################################################################') log.info('Successfully Stressed Dynamic Plugins' + msg + '. Completed (%d/%d)' % (stress_count, stress_max_runs)) log.info('####################################################################\n') if replication_run: # We're done. 
break else: log.info('Resume replication and run everything one more time') topology_m2.resume_all_replicas() replication_run = True msg = ' (replication enabled)' time.sleep(1) ############################################################################ # Check replication, and data are in sync ############################################################################ check_replicas(topology_m2) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/dynamic_plugins/notice_for_restart_test.py000066400000000000000000000023651421664411400330470ustar00rootroot00000000000000 # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import ldap import time import pytest from lib389.topologies import topology_st as topology from lib389.utils import ds_is_older from lib389.paths import Paths from lib389.plugins import MemberOfPlugin default_paths = Paths() pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) @pytest.mark.skipif(ds_is_older('1.4.4.0'), reason="Notice not generated in older versions") def test_notice_when_dynamic_not_enabled(topology): """ Test to show the logged noticed when dynamic plugins is disabled. :id: e4923789-c187-44b0-8734-34f26cbae06e :setup: Standalone instance :steps: 1. Ensure Dynamic Plugins is disabled 2. Enable a plugin :expectedresults: 1. Success 2. Notice generated """ st = topology.standalone st.config.set("nsslapd-dynamic-plugins", "off") st.restart() mo = MemberOfPlugin(st) mo.enable() # Now check the error log. 
pattern = ".*nsslapd-dynamic-plugins is off.*" assert st.ds_error_log.match(pattern) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py000066400000000000000000000104011421664411400306500ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Dec 16, 2014 @author: mreynolds ''' import logging import threading import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.plugins import ReferentialIntegrityPlugin, MemberOfPlugin from lib389.utils import * from lib389.idm.directorymanager import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) NUM_USERS = 250 GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX # Configure Referential Integrity Plugin for stress test def configureRI(inst): plugin = ReferentialIntegrityPlugin(inst) plugin.enable() plugin.replace('referint-membership-attr', 'uniquemember') # Configure MemberOf Plugin for stress test def configureMO(inst): plugin = MemberOfPlugin(inst) plugin.enable() plugin.replace('memberofgroupattr', 'uniquemember') def cleanup(conn): try: conn.delete_s(GROUP_DN) except ldap.LDAPError as e: log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc']) assert False class DelUsers(threading.Thread): def __init__(self, inst, rdnval): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.rdnval = rdnval def run(self): dm = DirectoryManager(self.inst) conn = dm.bind() idx = 0 log.info('DelUsers - Deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') while idx < NUM_USERS: USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX try: conn.delete_s(USER_DN) except ldap.LDAPError as e: if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: log.fatal('DeleteUsers: 
failed to delete (' + USER_DN + ') error: ' + e.message['desc']) assert False idx += 1 conn.close() log.info('DelUsers - Finished deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') class AddUsers(threading.Thread): def __init__(self, inst, rdnval, addToGroup): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.addToGroup = addToGroup self.rdnval = rdnval def run(self): # Start adding users dm = DirectoryManager(self.inst) conn = dm.bind() idx = 0 if self.addToGroup: try: conn.add_s(Entry((GROUP_DN, {'objectclass': b'top groupOfNames groupOfUniqueNames'.split(), 'cn': 'stress-group'}))) except ldap.LDAPError as e: if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: log.fatal('AddUsers: failed to add group (' + GROUP_DN + ') error: ' + e.message['desc']) assert False log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') while idx < NUM_USERS: USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX try: conn.add_s(Entry((USER_DN, {'objectclass': b'top nsOrgPerson'.split(), 'uid': ensure_bytes('user' + str(idx))}))) except ldap.LDAPError as e: if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) assert False if self.addToGroup: # Add the user to the group try: conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', ensure_bytes(USER_DN))]) except ldap.LDAPError as e: if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc']) assert False idx += 1 conn.close() log.info('AddUsers - Finished adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/entryuuid/000077500000000000000000000000001421664411400244005ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/entryuuid/__init__.py000066400000000000000000000000611421664411400265060ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Entry uuid """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/entryuuid/basic_test.py000066400000000000000000000240351421664411400270760ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import time import shutil import uuid from lib389.idm.user import nsUserAccounts, UserAccounts from lib389.idm.account import Accounts from lib389.idm.domain import Domain from lib389.topologies import topology_st as topology from lib389.backend import Backends from lib389.paths import Paths from lib389.utils import ds_is_older from lib389._constants import * from lib389.plugins import EntryUUIDPlugin default_paths = Paths() pytestmark = pytest.mark.tier1 DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/entryuuid/') IMPORT_UUID_A = "973e1bbf-ba9c-45d4-b01b-ff7371fd9008" UUID_BETWEEN = "eeeeeeee-0000-0000-0000-000000000000" IMPORT_UUID_B = "f6df8fe9-6b30-46aa-aa13-f0bf755371e8" UUID_MIN = "00000000-0000-0000-0000-000000000000" UUID_MAX = "ffffffff-ffff-ffff-ffff-ffffffffffff" def _entryuuid_import_and_search(topology): # 1 ldif_dir = topology.standalone.get_ldif_dir() target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-2020_03_30_13_14_47.ldif') import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-2020_03_30_13_14_47.ldif') shutil.copyfile(import_ldif, target_ldif) os.chmod(target_ldif, 0o777) be = Backends(topology.standalone).get('userRoot') task = be.import_ldif([target_ldif]) task.wait() assert(task.is_complete() and 
task.get_exit_code() == 0) accounts = Accounts(topology.standalone, DEFAULT_SUFFIX) # 2 - positive eq test r2 = accounts.filter("(entryUUID=%s)" % IMPORT_UUID_A) assert(len(r2) == 1) r3 = accounts.filter("(entryuuid=%s)" % IMPORT_UUID_B) assert(len(r3) == 1) # 3 - negative eq test r4 = accounts.filter("(entryuuid=%s)" % UUID_MAX) assert(len(r4) == 0) # 4 - le search r5 = accounts.filter("(entryuuid<=%s)" % UUID_BETWEEN) assert(len(r5) == 1) # 5 - ge search r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN) assert(len(r6) == 1) # 6 - le 0 search r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN) assert(len(r7) == 0) # 7 - ge f search r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX) assert(len(r8) == 0) # 8 - export db task = be.export_ldif() task.wait() assert(task.is_complete() and task.get_exit_code() == 0) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_indexed_import_and_search(topology): """ Test that an ldif of entries containing entryUUID's can be indexed and searched correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ordering, so we check these are correct. :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9 :setup: Standalone instance :steps: 1. Import the db from the ldif 2. EQ search for an entryuuid (match) 3. EQ search for an entryuuid that does not exist 4. LE search for an entryuuid lower (1 res) 5. GE search for an entryuuid greater (1 res) 6. LE for the 0 uuid (0 res) 7. GE for the f uuid (0 res) 8. export the db to ldif :expectedresults: 1. Success 2. 1 match 3. 0 match 4. 1 match 5. 1 match 6. 0 match 7. 0 match 8. success """ # Assert that the index correctly exists. 
be = Backends(topology.standalone).get('userRoot') indexes = be.get_indexes() indexes.ensure_state(properties={ 'cn': 'entryUUID', 'nsSystemIndex': 'false', 'nsIndexType': ['eq', 'pres'], }) _entryuuid_import_and_search(topology) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_unindexed_import_and_search(topology): """ Test that an ldif of entries containing entryUUID's can be UNindexed searched correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and ordering, so we check these are correct. :id: b652b54d-f009-464b-b5bd-299a33f97243 :setup: Standalone instance :steps: 1. Import the db from the ldif 2. EQ search for an entryuuid (match) 3. EQ search for an entryuuid that does not exist 4. LE search for an entryuuid lower (1 res) 5. GE search for an entryuuid greater (1 res) 6. LE for the 0 uuid (0 res) 7. GE for the f uuid (0 res) 8. export the db to ldif :expectedresults: 1. Success 2. 1 match 3. 0 match 4. 1 match 5. 1 match 6. 0 match 7. 0 match 8. success """ # Assert that the index does NOT exist for this test. be = Backends(topology.standalone).get('userRoot') indexes = be.get_indexes() try: idx = indexes.get('entryUUID') idx.delete() except ldap.NO_SUCH_OBJECT: # It's already not present, move along, nothing to see here. pass _entryuuid_import_and_search(topology) # Test entryUUID generation @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_generation_on_add(topology): """ Test that when an entry is added, the entryuuid is added. :id: a7439b0a-dcee-4cd6-b8ef-771476c0b4f6 :setup: Standalone instance :steps: 1. Create a new entry in the db 2. Check it has an entry uuid :expectedresults: 1. Success 2. An entry uuid is present """ # Step one - create a user! 
account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user() # Step two - does it have an entryuuid? euuid = account.get_attr_val_utf8('entryUUID') print(euuid) assert(euuid is not None) # Test fixup task @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_fixup_task(topology): """Test that when an entries without UUID's can have one generated via the fixup process. :id: ad42bba2-ffb2-4c22-a37d-cbe7bcf73d6b :setup: Standalone instance :steps: 1. Disable the entryuuid plugin 2. Create an entry 3. Enable the entryuuid plugin 4. Run the fixup 5. Assert the entryuuid now exists 6. Restart and check they persist :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Suddenly EntryUUID! 6. Still has EntryUUID! """ # 1. Disable the plugin plug = EntryUUIDPlugin(topology.standalone) plug.disable() topology.standalone.restart() # 2. create the account account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user(uid=2000) euuid = account.get_attr_val_utf8('entryUUID') assert(euuid is None) # 3. enable the plugin plug.enable() topology.standalone.restart() # 4. run the fix up # For now set the log level to high! topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) task = plug.fixup(DEFAULT_SUFFIX) task.wait() assert(task.is_complete() and task.get_exit_code() == 0) topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) # 5.1 Assert the uuid on the user. euuid_user = account.get_attr_val_utf8('entryUUID') assert(euuid_user is not None) # 5.2 Assert it on the domain entry. domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX) euuid_domain = domain.get_attr_val_utf8('entryUUID') assert(euuid_domain is not None) # Assert it persists after a restart. topology.standalone.restart() # 6.1 Assert the uuid on the use. 
euuid_user_2 = account.get_attr_val_utf8('entryUUID') assert(euuid_user_2 == euuid_user) # 6.2 Assert it on the domain entry. euuid_domain_2 = domain.get_attr_val_utf8('entryUUID') assert(euuid_domain_2 == euuid_domain) @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_import_and_fixup_of_invalid_values(topology): """ Test that when we import a database with an invalid entryuuid that it is accepted *and* that subsequently we can fix the invalid entryuuid during a fixup. :id: ec8ef3a7-3cd2-4cbd-b6f1-2449fa17be75 :setup: Standalone instance :steps: 1. Import the db from the ldif 2. Check the entryuuid is invalid 3. Run the fixup 4. Check the entryuuid is now valid (regenerated) :expectedresults: 1. Success 2. The entryuuid is invalid 3. Success 4. The entryuuid is valid """ # 1. Import the db ldif_dir = topology.standalone.get_ldif_dir() target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-invalid.ldif') import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-invalid.ldif') shutil.copyfile(import_ldif, target_ldif) os.chmod(target_ldif, 0o777) be = Backends(topology.standalone).get('userRoot') task = be.import_ldif([target_ldif]) task.wait() assert(task.is_complete() and task.get_exit_code() == 0) # 2. Check the entryuuid is invalid account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).get("demo_user") euuid = account.get_attr_val_utf8('entryUUID') assert(euuid == "INVALID_UUID") # 3. Run the fixup topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) plug = EntryUUIDPlugin(topology.standalone) task = plug.fixup(DEFAULT_SUFFIX) task.wait() assert(task.is_complete() and task.get_exit_code() == 0) topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) # 4. 
Check the entryuuid is valid euuid = account.get_attr_val_utf8('entryUUID') print(f"â„ï¸ account entryUUID -> {euuid}"); assert(euuid != "INVALID_UUID") # Raises an error if invalid uuid.UUID(euuid) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/entryuuid/replicated_test.py000066400000000000000000000043511421664411400301300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import logging from lib389.topologies import topology_m2 as topo_m2 from lib389.idm.user import nsUserAccounts from lib389.paths import Paths from lib389.utils import ds_is_older from lib389._constants import * from lib389.replica import ReplicationManager default_paths = Paths() pytestmark = pytest.mark.tier1 @pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") def test_entryuuid_with_replication(topo_m2): """ Check that entryuuid works with replication :id: a5f15bf9-7f63-473a-840c-b9037b787024 :setup: two node mmr :steps: 1. Create an entry on one server 2. Wait for replication 3. Assert it is on the second :expectedresults: 1. Success 1. Success 1. 
Success """ server_a = topo_m2.ms["supplier1"] server_b = topo_m2.ms["supplier2"] server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) repl = ReplicationManager(DEFAULT_SUFFIX) account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000) euuid_a = account_a.get_attr_vals_utf8('entryUUID') print("🧩 %s" % euuid_a) assert(euuid_a is not None) assert(len(euuid_a) == 1) repl.wait_for_replication(server_a, server_b) account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000") euuid_b = account_b.get_attr_vals_utf8('entryUUID') print("🧩 %s" % euuid_b) server_a.config.loglevel(vals=(ErrorLog.DEFAULT,)) server_b.config.loglevel(vals=(ErrorLog.DEFAULT,)) assert(euuid_b is not None) assert(len(euuid_b) == 1) assert(euuid_b == euuid_a) account_b.set("description", "update") repl.wait_for_replication(server_b, server_a) euuid_c = account_a.get_attr_vals_utf8('entryUUID') print("🧩 %s" % euuid_c) assert(euuid_c is not None) assert(len(euuid_c) == 1) assert(euuid_c == euuid_a) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/export/000077500000000000000000000000001421664411400236715ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/export/__init__.py000066400000000000000000000000661421664411400260040ustar00rootroot00000000000000""" :Requirement: 389-ds-base: DataBase Export """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/export/export_test.py000066400000000000000000000140031421664411400266210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import os import pytest import subprocess from lib389.topologies import topology_st as topo from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME from lib389.utils import * from lib389.paths import Paths from lib389.cli_base import FakeArgs from lib389.cli_ctl.dbtasks import dbtasks_db2ldif pytestmark = pytest.mark.tier1 def run_db2ldif_and_clear_logs(topology, instance, backend, ldif, output_msg, encrypt=False, repl=False): args = FakeArgs() args.instance = instance.serverid args.backend = backend args.encrypted = encrypt args.replication = repl args.ldif = ldif dbtasks_db2ldif(instance, topology.logcap.log, args) log.info('checking output msg') if not topology.logcap.contains(output_msg): log.error('The output message is not the expected one') assert False log.info('Clear the log') topology.logcap.flush() @pytest.mark.bz1806978 @pytest.mark.ds51188 @pytest.mark.skipif(ds_is_older("1.3.10", "1.4.2"), reason="Not implemented") def test_dbtasks_db2ldif_with_non_accessible_ldif_file_path(topo): """Export with dsctl db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) :id: ca91eda7-27b1-4750-a013-531a63d3f5b0 :setup: Standalone Instance - entries imported in the db :steps: 1. Stop the server 2. Launch db2ldif with an non accessible ldif file path 3. Catch the reported error code 4. check the error reported in the errors log :expected results: 1. Operation successful 2. Operation properly fails, without crashing 3. An error code different from 139 (segmentation fault) should be reported 4. 
'ERR - bdb_db2ldif - db2ldif: userRoot: can't open file' should be reported """ export_ldif = '/tmp/nonexistent/export.ldif' log.info("Stopping the instance...") topo.standalone.stop() log.info("Performing an offline export to a non accessible ldif file path - should fail properly") expected_output="db2ldif failed" run_db2ldif_and_clear_logs(topo, topo.standalone, DEFAULT_BENAME, export_ldif, expected_output) log.info("parsing the errors log to search for the error reported") if ds_is_newer("1.3.10"): search_str = str(topo.standalone.ds_error_log.match(r".*ERR - bdb_db2ldif - db2ldif: userRoot: can't open*"))[1:-1] else: search_str = str(topo.standalone.ds_error_log.match(r".*ERR - ldbm_back_ldbm2ldif - db2ldif: can't open*"))[1:-1] assert len(search_str) > 0 log.info("error string : %s" % search_str) log.info("Restarting the instance...") topo.standalone.start() @pytest.mark.bz1806978 @pytest.mark.ds51188 @pytest.mark.skipif(ds_is_older("1.4.3.8"), reason="bz1806978 not fixed") def test_db2ldif_cli_with_non_accessible_ldif_file_path(topo): """Export with ns-slapd db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) :id: ca91eda7-27b1-4750-a013-531a63d3f5b0 :setup: Standalone Instance - entries imported in the db :steps: 1. Stop the server 2. Launch db2ldif with an non accessible ldif file path 3. Catch the reported error code 4. check the error reported in the errors log :expected results: 1. Operation successful 2. Operation properly fails, without crashing 3. An error code different from 139 (segmentation fault) should be reported 4. 
'ERR - bdb_db2ldif - db2ldif: userRoot: can't open file' should be reported """ export_ldif = '/tmp/nonexistent/export.ldif' db2ldif_cmd = os.path.join(topo.standalone.ds_paths.sbin_dir, 'dsctl') log.info("Stopping the instance...") topo.standalone.stop() log.info("Performing an offline export to a non accessible ldif file path - should fail properly") try: subprocess.check_call([db2ldif_cmd, topo.standalone.serverid, 'db2ldif', 'userroot', export_ldif]) except subprocess.CalledProcessError as e: if format(e.returncode) == '139': log.error('db2ldif had a Segmentation fault (core dumped)') assert False else: log.info('db2ldif failed properly: error ({})'.format(e.returncode)) assert True log.info("parsing the errors log to search for the error reported") search_str = str(topo.standalone.ds_error_log.match(r".*ERR - bdb_db2ldif - db2ldif: userRoot: can't open*"))[1:-1] assert len(search_str) > 0 log.info("error string : %s" % search_str) log.info("Restarting the instance...") topo.standalone.start() @pytest.mark.bz1860291 @pytest.mark.xfail(reason="bug 1860291") @pytest.mark.skipif(ds_is_older("1.3.10", "1.4.2"), reason="Not implemented") def test_dbtasks_db2ldif_with_non_accessible_ldif_file_path_output(topo): """Export with db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) :id: fcc63387-e650-40a7-b643-baa68c190037 :setup: Standalone Instance - entries imported in the db :steps: 1. Stop the server 2. Launch db2ldif with a non accessible ldif file path 3. check the error reported in the command output :expected results: 1. Operation successful 2. Operation properly fails 3. 
An clear error message is reported as output of the cli """ export_ldif = '/tmp/nonexistent/export.ldif' log.info("Stopping the instance...") topo.standalone.stop() log.info("Performing an offline export to a non accessible ldif file path - should fail and output a clear error message") expected_output="No such file or directory" run_db2ldif_and_clear_logs(topo, topo.standalone, DEFAULT_BENAME, export_ldif, expected_output) # This test will possibly have to be updated with the error message reported after bz1860291 fix log.info("Restarting the instance...") topo.standalone.start() 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/000077500000000000000000000000001421664411400236355ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/__init__.py000066400000000000000000000000631421664411400257450ustar00rootroot00000000000000""" :Requirement: 389-ds-base: LDAP Filters """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/basic_filter_test.py000066400000000000000000000025371421664411400277030ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 RED Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import pytest, os from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.account import Accounts pytestmark = pytest.mark.tier0 def test_search_attr(topo): """Test filter can search attributes :id: 9a1b0a4b-111c-4105-866d-4288f143ee07 :setup: Standalone instance :steps: 1. Add test entry 2. make search :expectedresults: 1. Entry should be added 2. 
Operation should succeed """ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(1, 5): user1 = user.create_test_user(uid=i) user1.set("mail", "AnujBorah{}@ok.com".format(i)) # Testing filter is working for any king of attr user = Accounts(topo.standalone, DEFAULT_SUFFIX) assert len(user.filter('(mail=*)')) == 4 assert len(user.filter('(uid=*)')) == 5 # Testing filter is working for other filters assert len(user.filter("(objectclass=inetOrgPerson)")) == 4 if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/bitw_filter_test.py000066400000000000000000000365501421664411400275710ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ This script will test different type of Filers. """ import os import ldap import pytest from lib389.topologies import topology_st as topo from lib389._constants import PW_DM from lib389.idm.user import UserAccounts from lib389.idm.account import Accounts from lib389.plugins import BitwisePlugin from lib389.schema import Schema from lib389.backend import Backends from lib389.idm.domain import Domain pytestmark = pytest.mark.tier1 FILTER_TESTPERSON = "objectclass=testperson" FILTER_TESTERPERSON = "objectclass=testerperson" FILTER_CONTROL = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=514))" SUFFIX = 'dc=anuj,dc=com' class CreateUsers(): """ Will create users with different testUserAccountControl, testUserStatus """ def __init__(self, *args): self.args = args def user_create(self): """ Will create users with different testUserAccountControl, testUserStatus """ self.args[0].create(properties={ 'sn': self.args[1], 'uid': self.args[1], 'cn': self.args[1], 'userpassword': PW_DM, 'givenName': 'bit', 'mail': 
'{}@redhat.com'.format(self.args[1]), 'objectclass': 'top account posixaccount organizationalPerson ' 'inetOrgPerson testperson'.split(), 'testUserAccountControl': [i for i in self.args[2]], 'testUserStatus': [i for i in self.args[3]], 'uidNumber': str(self.args[4]), 'gidNumber': str(self.args[4]), 'homeDirectory': self.args[1] }) def create_users_other(self): """ Will create users with different testUserAccountControl(8388608) """ self.args[0].create(properties={ 'telephoneNumber': '98989819{}'.format(self.args[1]), 'uid': 'anuj_{}'.format(self.args[1]), 'sn': 'testwise_{}'.format(self.args[1]), 'cn': 'bit testwise{}'.format(self.args[1]), 'userpassword': PW_DM, 'givenName': 'anuj_{}'.format(self.args[1]), 'mail': 'anuj_{}@example.com'.format(self.args[1]), 'objectclass': 'top account posixaccount organizationalPerson ' 'inetOrgPerson testperson'.split(), 'testUserAccountControl': '8388608', 'testUserStatus': 'PasswordExpired', 'uidNumber': str(self.args[1]), 'gidNumber': str(self.args[1]), 'homeDirectory': '/home/' + 'testwise_{}'.format(self.args[1]) }) def user_create_52(self): """ Will create users with different testUserAccountControl(16777216) """ self.args[0].create(properties={ 'telephoneNumber': '98989819{}'.format(self.args[1]), 'uid': 'bditwfilter52_test{}'.format(self.args[1]), 'sn': 'bditwfilter52_test{}'.format(self.args[1]), 'cn': 'bit bditwfilter52_test{}'.format(self.args[1]), 'userpassword': PW_DM, 'givenName': 'bditwfilter52_test{}'.format(self.args[1]), 'mail': 'bditwfilter52_test{}@example.com'.format(self.args[1]), 'objectclass': 'top account posixaccount organizationalPerson ' 'inetOrgPerson testperson'.split(), 'testUserAccountControl': '16777216', 'testUserStatus': 'PasswordExpired', 'uidNumber': str(self.args[1]), 'gidNumber': str(self.args[1]), 'homeDirectory': '/home/' + 'bditwfilter52_test{}'.format(self.args[1]) }) @pytest.fixture(scope="module") def _create_schema(request, topo): Schema(topo.standalone).\ add('attributetypes', ["( 
NAME 'testUserAccountControl' DESC 'Attribute Bitwise filteri-Multi-Valued'" "SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )", "( NAME 'testUserStatus' DESC 'State of User account active/disabled'" "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )"]) Schema(topo.standalone).\ add('objectClasses', "( NAME 'testperson' SUP top STRUCTURAL MUST " "( sn $ cn $ testUserAccountControl $ " "testUserStatus )MAY( userPassword $ telephoneNumber $ " "seeAlso $ description ) X-ORIGIN 'BitWise' )") # Creating Backend backends = Backends(topo.standalone) backend = backends.create(properties={'nsslapd-suffix': SUFFIX, 'cn': 'AnujRoot'}) # Creating suffix suffix = Domain(topo.standalone, SUFFIX).create(properties={'dc': 'anuj'}) # Creating users users = UserAccounts(topo.standalone, suffix.dn, rdn=None) for user in [('btestuser1', ['514'], ['Disabled'], 100), ('btestuser2', ['65536'], ['PasswordNeverExpired'], 101), ('btestuser3', ['8388608'], ['PasswordExpired'], 102), ('btestuser4', ['256'], ['TempDuplicateAccount'], 103), ('btestuser5', ['16777216'], ['TrustedAuthDelegation'], 104), ('btestuser6', ['528'], ['AccountLocked'], 105), ('btestuser7', ['513'], ['AccountActive'], 106), ('btestuser11', ['655236'], ['TestStatus1'], 107), ('btestuser12', ['665522'], ['TestStatus2'], 108), ('btestuser13', ['266552'], ['TestStatus3'], 109), ('btestuser8', ['98536', '99512', '99528'], ['AccountActive', 'PasswordExxpired', 'AccountLocked'], 110), ('btestuser9', ['87536', '912', ], ['AccountActive', 'PasswordNeverExpired', ], 111), ('btestuser10', ['89536', '97546', '96579'], ['TestVerify1', 'TestVerify2', 'TestVerify3'], 112)]: CreateUsers(users, user[0], user[1], user[2], user[3]).user_create() def fin(): """ Deletes entries after the test. 
""" for user in users.list(): user.delete() suffix.delete() backend.delete() request.addfinalizer(fin) def increasesizelimit(topo, size): """ Will change nsslapd-sizelimit to desire value """ topo.standalone.config.set('nsslapd-sizelimit', str(size)) def test_bitwise_plugin_status(topo, _create_schema): """Checking bitwise plugin enabled or not, by default it should be enabled. If disabled, this test case would enable the plugin :id: 3ade097e-9ebd-11e8-b2e7-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ # Assert plugin BitwisePlugin is on assert BitwisePlugin(topo.standalone).status() def test_search_disabled_accounts(topo, _create_schema): """Searching for integer Disabled Accounts. Bitwise AND operator should match each integer, so it should return one entry. :id: 467ef0ea-9ebd-11e8-a37f-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 def test_plugin_can_be_disabled(topo, _create_schema): """Verify whether plugin can be disabled :id: 4ed21588-9ebd-11e8-b862-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ bitwise = BitwisePlugin(topo.standalone) assert bitwise.status() # make BitwisePlugin off bitwise.disable() topo.standalone.restart() assert not bitwise.status() def test_plugin_is_disabled(topo, _create_schema): """Testing Bitwise search when plugin is disabled Bitwise search filter should give proper error message :id: 54bebbfe-9ebd-11e8-8ca4-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. 
It should pass """ with pytest.raises(ldap.UNAVAILABLE_CRITICAL_EXTENSION): Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL) def test_enabling_works_fine(topo, _create_schema): """Enabling the plugin to make sure re-enabling works fine :id: 5a2fc2b8-9ebd-11e8-8e18-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ # make BitwisePlugin off bitwise = BitwisePlugin(topo.standalone) bitwise.disable() # make BitwisePlugin on again bitwise.enable() topo.standalone.restart() assert bitwise.status() assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 @pytest.mark.parametrize("filter_name, value", [ (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=513))", 1), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=16777216))", 1), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))", 1), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=5))", 3), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=8))", 3), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 5), (f"(& ({FILTER_TESTERPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 0), (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=98536)" "(testUserAccountControl:1.2.840.113556.1.4.803:=912)))", 0), (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.804:=87)" "(testUserAccountControl:1.2.840.113556.1.4.804:=91)))", 8), (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 1), (f"(& ({FILTER_TESTPERSON}) (|(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 8), (f"(& ({FILTER_TESTPERSON}) 
(|(testUserAccountControl:1.2.840.113556.1.4.803:=89)" "(testUserAccountControl:1.2.840.113556.1.4.803:=536)))", 0), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=x))", 13), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=&\\*#$%))", 13), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-65536))", 0), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-1))", 0), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-))", 13), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=))", 13), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=\\*))", 13), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=\\*))", 0), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=6552))", 0), (f"(& ({FILTER_TESTPERSON}\\))(testUserAccountControl:1.2.840.113556.1.4.804:=6552))", 0), (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=65536))", 5) ]) def test_all_together(topo, _create_schema, filter_name, value): """Target_set_with_ldap_instead_of_ldap :id: ba7f5106-9ebd-11e8-9ad6-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ assert len(Accounts(topo.standalone, SUFFIX).filter(filter_name)) == value def test_5_entries(topo, _create_schema): """Bitwise filter test for 5 entries By default the size limit is 2000 Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. IncrSizeLimit 52000 :id: e939aa64-9ebd-11e8-815e-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. 
It should pass """ filter51 = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))" increasesizelimit(topo, 52000) users = UserAccounts(topo.standalone, SUFFIX, rdn=None) for i in range(5): CreateUsers(users, i).create_users_other() assert len(Accounts(topo.standalone, SUFFIX).filter(filter51)) == 6 increasesizelimit(topo, 2000) def test_5_entries1(topo, _create_schema): """Bitwise filter for 5 entries By default the size limit is 2000 Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. IncrSizeLimit 52000 :id: ef8b050c-9ebd-11e8-979d-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ filter52 = f"(& ({FILTER_TESTPERSON})(testUserAccountControl:1.2.840.113556.1.4.804:=16777216))" increasesizelimit(topo, 52000) users = UserAccounts(topo.standalone, SUFFIX, rdn=None) for i in range(5): CreateUsers(users, i).user_create_52() assert len(Accounts(topo.standalone, SUFFIX).filter(filter52)) == 6 increasesizelimit(topo, 2000) def test_5_entries3(topo, _create_schema): """Bitwise filter test for entries By default the size limit is 2000 Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. IncrSizeLimit 52000 :id: f5b06648-9ebd-11e8-b08f-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ increasesizelimit(topo, 52000) assert len(Accounts(topo.standalone, SUFFIX).filter( "(testUserAccountControl:1.2.840.113556.1.4.803:=8388608, " "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 increasesizelimit(topo, 2000) def test_5_entries4(topo, _create_schema): """Bitwise filter for entries By default the size limit is 2000 Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. 
IncrSizeLimit 52000 :id: fa5f7a4e-9ebd-11e8-ad54-8c16451d917b :setup: Standalone :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expectedresults: 1. It should pass 2. It should pass """ increasesizelimit(topo, 52000) assert len(Accounts(topo.standalone, SUFFIX). filter("(testUserAccountControl:1.2.840.113556.1.4.804:=16777216," "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 increasesizelimit(topo, 2000) if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/complex_filters_test.py000066400000000000000000000132651421664411400304540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- import logging import pytest import os import ldap from lib389._constants import * from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ALL_FILTERS = [] # Parameterized filters to test AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=first1))", 1), ("(&(uid=uid1)(&(sn=last1)(givenname=first1)))", 1), ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=first1))))", 1), ("(&(uid=*)(sn=last3)(givenname=*))", 1), ("(&(uid=*)(&(sn=last3)(givenname=*)))", 1), ("(&(uid=uid5)(&(&(sn=*))(&(givenname=*))))", 1), ("(&(objectclass=*)(uid=*)(sn=last*))", 5), ("(&(objectclass=*)(uid=*)(sn=last1))", 1)] OR_FILTERS = [("(|(uid=uid1)(sn=last1)(givenname=first1))", 1), ("(|(uid=uid1)(|(sn=last1)(givenname=first1)))", 1), ("(|(uid=uid1)(|(|(sn=last1))(|(givenname=first1))))", 1), 
("(|(objectclass=*)(sn=last1)(|(givenname=first1)))", 18), ("(|(&(objectclass=*)(sn=last1))(|(givenname=first1)))", 1), ("(|(&(objectclass=*)(sn=last))(|(givenname=first1)))", 1)] NOT_FILTERS = [("(&(uid=uid1)(!(cn=NULL)))", 1), ("(&(!(cn=NULL))(uid=uid1))", 1), ("(&(uid=*)(&(!(uid=1))(!(givenname=first1))))", 5)] MIX_FILTERS = [("(&(|(uid=uid1)(uid=NULL))(sn=last1))", 1), ("(&(|(uid=uid1)(uid=NULL))(!(sn=NULL)))", 1), ("(&(|(uid=uid1)(sn=last2))(givenname=first1))", 1), ("(|(&(uid=uid1)(!(uid=NULL)))(sn=last2))", 2), ("(|(&(uid=uid1)(uid=NULL))(sn=last2))", 1), ("(&(uid=uid5)(sn=*)(cn=*)(givenname=*)(uid=u*)(sn=la*)" + "(cn=full*)(givenname=f*)(uid>=u)(!(givenname=NULL)))", 1), ("(|(&(objectclass=*)(sn=last))(&(givenname=first1)))", 1)] ZERO_AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=NULL))", 0), ("(&(uid=uid1)(&(sn=last1)(givenname=NULL)))", 0), ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL))))", 0), ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL)(sn=*)))(|(sn=NULL)))", 0), ("(&(uid=uid1)(&(&(sn=last*))(&(givenname=first*)))(&(sn=NULL)))", 0)] ZERO_OR_FILTERS = [("(|(uid=NULL)(sn=NULL)(givenname=NULL))", 0), ("(|(uid=NULL)(|(sn=NULL)(givenname=NULL)))", 0), ("(|(uid=NULL)(|(|(sn=NULL))(|(givenname=NULL))))", 0)] RANGE_FILTERS = [("(uid>=uid3)", 3), ("(&(uid=*)(uid>=uid3))", 3), ("(|(uid>=uid3)(uid<=uid5))", 6), ("(&(uid>=uid3)(uid<=uid5))", 3), ("(|(&(uid>=uid3)(uid<=uid5))(uid=*))", 6)] LONG_FILTERS = [("(|(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + "(uid=*))", 6)] # Combine all the filters ALL_FILTERS += AND_FILTERS 
ALL_FILTERS += OR_FILTERS ALL_FILTERS += NOT_FILTERS ALL_FILTERS += MIX_FILTERS ALL_FILTERS += ZERO_AND_FILTERS ALL_FILTERS += ZERO_OR_FILTERS ALL_FILTERS += LONG_FILTERS ALL_FILTERS += RANGE_FILTERS @pytest.fixture(scope="module") def setup(topo, request): """Add teset users """ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in range(1, 6): users.create(properties={ 'uid': 'uid%s' % i, 'cn': 'full%s' % i, 'sn': 'last%s' % i, 'givenname': 'first%s' % i, 'uidNumber': '%s' % i, 'gidNumber': '%s' % i, 'homeDirectory': '/home/user%s' % i }) @pytest.mark.parametrize("myfilter, expected_results", ALL_FILTERS) def test_filters(topo, setup, myfilter, expected_results): """Test various complex search filters and verify they are returning the expected number of entries :id: ee9ead27-5f63-4aed-844d-c39b99138c8d :parametrized: yes :setup: standalone :steps: 1. Issue search 2. Check the number of returned entries against the expected number :expectedresults: 1. Search succeeds 2. The number of returned entries matches the expected number """ log.info("Testing filter \"{}\"...".format(myfilter)) try: entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, myfilter) if len(entries) != expected_results: log.fatal("Search filter \"{}\") returned {} entries, but we expected {}".format( myfilter, len(entries), expected_results)) assert False except ldap.LDAPError as e: log.fatal("Search filter \"{}\") generated ldap error: {}".format(myfilter, str(e))) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_cert_test.py000066400000000000000000000047221421664411400275550ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- """ verify and testing Filter from a search """ import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389.idm.account import Accounts from lib389.nss_ssl import NssSsl from lib389.utils import search_filter_escape_bytes pytestmark = pytest.mark.tier1 def test_positive(topo): """Test User certificate field :id: e984ac40-63d1-4176-ad1e-0cbe71391b5f :setup: Standalone :steps: 1. Create entries with userCertificate field. 2. Try to search/filter them with userCertificate field. :expected results: 1. Pass 2. Pass """ # SETUP TLS topo.standalone.stop() NssSsl(topo.standalone).reinit() NssSsl(topo.standalone).create_rsa_ca() NssSsl(topo.standalone).create_rsa_key_and_cert() # Create user NssSsl(topo.standalone).create_rsa_user('testuser1') NssSsl(topo.standalone).create_rsa_user('testuser2') # Creating cert users topo.standalone.start() users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for count in range(1, 3): user = users_people.create_test_user(uid=count, gid=count) tls_locs = NssSsl(topo.standalone).get_rsa_user(f'testuser{count}') # {'ca': ca_path, 'key': key_path, 'crt': crt_path} user.enroll_certificate(tls_locs['crt_der_path']) assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(usercertificate=*)") assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(userCertificate;binary=*)") user1_cert = users_people.list()[0].get_attr_val("userCertificate;binary") assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( f'(userCertificate;binary={search_filter_escape_bytes(user1_cert)})')[0].dn == \ 'uid=test_user_1,ou=people,dc=example,dc=com' user2_cert = users_people.list()[1].get_attr_val("userCertificate;binary") assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( f'(userCertificate;binary={search_filter_escape_bytes(user2_cert)})')[0].dn == \ 'uid=test_user_2,ou=people,dc=example,dc=com' if __name__ 
== '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_index_match_test.py000066400000000000000000001315651421664411400311110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ Test the matching rules feature . """ import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st from lib389.cos import CosTemplates from lib389.index import Indexes from lib389.schema import Schema import ldap pytestmark = pytest.mark.tier1 TESTED_MATCHING_RULES = ["bitStringMatch", "caseExactIA5Match", "caseExactMatch", "caseExactOrderingMatch", "caseExactSubstringsMatch", "caseExactIA5SubstringsMatch", "generalizedTimeMatch", "generalizedTimeOrderingMatch", "booleanMatch", "caseIgnoreIA5Match", "caseIgnoreIA5SubstringsMatch", "caseIgnoreMatch", "caseIgnoreOrderingMatch", "caseIgnoreSubstringsMatch", "caseIgnoreListMatch", "caseIgnoreListSubstringsMatch", "objectIdentifierMatch", "directoryStringFirstComponentMatch", "objectIdentifierFirstComponentMatch", "distinguishedNameMatch", "integerMatch", "integerOrderingMatch", "integerFirstComponentMatch", "uniqueMemberMatch", "numericStringMatch", "numericStringOrderingMatch", "numericStringSubstringsMatch", "telephoneNumberMatch", "telephoneNumberSubstringsMatch", "octetStringMatch", "octetStringOrderingMatch"] LIST_CN_INDEX = [('attroctetStringMatch', ['pres', 'eq']), ('attrbitStringMatch', ['pres', 'eq']), ('attrcaseExactIA5Match', ['pres', 'eq', 'sub']), ('attrcaseExactMatch', ['pres', 'eq', 'sub']), ('attrgeneralizedTimeMatch', ['pres', 'eq']), ('attrbooleanMatch', ['pres', 'eq']), ('attrcaseIgnoreIA5Match', ['pres', 'eq', 'sub']), ('attrcaseIgnoreMatch', ['pres', 'eq', 'sub']), 
('attrcaseIgnoreListMatch', ['pres', 'eq', 'sub']), ('attrobjectIdentifierMatch', ['pres', 'eq']), ('attrdistinguishedNameMatch', ['pres', 'eq']), ('attrintegerMatch', ['pres', 'eq']), ('attruniqueMemberMatch', ['pres', 'eq']), ('attrnumericStringMatch', ['pres', 'eq', 'sub']), ('attrtelephoneNumberMatch', ['pres', 'eq', 'sub']), ('attrdirectoryStringFirstComponentMatch', ['pres', 'eq']), ('attrobjectIdentifierFirstComponentMatch', ['pres', 'eq']), ('attrintegerFirstComponentMatch', ['pres', 'eq'])] LIST_ATTR_INDEX = [ {'attr': 'attrbitStringMatch', 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, {'attr': 'attrcaseExactIA5Match', 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, {'attr': 'attrcaseExactMatch', 'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', 'çÉliné Ändrè', 'çÉliné Ändrè']}, {'attr': 'attrgeneralizedTimeMatch', 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', '20100218171304Z', '20100218171305Z'], 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, {'attr': 'attrbooleanMatch', 'positive': ['FALSE'], 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', 'sprain4', 'sprain4', 'sprain5', 
'sprain5', 'sprain6', 'sprain6']}, {'attr': 'attrcaseIgnoreMatch', 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrcaseIgnoreListMatch', 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, {'attr': 'attrobjectIdentifierMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrobjectIdentifierFirstComponentMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', 
'1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdistinguishedNameMatch', 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, {'attr': 'attrintegerMatch', 'positive': ['-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attrintegerFirstComponentMatch', 'positive': ['-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attruniqueMemberMatch', 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, {'attr': 'attrnumericStringMatch', 'positive': ['00002', '00003', '00004', '00005', '00006'], 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', '00005', '00006', '00006']}, {'attr': 'attrtelephoneNumberMatch', 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 
# Mod-add datasets: the 'positive' values create an entry cleanly; adding
# the 'negative' set afterwards must raise TYPE_OR_VALUE_EXISTS because it
# contains equality-duplicates of the values already present.
LIST_MOD_ATTR_ALL = [
    {'attr': 'attrcaseExactMatch',
     'positive': ['ÇélIné Ändrè'],
     'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè']},
    {'attr': 'attrgeneralizedTimeMatch',
     'positive': ['20100218171300Z'],
     'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', '20100218171303Z',
                  '20100218171304Z', '20100218171305Z']},
    {'attr': 'attrbooleanMatch',
     'positive': ['TRUE'],
     'negative': ['TRUE', 'FALSE']},
    {'attr': 'attrcaseIgnoreIA5Match',
     'positive': ['sprain1'],
     'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']},
    {'attr': 'attrcaseIgnoreMatch',
     'positive': ['ÇélIné Ändrè1'],
     'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4',
                  'ÇélIné Ändrè5', 'ÇélIné Ändrè6']},
    {'attr': 'attrcaseIgnoreListMatch',
     'positive': ['foo1$bar'],
     'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']},
    {'attr': 'attrbitStringMatch',
     'positive': ["'0001'B"],
     'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]},
    {'attr': 'attrcaseExactIA5Match',
     'positive': ['Sprain'],
     'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']},
    {'attr': 'attrobjectIdentifierMatch',
     'positive': ['1.3.6.1.4.1.1466.115.121.1.15'],
     'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24',
                  '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40',
                  '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']},
    {'attr': 'attrdirectoryStringFirstComponentMatch',
     'positive': ['ÇélIné Ändrè1'],
     'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4',
                  'ÇélIné Ändrè5', 'ÇélIné Ändrè6']},
    {'attr': 'attrobjectIdentifierFirstComponentMatch',
     'positive': ['1.3.6.1.4.1.1466.115.121.1.15'],
     'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24',
                  '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40',
                  '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']},
    {'attr': 'attrdistinguishedNameMatch',
     'positive': ['cn=foo1,cn=bar'],
     'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar',
                  'cn=foo5,cn=bar', 'cn=foo6,cn=bar']},
    {'attr': 'attrintegerMatch',
     'positive': ['-2'],
     'negative': ['-2', '-1', '0', '1', '2', '3']},
    {'attr': 'attrintegerFirstComponentMatch',
     'positive': ['-2'],
     'negative': ['-2', '-1', '0', '1', '2', '3']},
    {'attr': 'attruniqueMemberMatch',
     'positive': ["cn=foo1,cn=bar#'0001'B"],
     'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B",
                  "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]},
    {'attr': 'attrnumericStringMatch',
     'positive': ['00001'],
     'negative': ['00001', '00002', '00003', '00004', '00005', '00006']},
    # BUG FIX: this dataset holds telephone numbers but was labelled
    # 'attrgeneralizedTimeMatch' (a duplicate of the entry above), so
    # telephoneNumberMatch was never exercised by the mod-add tests.
    {'attr': 'attrtelephoneNumberMatch',
     'positive': ['+1 408 555 4798'],
     'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585',
                  '+1 408 555 9187', '+1 408 555 9423']},
    {'attr': 'attroctetStringMatch',
     'positive': ['AAAAAAAAAAAAAAE='],
     'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=',
                  'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}]
['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', 'çÉliné Ändrè', 'çÉliné Ändrè']}, {'attr': 'attrbitStringMatch', 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, {'attr': 'attrgeneralizedTimeMatch', 'positive': ['20100218171300Z', '20100218171301Z', '20100218171302Z', '20100218171303Z', '20100218171304Z', '20100218171305Z'], 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, {'attr': 'attrbooleanMatch', 'positive': ['TRUE', 'FALSE'], 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, {'attr': 'attrcaseIgnoreMatch', 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrcaseIgnoreListMatch', 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, {'attr': 'attrobjectIdentifierMatch', 'positive': 
['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrobjectIdentifierFirstComponentMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdistinguishedNameMatch', 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 
'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, {'attr': 'attrintegerMatch', 'positive': ['-2', '-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attrintegerFirstComponentMatch', 'positive': ['-2', '-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attruniqueMemberMatch', 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, {'attr': 'attrnumericStringMatch', 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', '00005', '00006', '00006']}, {'attr': 'attrtelephoneNumberMatch', 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, {'attr': 'attroctetStringMatch', 'positive': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] LIST_MOD_DEL_ALL 
= [ {'attr': 'attrbitStringMatch', 'positive_negative': ["'0001'B"]}, {'attr': 'attrcaseExactIA5Match', 'positive_negative': ['Sprain']}, {'attr': 'attrbitStringMatch', 'positive_negative': ["'0001'B"]}, {'attr': 'attrcaseExactMatch', 'positive_negative': ['ÇélIné Ändrè']}, {'attr': 'attrgeneralizedTimeMatch', 'positive_negative': ['20100218171300Z']}, {'attr': 'attrbooleanMatch', 'positive_negative': ['TRUE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive_negative': ['sprain1']}, {'attr': 'attrcaseIgnoreMatch', 'positive_negative': ['ÇélIné Ändrè1']}, {'attr': 'attrcaseIgnoreListMatch', 'positive_negative': ['foo1$bar']}, {'attr': 'attrobjectIdentifierMatch', 'positive_negative': ['1.3.6.1.4.1.1466.115.121.1.15']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive_negative': ['ÇélIné Ändrè1']}, {'attr': 'attrintegerMatch', 'positive_negative': ['-2']}, {'attr': 'attrintegerFirstComponentMatch', 'positive_negative': ['cn=foo1,cn=bar']}, {'attr': 'attrintegerFirstComponentMatch', 'positive_negative': ['-2']}, {'attr': 'attruniqueMemberMatch', 'positive_negative': ["cn=foo1,cn=bar#'0001'B"]}, {'attr': 'attrnumericStringMatch', 'positive_negative': ['00001']}, {'attr': 'attrtelephoneNumberMatch', 'positive_negative': ['+1 408 555 4798']}, {'attr': 'attroctetStringMatch', 'positive_negative': ['AAAAAAAAAAAAAAE=']}] @pytest.fixture(scope="module") def _create_index_entry(topology_st): """Create index entries. :id: 9c93aec8-b87d-11e9-93b0-8c16451d917b :setup: Standalone :steps: 1. Test index entries can be created. :expected results: 1. 
Pass """ indexes = Indexes(topology_st.standalone) for cn_cn, index_type in LIST_CN_INDEX: indexes.create(properties={ 'cn': cn_cn, 'nsSystemIndex': 'true', 'nsIndexType': index_type }) @pytest.mark.parametrize("index", LIST_ATTR_INDEX) def test_valid_invalid_attributes(topology_st, _create_index_entry, index): """Test valid and invalid values of attributes :id: 93dc9e02-b87d-11e9-b39b-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses that matching rule 2. Delete existing entry 3. Create entry with an attribute that uses that matching rule providing duplicate values that are duplicates according to the equality matching rule. :expected results: 1. Pass 2. Pass 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) # Entry with extensibleObject entry = cos.create(properties={'cn': 'addentry' + index['attr'], index['attr']: index['positive']}) entry.delete() with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): cos.create(properties={'cn': 'addentry' + index['attr'].split('attr')[1], index['attr']: index['negative']}) @pytest.mark.parametrize("mod", LIST_MOD_ATTR_ALL) def test_mods(topology_st, _create_index_entry, mod): """Test valid and invalid values of attributes mods :id: 8c15874c-b87d-11e9-9c5d-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses matching mod 2. Add an attribute that uses that matching mod providing duplicate values that are duplicates according to the equality matching. 3. Delete existing entry :expected results: 1. Pass 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) 3. 
Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) # Entry with extensibleObject entry = cos.create(properties={'cn': 'addentry'+mod['attr'], mod['attr']: mod['positive']}) with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): entry.add(mod['attr'], mod['negative']) entry.delete() @pytest.mark.parametrize("mode", LIST_MOD_REPLACE_ALL) def test_mods_replace(topology_st, _create_index_entry, mode): """Test mods replace :id: 2dd46b7a-b928-11e9-91dd-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses matching mode 2. Add an attribute that uses that matching mode providing duplicate values that are duplicates according to the equality matching. 3. Delete existing entry :expected results: 1. Pass 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) 3. Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) # Entry with extensibleObject entry = cos.create(properties={'cn': 'addentry'+mode['attr'], mode['attr']: mode['positive']}) with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): entry.replace(mode['attr'], mode['negative']) entry.delete() @pytest.mark.parametrize("mode", LIST_MOD_DEL_ALL) def test_mods_delete(topology_st, _create_index_entry, mode): """Test mods delete :id: 1dda055e-b928-11e9-b5c1-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses matching mode 2. Add an attribute that uses that matching mode providing duplicate values that are duplicates according to the equality matching. 3. Delete existing entry :expected results: 1. Pass 2. Fail(ldap.NO_SUCH_ATTRIBUTE) 3. 
Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) # Entry with extensibleObject entry = cos.create(properties={'cn': 'addentry'+mode['attr'], mode['attr']: mode['positive_negative']}) entry.remove(mode['attr'], mode['positive_negative'][0]) with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): entry.remove(mode['attr'], mode['positive_negative'][0]) entry.delete() ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " "DESC 'for testing matching rules' EQUALITY octetStringMatch " "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " "'for testing matching rules' EQUALITY caseExactMatch ORDERING " "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.15 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " "'for testing matching rules' EQUALITY booleanMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " "X-ORIGIN 'matching rule tests' 
)", "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC " "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " "'for testing matching rules' EQUALITY integerMatch ORDERING " "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC " "'for testing matching rules' EQUALITY numericStringMatch ORDERING " "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " "DESC 'for 
testing matching rules' EQUALITY directoryStringFirstComponentMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] LIST_ATTR_TO_CREATE = [ ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), ('entrybitStringMatch0', "'0001'B"), ('entrybitStringMatch1', "'0010'B"), ('entrybitStringMatch2', "'0011'B"), ('entrybitStringMatch3', "'0100'B"), ('entrybitStringMatch4', "'0101'B"), ('entrybitStringMatch5', "'0110'B"), ('entrycaseExactIA5Match0', "Sprain"), ('entrycaseExactIA5Match1', "sPrain"), ('entrycaseExactIA5Match2', "spRain"), ('entrycaseExactIA5Match3', "sprAin"), ('entrycaseExactIA5Match4', "spraIn"), ('entrycaseExactIA5Match5', "sprain"), ('entrycaseExactMatch0', "ÇélIné Ändrè"), ('entrycaseExactMatch1', "ÇéliNé Ändrè"), ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), ('entrycaseExactMatch3', "Çéliné Ändrè"), ('entrycaseExactMatch4', "çÉliné Ändrè"), ('entrygeneralizedTimeMatch0', "20100218171300Z"), ('entrygeneralizedTimeMatch1', "20100218171301Z"), ('entrygeneralizedTimeMatch2', "20100218171302Z"), ('entrygeneralizedTimeMatch3', "20100218171303Z"), ('entrygeneralizedTimeMatch4', "20100218171304Z"), ('entrygeneralizedTimeMatch5', "20100218171305Z"), ('entrybooleanMatch0', "TRUE"), ('entrybooleanMatch1', "FALSE"), ('entrycaseIgnoreIA5Match0', "sprain1"), 
('entrycaseIgnoreIA5Match1', "sprain2"), ('entrycaseIgnoreIA5Match2', "sprain3"), ('entrycaseIgnoreIA5Match3', "sprain4"), ('entrycaseIgnoreIA5Match4', "sprain5"), ('entrycaseIgnoreIA5Match5', "sprain6"), ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), ('entrycaseIgnoreListMatch0', "foo1$bar"), ('entrycaseIgnoreListMatch1', "foo2$bar"), ('entrycaseIgnoreListMatch2', "foo3$bar"), ('entrycaseIgnoreListMatch3', "foo4$bar"), ('entrycaseIgnoreListMatch4', "foo5$bar"), ('entrycaseIgnoreListMatch5', "foo6$bar"), ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), ('entryobjectIdentifierMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), ('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), ('entryintegerMatch0', "-2"), ('entryintegerMatch1', "-1"), ('entryintegerMatch2', "0"), ('entryintegerMatch3', "1"), ('entryintegerMatch4', "2"), ('entryintegerMatch5', "3"), ('entryuniqueMemberMatch0', "cn=foo1,cn=bar#'0001'B"), ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), ('entryuniqueMemberMatch3', "cn=foo4,cn=bar#'0100'B"), ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), ('entrynumericStringMatch0', "00001"), 
('entrynumericStringMatch1', "00002"), ('entrynumericStringMatch2', "00003"), ('entrynumericStringMatch3', "00004"), ('entrynumericStringMatch4', "00005"), ('entrynumericStringMatch5', "00006"), ('entrytelephoneNumberMatch0', "+1 408 555 4798"), ('entrytelephoneNumberMatch1', "+1 408 555 5625"), ('entrytelephoneNumberMatch2', "+1 408 555 6201"), ('entrytelephoneNumberMatch3', "+1 408 555 8585"), ('entrytelephoneNumberMatch4', "+1 408 555 9187"), ('entrytelephoneNumberMatch5', "+1 408 555 9423"), ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), ('entrydirectoryStringFirstComponentMatch5', "ÇélIné Ändrè6"), ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), ('entryintegerFirstComponentMatch0', "-2"), ('entryintegerFirstComponentMatch1', "-1"), ('entryintegerFirstComponentMatch2', "0"), ('entryintegerFirstComponentMatch3', "1"), ('entryintegerFirstComponentMatch4', "2"), ('entryintegerFirstComponentMatch5', "3")] @pytest.fixture(scope="module") def _create_entries(topology_st): """ Add attribute types to schema and Create filter entries(Entry with extensibleObject) """ for attribute in ATTR: Schema(topology_st.standalone).add('attributetypes', attribute) cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) # Entry with extensibleObject for attr, value in LIST_ATTR_TO_CREATE: 
cos.create(properties={ 'cn': attr, 'attr' + attr.split('entry')[1][:-1]: value }) FILTER_VALUES = [ ["(attrbitStringMatch='0001'B)", 1, "(attrbitStringMatch:bitStringMatch:='000100000'B)"], ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, "(attrcaseExactIA5Match=SPRAIN)"], ["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, "(attrcaseExactMatch>=çéliné ändrè)"], ["(attrcaseExactIA5Match=Sprain)", 1, "(attrgeneralizedTimeMatch=20300218171300Z)"], ["(attrbooleanMatch=TRUE)", 1, "(attrgeneralizedTimeMatch>=20300218171300Z)"], ["(attrcaseIgnoreIA5Match=sprain1)", 1, "(attrcaseIgnoreIA5Match=sprain9999)"], ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], ["(attrcaseIgnoreListMatch=foo1$bar)", 1, "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], ["(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè1)", 1, "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], ["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, "(attrintegerMatch=-20)"], ["(attrintegerMatch=-2)", 1, "(attrintegerMatch>=20)"], ["(attrintegerMatch>=-2)", 6, "(attrintegerFirstComponentMatch=-20)"], ["(attrintegerFirstComponentMatch=-2)", 1, "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, "(attrnumericStringMatch=000000001)"], ["(attrnumericStringMatch=00001)", 1, "(attrnumericStringMatch>=01)"], ["(attrnumericStringMatch>=00001)", 6, 
"(attrtelephoneNumberMatch=+2 408 555 4798)"], ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, "(attroctetStringMatch>=AAAAAAAAAAABAQE=)"], ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9998)"]] def test_search_positive_negative(topology_st, _create_entries): """Filters with positive and with no output. :id: abe3e6dd-9ecc-12e8-adf0-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1.For valid filer output should match the exact value given. 2. For invalid filter there should not be any output. :expected results: 1. Pass 2. Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) for attr, value, negative_filter in FILTER_VALUES: assert len(cos.filter(attr)) == value assert not cos.filter(negative_filter) LIST_EXT_ATTR_COUNT = [ ("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), ("(attrdirectoryStringFirstComponentMatch:directoryString" "FirstComponentMatch:=ÇélIné Ändrè1)", 1), ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), 
("(attrintegerMatch:integerMatch:=-2)", 1), ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), ("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), ("(attrcaseExactMatch=*é Ä*)", 5), ("(attrcaseExactIA5Match=*Sprain*)", 1), ("(attrcaseExactIA5Match=Sprain*)", 1), ("(attrcaseExactIA5Match=*Sprain)", 1), ("(attrcaseExactIA5Match=*rai*)", 3), ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), ("(attrcaseIgnoreIA5Match=sprain1*)", 1), ("(attrcaseIgnoreIA5Match=*sprain1)", 1), ("(attrcaseIgnoreIA5Match=*rai*)", 6), ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), ("(attrcaseIgnoreMatch=*é Ä*)", 6), ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), ("(attrcaseIgnoreListMatch=*1$b*)", 1), ("(attrnumericStringMatch=*00001*)", 1), ("(attrnumericStringMatch=00001*)", 1), ("(attrnumericStringMatch=*00001)", 1), ("(attrnumericStringMatch=*000*)", 6), ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), ("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), ("(attrtelephoneNumberMatch=* 55*)", 6)] @pytest.mark.parametrize("attr, value", LIST_EXT_ATTR_COUNT) def test_do_extensible_search(topology_st, _create_entries, attr, value): """Match filter and output. 
:id: abe3e6dd-9ecc-11e8-adf0-8c16451d917c :parametrized: yes :setup: Standalone :steps: 1. Filer output should match the exact value given. :expected results: 1. Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) assert len(cos.filter(attr)) == value if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_indexing_test.py000066400000000000000000000136251421664411400304270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ verify and testing indexing Filter from a search """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389.idm.account import Accounts from lib389.cos import CosTemplates from lib389.schema import Schema pytestmark = pytest.mark.tier1 FILTERS = ["(|(|(ou=nothing1)(ou=people))(|(ou=nothing2)(ou=nothing3)))", "(|(|(ou=people)(ou=nothing1))(|(ou=nothing2)(ou=nothing3)))", "(|(|(ou=nothing1)(ou=nothing2))(|(ou=people)(ou=nothing3)))", "(|(|(ou=nothing1)(ou=nothing2))(|(ou=nothing3)(ou=people)))", "(&(sn<=0000000000000000)(givenname>=FFFFFFFFFFFFFFFF))", "(&(sn>=0000000000000000)(sn<=1111111111111111))", "(&(sn>=0000000000000000)(givenname<=FFFFFFFFFFFFFFFF))"] INDEXES = ["(uidNumber=18446744073709551617)", "(gidNumber=18446744073709551617)", "(MYINTATTR=18446744073709551617)", "(&(uidNumber=*)(!(uidNumber=18446744073709551617)))", "(&(gidNumber=*)(!(gidNumber=18446744073709551617)))", "(&(uidNumber=*)(!(gidNumber=18446744073709551617)))", "(&(myintattr=*)(!(myintattr=18446744073709551617)))", "(uidNumber>=-18446744073709551617)", "(gidNumber>=-18446744073709551617)", "(uidNumber<=18446744073709551617)", 
"(gidNumber<=18446744073709551617)", "(myintattr<=18446744073709551617)"] INDEXES_FALSE = ["(gidNumber=54321)", "(uidNumber=54321)", "(myintattr=54321)", "(gidNumber<=-999999999999999999999999999999)", "(uidNumber<=-999999999999999999999999999999)", "(myintattr<=-999999999999999999999999999999)", "(gidNumber>=999999999999999999999999999999)", "(uidNumber>=999999999999999999999999999999)", "(myintattr>=999999999999999999999999999999)"] @pytest.fixture(scope="module") def _create_entries(topo): """ Will create necessary users for this script. """ # Creating Users users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for count in range(3): users_people.create(properties={ 'ou': ['Accounting', 'People'], 'cn': f'User {count}F', 'sn': f'{count}' * 16, 'givenname': 'FFFFFFFFFFFFFFFF', 'uid': f'user{count}F', 'mail': f'user{count}F@test.com', 'manager': f'uid=user{count}F,ou=People,{DEFAULT_SUFFIX}', 'userpassword': PW_DM, 'homeDirectory': '/home/' + f'user{count}F', 'uidNumber': '1000', 'gidNumber': '2000', }) cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') for user, number, des in [('a', '18446744073709551617', '2^64+1'), ('b', '18446744073709551618', '2^64+1'), ('c', '-18446744073709551617', '-2^64+1'), ('d', '-18446744073709551618', '-2^64+1'), ('e', '0', '0'), ('f', '2', '2'), ('g', '-2', '-2')]: cos.create(properties={ 'cn': user, 'uidnumber': number, 'gidnumber': number, 'myintattr': number, 'description': f'uidnumber value {des} - gidnumber is same but not indexed' }) @pytest.mark.parametrize("real_value", FILTERS) def test_positive(topo, _create_entries, real_value): """Test positive filters :id: 57243326-91ae-11e9-aca3-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Try to pass filter rules as per the condition . :expected results: 1. 
Pass """ assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(real_value) def test_indexing_schema(topo, _create_entries): """Test with schema :id: 67a2179a-91ae-11e9-9a33-8c16451d917b :setup: Standalone :steps: 1. Add attribute types to Schema. 2. Try to pass filter rules as per the condition . :expected results: 1. Pass 2. Pass """ cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') Schema(topo.standalone).add('attributetypes', "( 8.9.10.11.12.13.14.15 NAME 'myintattr' DESC 'for integer " "syntax index ordering testing' EQUALITY integerMatch ORDERING " "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )") topo.standalone.restart() assert cos.filter("(myintattr>=-18446744073709551617)") @pytest.mark.parametrize("real_value", INDEXES) def test_indexing(topo, _create_entries, real_value): """Test positive index filters :id: 7337589a-91ae-11e9-ad44-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Try to pass filter rules as per the condition . :expected results: 1. Pass """ cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') assert cos.filter(real_value) @pytest.mark.parametrize("real_value", INDEXES_FALSE) def test_indexing_negative(topo, _create_entries, real_value): """Test negative index filters :id: 7e19deae-91ae-11e9-900c-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Try to pass negative filter rules as per the condition . :expected results: 1. Fail """ cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') assert not cos.filter(real_value) if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_logic_test.py000066400000000000000000000465461421664411400277270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccount, UserAccounts pytestmark = pytest.mark.tier1 """ This test case asserts that various logical filters apply correctly and as expected. This is to assert that we have correct and working search operations, especially related to indexed content from filterindex.c and idl_sets. important to note, some tests check greater than 10 elements to assert that k-way intersect works, where as most of these actually hit the filtertest threshold so they early return. """ USER0_DN = 'uid=user0,ou=people,%s' % DEFAULT_SUFFIX USER1_DN = 'uid=user1,ou=people,%s' % DEFAULT_SUFFIX USER2_DN = 'uid=user2,ou=people,%s' % DEFAULT_SUFFIX USER3_DN = 'uid=user3,ou=people,%s' % DEFAULT_SUFFIX USER4_DN = 'uid=user4,ou=people,%s' % DEFAULT_SUFFIX USER5_DN = 'uid=user5,ou=people,%s' % DEFAULT_SUFFIX USER6_DN = 'uid=user6,ou=people,%s' % DEFAULT_SUFFIX USER7_DN = 'uid=user7,ou=people,%s' % DEFAULT_SUFFIX USER8_DN = 'uid=user8,ou=people,%s' % DEFAULT_SUFFIX USER9_DN = 'uid=user9,ou=people,%s' % DEFAULT_SUFFIX USER10_DN = 'uid=user10,ou=people,%s' % DEFAULT_SUFFIX USER11_DN = 'uid=user11,ou=people,%s' % DEFAULT_SUFFIX USER12_DN = 'uid=user12,ou=people,%s' % DEFAULT_SUFFIX USER13_DN = 'uid=user13,ou=people,%s' % DEFAULT_SUFFIX USER14_DN = 'uid=user14,ou=people,%s' % DEFAULT_SUFFIX USER15_DN = 'uid=user15,ou=people,%s' % DEFAULT_SUFFIX USER16_DN = 'uid=user16,ou=people,%s' % DEFAULT_SUFFIX USER17_DN = 'uid=user17,ou=people,%s' % DEFAULT_SUFFIX USER18_DN = 'uid=user18,ou=people,%s' % DEFAULT_SUFFIX USER19_DN = 'uid=user19,ou=people,%s' % DEFAULT_SUFFIX @pytest.fixture(scope="module") def topology_st_f(topology_st): # Add our users to the topology_st users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) for i in range(0, 20): users.create(properties={ 'uid': 'user%s' % i, 'cn': 'user%s' % i, 'sn': '%s' % i, 
'uidNumber': '%s' % i, 'gidNumber': '%s' % i, 'homeDirectory': '/home/user%s' % i }) demo_user = UserAccount(topology_st.standalone, "uid=demo_user,ou=people,dc=example,dc=com") demo_user.delete() # return it # print("ATTACH NOW") # import time # time.sleep(30) return topology_st.standalone def _check_filter(topology_st_f, filt, expect_len, expect_dns): # print("checking %s" % filt) results = topology_st_f.search_s("ou=people,%s" % DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL, filt, ['uid',]) assert len(results) == expect_len result_dns = [result.dn for result in results] assert set(expect_dns) == set(result_dns) def test_eq(topology_st_f): """Test filter logic with "equal to" operator :id: 1b0b7e59-a5ac-4825-8d36-525f4f0149a9 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(uid=user0)`` :expectedresults: 1. There should be 1 user listed user0 """ _check_filter(topology_st_f, '(uid=user0)', 1, [USER0_DN]) def test_sub(topology_st_f): """Test filter logic with "sub" :id: 8cfa946d-7ddf-4f8e-9f9f-39da8f35304e :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(uid=user*)`` :expectedresults: 1. There should be 20 users listed from user0 to user19 """ _check_filter(topology_st_f, '(uid=user*)', 20, [ USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN ]) def test_not_eq(topology_st_f): """Test filter logic with "not equal to" operator :id: 1422ec65-421d-473b-89ba-649f8decc1ab :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(!(uid=user0))`` :expectedresults: 1. 
There should be 19 users listed from user1 to user19 """ _check_filter(topology_st_f, '(!(uid=user0))', 19, [ USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN ]) # More not cases? def test_ranges(topology_st_f): """Test filter logic with range :id: cc7c25f0-6a6e-465b-8d32-7fcc1aec84ee :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(uid>=user5)`` 2. Search for test users with filter ``(uid<=user4)`` 3. Search for test users with filter ``(uid>=ZZZZ)`` 4. Search for test users with filter ``(uid<=aaaa)`` :expectedresults: 1. There should be 5 users listed from user5 to user9 2. There should be 15 users listed from user0 to user4 and from user10 to user19 3. There should not be any user listed 4. There should not be any user listed """ ### REMEMBER: user10 is less than user5 because it's strcmp!!! _check_filter(topology_st_f, '(uid>=user5)', 5, [ USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, ]) _check_filter(topology_st_f, '(uid<=user4)', 15, [ USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN ]) _check_filter(topology_st_f, '(uid>=ZZZZ)', 0, []) _check_filter(topology_st_f, '(uid<=aaaa)', 0, []) def test_and_eq(topology_st_f): """Test filter logic with "AND" operator :id: 4721fd7c-8d0b-43e6-b2e8-a5bac7674f99 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(uid=user0)(cn=user0))`` 2. Search for test users with filter ``(&(uid=user0)(cn=user1))`` 3. Search for test users with filter ``(&(uid=user0)(cn=user0)(sn=0))`` 4. Search for test users with filter ``(&(uid=user0)(cn=user1)(sn=0))`` 5. 
Search for test users with filter ``(&(uid=user0)(cn=user0)(sn=1))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should not be any user listed 3. There should be 1 user listed i.e. user0 4. There should not be any user listed 5. There should not be any user listed """ _check_filter(topology_st_f, '(&(uid=user0)(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(uid=user0)(cn=user1))', 0, []) _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(uid=user0)(cn=user1)(sn=0))', 0, []) _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=1))', 0, []) def test_range(topology_st_f): """Test filter logic with range :id: 617e6290-866e-4b5d-a300-d8f1715ad052 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(uid>=user5)(cn<=user7))`` :expectedresults: 1. There should be 3 users listed i.e. user5 to user7 """ _check_filter(topology_st_f, '(&(uid>=user5)(cn<=user7))', 3, [ USER5_DN, USER6_DN, USER7_DN ]) def test_and_allid_shortcut(topology_st_f): """Test filter logic with "AND" operator and shortcuts :id: f4784752-d269-4ceb-aada-fafe0a5fc14c :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(objectClass=*)(uid=user0)(cn=user0))`` 2. Search for test users with filter ``(&(uid=user0)(cn=user0)(objectClass=*))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should be 1 user listed i.e. user0 """ _check_filter(topology_st_f, '(&(objectClass=*)(uid=user0)(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(objectClass=*))', 1, [USER0_DN]) def test_or_eq(topology_st_f): """Test filter logic with "or" and "equal to" operators :id: a23a4fc9-0f5c-49ce-b1f7-6ac10bcd7763 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. 
Search for test users with filter ``|(uid=user0)(cn=user0)`` 2. Search for test users with filter ``(|(uid=user0)(uid=user1))`` 3. Search for test users with filter ``(|(uid=user0)(cn=user0)(sn=0))`` 4. Search for test users with filter ``(|(uid=user0)(uid=user1)(sn=0))`` 5. Search for test users with filter ``(|(uid=user0)(uid=user1)(uid=user2))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should be 2 users listed i.e. user0 and user1 3. There should be 1 user listed i.e. user0 4. There should be 2 users listed i.e. user0 and user1 5. There should be 3 users listed i.e. user0 to user2 """ _check_filter(topology_st_f, '(|(uid=user0)(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(uid=user0)(uid=user1))', 2, [USER0_DN, USER1_DN]) _check_filter(topology_st_f, '(|(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(sn=0))', 2, [USER0_DN, USER1_DN]) _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(uid=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) def test_and_not_eq(topology_st_f): """Test filter logic with "not equal" to operator :id: bd00cb2b-35bb-49c0-8387-f60a6ada7c87 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(uid=user0)(!(cn=user0)))`` 2. Search for test users with filter ``(&(uid=*)(!(uid=user0)))`` :expectedresults: 1. There should be no users listed 2. There should be 19 users listed i.e. 
user1 to user19 """ _check_filter(topology_st_f, '(&(uid=user0)(!(cn=user0)))', 0, []) _check_filter(topology_st_f, '(&(uid=*)(!(uid=user0)))', 19, [ USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN ]) def test_or_not_eq(topology_st_f): """Test filter logic with "OR and NOT" operators :id: 8f62f339-72c9-49e4-8126-b2a14e61b9c0 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(|(!(uid=user0))(!(uid=user1)))`` :expectedresults: 1. There should be 20 users listed i.e. user0 to user19 """ _check_filter(topology_st_f, '(|(!(uid=user0))(!(uid=user1)))', 20, [ USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN ]) def test_and_range(topology_st_f): """Test filter logic with range :id: 8e5a0e2a-4ee1-4cd7-b5ec-90ad4d3ace64 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(uid>=user5)(uid=user6))`` 2. Search for test users with filter ``(&(uid>=user5)(uid=user0))`` 3. Search for test users with filter ``(&(uid>=user5)(uid=user6)(sn=6))`` 4. Search for test users with filter ``(&(uid>=user5)(uid=user0)(sn=0))`` 5. Search for test users with filter ``(&(uid>=user5)(uid=user0)(sn=1))`` 6. Search for test users with filter ``(&(uid>=user5)(uid>=user6))`` 7. Search for test users with filter ``(&(uid>=user5)(uid>=user6)(uid>=user7))`` :expectedresults: 1. There should be 1 user listed i.e. user6 2. There should be no users listed 3. There should be 1 user listed i.e. user6 4. There should be no users listed 5. There should be no users listed 6. There should be 4 users listed i.e. user6 to user9 7. 
There should be 3 users listed i.e. user7 to user9 """ # These all hit shortcut cases. _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6))', 1, [USER6_DN]) _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0))', 0, []) _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6)(sn=6))', 1, [USER6_DN]) _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=0))', 0, []) _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=1))', 0, []) # These all take 2-way or k-way cases. _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6))', 4, [ USER6_DN, USER7_DN, USER8_DN, USER9_DN, ]) _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6)(uid>=user7))', 3, [ USER7_DN, USER8_DN, USER9_DN, ]) def test_or_range(topology_st_f): """Test filter logic with range :id: bc413e74-667a-48b0-8fbd-e9b7d18a01e4 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(|(uid>=user5)(uid=user6))`` 2. Search for test users with filter ``(|(uid>=user5)(uid=user0))`` :expectedresults: 1. There should be 5 users listed i.e. user5 to user9 2. There should be 6 users listed i.e. user5 to user9 and user0 """ _check_filter(topology_st_f, '(|(uid>=user5)(uid=user6))', 5, [ USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, ]) _check_filter(topology_st_f, '(|(uid>=user5)(uid=user0))', 6, [ USER0_DN, USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, ]) def test_and_and_eq(topology_st_f): """Test filter logic with "AND" and "equal to" operators :id: 5c66eb38-d01f-459e-81e4-d335f97211c7 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(&(uid=user0)(sn=0))(cn=user0))`` 2. Search for test users with filter ``(&(&(uid=user1)(sn=0))(cn=user0))`` 3. Search for test users with filter ``(&(&(uid=user0)(sn=1))(cn=user0))`` 4. Search for test users with filter ``(&(&(uid=user0)(sn=0))(cn=user1))`` :expectedresults: 1. 
There should be 1 user listed i.e. user0 2. There should be no users listed 3. There should be no users listed 4. There should be no users listed """ _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(&(uid=user1)(sn=0))(cn=user0))', 0, []) _check_filter(topology_st_f, '(&(&(uid=user0)(sn=1))(cn=user0))', 0, []) _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user1))', 0, []) def test_or_or_eq(topology_st_f): """Test filter logic with "AND" and "equal to" operators :id: 0cab4bbd-637c-419d-8069-ad5463ecaa75 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(|(|(uid=user0)(sn=0))(cn=user0))`` 2. Search for test users with filter ``(|(|(uid=user1)(sn=0))(cn=user0))`` 3. Search for test users with filter ``(|(|(uid=user0)(sn=1))(cn=user0))`` 4. Search for test users with filter ``(|(|(uid=user0)(sn=0))(cn=user1))`` 5. Search for test users with filter ``(|(|(uid=user0)(sn=1))(cn=user2))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should be 2 users listed i.e. user0, user1 3. There should be 2 users listed i.e. user0, user1 4. There should be 2 users listed i.e. user0, user1 5. There should be 3 users listed i.e. 
user0, user1 and user2 """ _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(|(uid=user1)(sn=0))(cn=user0))', 2, [USER0_DN, USER1_DN]) _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user0))', 2, [USER0_DN, USER1_DN]) _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user1))', 2, [USER0_DN, USER1_DN]) _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) def test_and_or_eq(topology_st_f): """Test filter logic with "AND" and "equal to" operators :id: 2ce7cc2e-6058-422d-ac3e-e678decf1cc4 :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. Search for test users with filter ``(&(|(uid=user0)(sn=0))(cn=user0))`` 2. Search for test users with filter ``(&(|(uid=user1)(sn=0))(cn=user0))`` 3. Search for test users with filter ``(&(|(uid=user0)(sn=1))(cn=user0))`` 4. Search for test users with filter ``(&(|(uid=user0)(sn=0))(cn=user1))`` 5. Search for test users with filter ``(&(|(uid=user0)(sn=1))(cn=*))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should be 1 user listed i.e. user0 3. There should be 1 user listed i.e. user0 4. There should be no users listed 5. There should be 2 users listed i.e. user0 and user1 """ _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(|(uid=user1)(sn=0))(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user1))', 0, []) _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=*))', 2, [USER0_DN, USER1_DN]) def test_or_and_eq(topology_st_f): """Test filter logic with "AND" and "equal to" operators :id: ee9fb400-451a-479e-852c-f59b4c937a8d :setup: Standalone instance with 20 test users added from uid=user0 to uid=user20 :steps: 1. 
Search for test users with filter ``(|(&(uid=user0)(sn=0))(uid=user0))`` 2. Search for test users with filter ``(|(&(uid=user1)(sn=2))(uid=user0))`` 3. Search for test users with filter ``(|(&(uid=user0)(sn=1))(uid=user0))`` 4. Search for test users with filter ``(|(&(uid=user1)(sn=1))(uid=user0))`` :expectedresults: 1. There should be 1 user listed i.e. user0 2. There should be 1 user listed i.e. user0 3. There should be 1 user listed i.e. user0 4. There should be 2 user listed i.e. user0 and user1 """ _check_filter(topology_st_f, '(|(&(uid=user0)(sn=0))(uid=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(&(uid=user1)(sn=2))(uid=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(&(uid=user0)(sn=1))(uid=user0))', 1, [USER0_DN]) _check_filter(topology_st_f, '(|(&(uid=user1)(sn=1))(uid=user0))', 2, [USER0_DN, USER1_DN]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_match_test.py000066400000000000000000001231101421664411400277050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ Test the matching rules feature . 
""" import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st from lib389.cos import CosTemplates from lib389.schema import Schema import ldap pytestmark = pytest.mark.tier1 ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " "DESC 'for testing matching rules' EQUALITY octetStringMatch " "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " "'for testing matching rules' EQUALITY caseExactMatch ORDERING " "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.15 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " "'for testing matching rules' EQUALITY booleanMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " 
"caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC " "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " "'for testing matching rules' EQUALITY integerMatch ORDERING " "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC " "'for testing matching rules' EQUALITY numericStringMatch ORDERING " "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 " "X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", 
"( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] TESTED_MATCHING_RULES = ["bitStringMatch", "caseExactIA5Match", "caseExactMatch", "caseExactOrderingMatch", "caseExactSubstringsMatch", "caseExactIA5SubstringsMatch", "generalizedTimeMatch", "generalizedTimeOrderingMatch", "booleanMatch", "caseIgnoreIA5Match", "caseIgnoreIA5SubstringsMatch", "caseIgnoreMatch", "caseIgnoreOrderingMatch", "caseIgnoreSubstringsMatch", "caseIgnoreListMatch", "caseIgnoreListSubstringsMatch", "objectIdentifierMatch", "directoryStringFirstComponentMatch", "objectIdentifierFirstComponentMatch", "distinguishedNameMatch", "integerMatch", "integerOrderingMatch", "integerFirstComponentMatch", "uniqueMemberMatch", "numericStringMatch", "numericStringOrderingMatch", "numericStringSubstringsMatch", "telephoneNumberMatch", "telephoneNumberSubstringsMatch", "octetStringMatch", "octetStringOrderingMatch"] MATCHING_RULES = [ {'attr': 'attrbitStringMatch', 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, {'attr': 'attrcaseExactIA5Match', 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], 'negative': ['Sprain', 'Sprain', 'Sprain', 'Sprain', 'SpRain', 'SpRain', 'SprAin', 'SprAin', 'SpraIn', 'SpraIn', 'Sprain', 'Sprain']}, {'attr': 'attrcaseExactMatch', 'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 
'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', 'çÉliné Ändrè', 'çÉliné Ändrè']}, {'attr': 'attrgeneralizedTimeMatch', 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', '20100218171304Z', '20100218171305Z'], 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, {'attr': 'attrbooleanMatch', 'positive': ['FALSE'], 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, {'attr': 'attrcaseIgnoreMatch', 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrcaseIgnoreListMatch', 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, {'attr': 'attrobjectIdentifierMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', 
'1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrobjectIdentifierFirstComponentMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdistinguishedNameMatch', 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, {'attr': 'attrintegerMatch', 'positive': ['-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attrintegerFirstComponentMatch', 'positive': ['-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attruniqueMemberMatch', 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], 'negative': 
["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, {'attr': 'attrnumericStringMatch', 'positive': ['00002', '00003', '00004', '00005', '00006'], 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', '00005', '00006', '00006']}, {'attr': 'attrtelephoneNumberMatch', 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, {'attr': 'attroctetStringMatch', 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] MATCHING_MODES = [ {'attr': 'attrbitStringMatch', 'positive': ["'0001'B"], 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, {'attr': 'attrcaseExactIA5Match', 'positive': 'Sprain', 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, {'attr': 'attrcaseExactMatch', 'positive': 'ÇélIné Ändrè', 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè']}, {'attr': 'attrgeneralizedTimeMatch', 'positive': '20100218171300Z', 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', '20100218171303Z', '20100218171304Z', '20100218171305Z']}, {'attr': 'attrbooleanMatch', 
'positive': 'TRUE', 'negative': ['TRUE', 'FALSE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive': 'sprain1', 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, {'attr': 'attrcaseIgnoreMatch', 'positive': 'ÇélIné Ändrè1', 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, {'attr': 'attrcaseIgnoreListMatch', 'positive': 'foo1$bar', 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, {'attr': 'attrobjectIdentifierMatch', 'positive': '1.3.6.1.4.1.1466.115.121.1.15', 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive': 'ÇélIné Ändrè1', 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, {'attr': 'attrobjectIdentifierFirstComponentMatch', 'positive': '1.3.6.1.4.1.1466.115.121.1.15', 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdistinguishedNameMatch', 'positive': 'cn=foo1,cn=bar', 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, {'attr': 'attrintegerMatch', 'positive': '-2', 'negative': ['-2', '-1', '0', '1', '2', '3']}, {'attr': 'attrintegerFirstComponentMatch', 'positive': '-2', 'negative': ['-2', '-1', '0', '1', '2', '3']}, {'attr': 'attruniqueMemberMatch', 'positive': "cn=foo1,cn=bar#'0001'B", 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, {'attr': 'attrnumericStringMatch', 
'positive': '00001', 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, {'attr': 'attrtelephoneNumberMatch', 'positive': '+1 408 555 4798', 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423']}, {'attr': 'attroctetStringMatch', 'positive': 'AAAAAAAAAAAAAAE=', 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] MODE_REPLACE = [ {'attr': 'attrbitStringMatch', 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, {'attr': 'attrcaseExactIA5Match', 'positive': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, {'attr': 'attrcaseExactMatch', 'positive': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', 'çÉliné Ändrè', 'çÉliné Ändrè']}, {'attr': 'attrgeneralizedTimeMatch', 'positive': ['20100218171300Z', '20100218171301Z', '20100218171302Z', '20100218171303Z', '20100218171304Z', '20100218171305Z'], 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, {'attr': 'attrbooleanMatch', 'positive': ['TRUE', 'FALSE'], 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, {'attr': 'attrcaseIgnoreIA5Match', 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 
'sprain3', 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, {'attr': 'attrcaseIgnoreMatch', 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, {'attr': 'attrcaseIgnoreListMatch', 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, {'attr': 'attrobjectIdentifierFirstComponentMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdistinguishedNameMatch', 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, {'attr': 'attrintegerMatch', 'positive': ['-2', '-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attrintegerFirstComponentMatch', 
'positive': ['-2', '-1', '0', '1', '2', '3'], 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, {'attr': 'attruniqueMemberMatch', 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, {'attr': 'attrnumericStringMatch', 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', '00005', '00006', '00006']}, {'attr': 'attrtelephoneNumberMatch', 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, {'attr': 'attroctetStringMatch', 'positive': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}, {'attr': 'attrobjectIdentifierMatch', 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], 'negative': 
['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, {'attr': 'attrdirectoryStringFirstComponentMatch', 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}] LIST_ATTR = [ ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), ('entrybitStringMatch0', "'0001'B"), ('entrybitStringMatch1', "'0010'B"), ('entrybitStringMatch2', "'0011'B"), ('entrybitStringMatch3', "'0100'B"), ('entrybitStringMatch4', "'0101'B"), ('entrybitStringMatch5', "'0110'B"), ('entrycaseExactIA5Match0', "Sprain"), ('entrycaseExactIA5Match1', "sPrain"), ('entrycaseExactIA5Match2', "spRain"), ('entrycaseExactIA5Match3', "sprAin"), ('entrycaseExactIA5Match4', "spraIn"), ('entrycaseExactIA5Match5', "sprain"), ('entrycaseExactMatch0', "ÇélIné Ändrè"), ('entrycaseExactMatch1', "ÇéliNé Ändrè"), ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), ('entrycaseExactMatch3', "Çéliné Ändrè"), ('entrycaseExactMatch4', "çÉliné Ändrè"), ('entrygeneralizedTimeMatch0', "20100218171300Z"), ('entrygeneralizedTimeMatch1', "20100218171301Z"), ('entrygeneralizedTimeMatch2', "20100218171302Z"), ('entrygeneralizedTimeMatch3', "20100218171303Z"), ('entrygeneralizedTimeMatch4', 
"20100218171304Z"), ('entrygeneralizedTimeMatch5', "20100218171305Z"), ('entrybooleanMatch0', "TRUE"), ('entrybooleanMatch1', "FALSE"), ('entrycaseIgnoreIA5Match0', "sprain1"), ('entrycaseIgnoreIA5Match1', "sprain2"), ('entrycaseIgnoreIA5Match2', "sprain3"), ('entrycaseIgnoreIA5Match3', "sprain4"), ('entrycaseIgnoreIA5Match4', "sprain5"), ('entrycaseIgnoreIA5Match5', "sprain6"), ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), ('entrycaseIgnoreListMatch0', "foo1$bar"), ('entrycaseIgnoreListMatch1', "foo2$bar"), ('entrycaseIgnoreListMatch2', "foo3$bar"), ('entrycaseIgnoreListMatch3', "foo4$bar"), ('entrycaseIgnoreListMatch4', "foo5$bar"), ('entrycaseIgnoreListMatch5', "foo6$bar"), ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), ('entryobjectIdentifierMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), ('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), ('entryintegerMatch0', "-2"), ('entryintegerMatch1', "-1"), ('entryintegerMatch2', "0"), ('entryintegerMatch3', "1"), ('entryintegerMatch4', "2"), ('entryintegerMatch5', "3"), ('entryuniqueMemberMatch0', "cn=foo1,cn=bar#'0001'B"), ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), ('entryuniqueMemberMatch3', 
"cn=foo4,cn=bar#'0100'B"), ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), ('entrynumericStringMatch0', "00001"), ('entrynumericStringMatch1', "00002"), ('entrynumericStringMatch2', "00003"), ('entrynumericStringMatch3', "00004"), ('entrynumericStringMatch4', "00005"), ('entrynumericStringMatch5', "00006"), ('entrytelephoneNumberMatch0', "+1 408 555 4798"), ('entrytelephoneNumberMatch1', "+1 408 555 5625"), ('entrytelephoneNumberMatch2', "+1 408 555 6201"), ('entrytelephoneNumberMatch3', "+1 408 555 8585"), ('entrytelephoneNumberMatch4', "+1 408 555 9187"), ('entrytelephoneNumberMatch5', "+1 408 555 9423"), ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), ('entrydirectoryStringFirstComponentMatch5', "ÇélIné Ändrè6"), ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), ('entryintegerFirstComponentMatch0', "-2"), ('entryintegerFirstComponentMatch1', "-1"), ('entryintegerFirstComponentMatch2', "0"), ('entryintegerFirstComponentMatch3', "1"), ('entryintegerFirstComponentMatch4', "2"), ('entryintegerFirstComponentMatch5', "3")] POSITIVE_NEGATIVE_VALUES = [ ["(attrbitStringMatch='0001'B)", 1, "(attrbitStringMatch:bitStringMatch:='000100000'B)"], ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, "(attrcaseExactIA5Match=SPRAIN)"], 
["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, "(attrcaseExactMatch>=çéliné ändrè)"], ["(attrcaseExactIA5Match=Sprain)", 1, "(attrgeneralizedTimeMatch=20300218171300Z)"], ["(attrbooleanMatch=TRUE)", 1, "(attrgeneralizedTimeMatch>=20300218171300Z)"], ["(attrcaseIgnoreIA5Match=sprain1)", 1, "(attrcaseIgnoreIA5Match=sprain9999)"], ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], ["(attrcaseIgnoreListMatch=foo1$bar)", 1, "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, "(attroctetStringMatch>=AAAAAAAAAAABAQQ=)"], ["(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè1)", 1, "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], ["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], ["(attrintegerMatch=-2)", 1, "(attrintegerMatch=-20)"], ["(attrintegerMatch>=-2)", 6, "(attrintegerMatch>=20)"], ["(attrintegerFirstComponentMatch=-2)", 1, "(attrintegerFirstComponentMatch=-20)"], ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], ["(attrnumericStringMatch=00001)", 1, "(attrnumericStringMatch=000000001)"], ["(attrnumericStringMatch>=00001)", 6, "(attrnumericStringMatch>=01)"], ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, "(attrtelephoneNumberMatch=+2 408 555 4798)"], ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, 
"(attroctetStringMatch>=AAAAAAAAAAABAQE=)"]] LIST_EXT = [("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), ("(attrdirectoryStringFirstComponentMatch:directory" "StringFirstComponentMatch:=ÇélIné Ändrè1)", 1), ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), ("(attrintegerMatch:integerMatch:=-2)", 1), ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), ("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), ("(attrcaseExactMatch=*é Ä*)", 5), ("(attrcaseExactIA5Match=*Sprain*)", 1), ("(attrcaseExactIA5Match=Sprain*)", 1), ("(attrcaseExactIA5Match=*Sprain)", 1), 
("(attrcaseExactIA5Match=*rai*)", 3), ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), ("(attrcaseIgnoreIA5Match=sprain1*)", 1), ("(attrcaseIgnoreIA5Match=*sprain1)", 1), ("(attrcaseIgnoreIA5Match=*rai*)", 6), ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), ("(attrcaseIgnoreMatch=*é Ä*)", 6), ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), ("(attrcaseIgnoreListMatch=*1$b*)", 1), ("(attrnumericStringMatch=*00001*)", 1), ("(attrnumericStringMatch=00001*)", 1), ("(attrnumericStringMatch=*00001)", 1), ("(attrnumericStringMatch=*000*)", 6), ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), ("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), ("(attrtelephoneNumberMatch=* 55*)", 6)] def test_matching_rules(topology_st): """Test matching rules. :id: 8cb6e62a-8cfc-11e9-be9a-8c16451d917b :setup: Standalone :steps: 1. Search for matching rule. 2. Matching rule should be there in schema. :expected results: 1. Pass 2. Pass """ matchingrules = Schema(topology_st.standalone).get_matchingrules() assert matchingrules rules = set(matchingrule.names for matchingrule in matchingrules) rules1 = [role[0] for role in rules if len(role) != 0] for rule in TESTED_MATCHING_RULES: assert rule in rules1 def test_add_attribute_types(topology_st): """Test add attribute types to schema :id: 84d6dece-8cfc-11e9-89a3-8c16451d917b :setup: Standalone :steps: 1. Add new attribute types to schema. :expected results: 1. Pass """ for attribute in ATTR: Schema(topology_st.standalone).add('attributetypes', attribute) @pytest.mark.parametrize("rule", MATCHING_RULES) def test_valid_invalid_attributes(topology_st, rule): """Delete duplicate attributes :id: d0bf3942-ba71-4947-90c8-1bfa9f0b838f :parametrized: yes :setup: Standalone :steps: 1. 
Create entry with an attribute that uses that matching rule 2. Delete existing entry 3. Create entry with an attribute that uses that matching rule providing duplicate values that are duplicates according to the equality matching rule. :expected results: 1. Pass 2. Pass 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) """ # Entry with extensibleObject cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) entry = cos.create(properties={'cn': 'addentry'+rule['attr'], rule['attr']: rule['positive']}) entry.delete() with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): cos.create(properties={'cn': 'addentry'+rule['attr'].split('attr')[1], rule['attr']: rule['negative']}) @pytest.mark.parametrize("mode", MATCHING_MODES) def test_valid_invalid_modes(topology_st, mode): """Add duplicate attributes :id: dec03362-ba26-41da-b479-e2b788403fce :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses matching mode 2. Add an attribute that uses that matching mode providing duplicate values that are duplicates according to the equality matching. 3. Delete existing entry :expected results: 1. Pass 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) 3. Pass """ # Entry with extensibleObject cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) entry = cos.create(properties={'cn': 'addentry'+mode['attr'], mode['attr']: mode['positive']}) with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): entry.add(mode['attr'], mode['negative']) entry.delete() @pytest.mark.parametrize("mode", MODE_REPLACE) def test_valid_invalid_mode_replace(topology_st, mode): """Replace and Delete duplicate attribute :id: 7ec19eca-8cfc-11e9-a0df-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Create entry with an attribute that uses that matching rule 2. Replace an attribute that uses that matching rule 3. Replace an attribute that uses that matching rule providing duplicate values that are duplicates according to the equality matching mode. 4. Delete existing attribute 5. 
Try to delete the deleted attribute again. 6. Delete entry :expected results: 1. Pass 2. Pass 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) 4. Pass 5. Fail(ldap.NO_SUCH_ATTRIBUTE) 6. Pass """ # Entry with extensibleObject cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) user = cos.create(properties={'cn': 'addentry'+mode['attr']}) # Replace Operation user.replace(mode['attr'], mode['positive']) with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): user.replace(mode['attr'], mode['negative']) # Delete Operation user.remove(mode['attr'], mode['positive'][0]) with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): user.remove(mode['attr'], mode['positive'][0]) user.delete() @pytest.fixture(scope="module") def _searches(topology_st): """ Add attribute types to schema """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) for attr, value in LIST_ATTR: cos.create(properties={ 'cn': attr, 'attr' + attr.split('entry')[1][:-1]: value }) @pytest.mark.parametrize("attr, po_value, ne_attr", POSITIVE_NEGATIVE_VALUES) def test_match_count(topology_st, _searches, attr, po_value, ne_attr): """Search for an attribute with that matching rule with an assertion value that should match :id: 00276180-b902-11e9-bff2-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Filter rules as per the condition and assert the no of output. 2. Negative filter with no outputs. :expected results: 1. Pass 2. Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) assert len(cos.filter(attr)) == po_value assert not cos.filter(ne_attr) @pytest.mark.parametrize("attr, value", LIST_EXT) def test_extensible_search(topology_st, _searches, attr, value): """Match filter and output. :id: abe3e6dd-9ecc-11e8-adf0-8c16451d917c :parametrized: yes :setup: Standalone :steps: 1. Filer output should match the exact value given. :expected results: 1. 
Pass """ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) assert len(cos.filter(attr)) == value if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_test.py000066400000000000000000000314741421664411400265440ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX from lib389.utils import * pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) ENTRY_NAME = 'test_entry' @pytest.mark.bz918686 @pytest.mark.ds497 def test_filter_escaped(topology_st): """Test we can search for an '*' in a attribute value. :id: 5c9aa40c-c641-4603-bce3-b19f4c1f2031 :setup: Standalone instance :steps: 1. Add a test user with an '*' in its attribute value i.e. 'cn=test * me' 2. Add another similar test user without '*' in its attribute value 3. Search test user using search filter "cn=*\\**" :expectedresults: 1. This should pass 2. This should pass 3. 
Test user with 'cn=test * me' only, should be listed """ log.info('Running test_filter_escaped...') USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), 'sn': '1', 'cn': 'test * me', 'uid': 'test_entry', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' + e.message['desc']) assert False try: topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), 'sn': '2', 'cn': 'test me', 'uid': 'test_entry2', 'userpassword': PASSWORD}))) except ldap.LDAPError as e: log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc']) assert False try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\\**') if not entry or len(entry) > 1: log.fatal('test_filter_escaped: Entry was not found using "cn=*\\**"') assert False except ldap.LDAPError as e: log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' % (USER1_DN, e.message('desc'))) assert False log.info('test_filter_escaped: PASSED') def test_filter_search_original_attrs(topology_st): """Search and request attributes with extra characters. The returned entry should not have these extra characters: objectclass EXTRA" :id: d30d8a1c-84ac-47ba-95f9-41e3453fbf3a :setup: Standalone instance :steps: 1. Execute a search operation for attributes with extra characters 2. Check the search result have these extra characters or not :expectedresults: 1. Search should pass 2. 
Search result should not have these extra characters attribute """ log.info('Running test_filter_search_original_attrs...') try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, 'objectclass=top', ['objectclass-EXTRA']) if entry[0].hasAttr('objectclass-EXTRA'): log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute') assert False except ldap.LDAPError as e: log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' % (DEFAULT_SUFFIX, e.message('desc'))) assert False log.info('test_filter_search_original_attrs: PASSED') @pytest.mark.bz1511462 def test_filter_scope_one(topology_st): """Test ldapsearch with scope one gives only single entry :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 :setup: Standalone instance :steps: 1. Search ou=services,dc=example,dc=com using ldapsearch with scope one using base as dc=example,dc=com 2. Check that search should return only one entry :expectedresults: 1. This should pass 2. This should pass """ log.info('Search user using ldapsearch with scope one') results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'ou=services',['ou'] ) log.info(results) log.info('Search should only have one entry') assert len(results) == 1 @pytest.mark.ds47313 def test_filter_with_attribute_subtype(topology_st): """Adds 2 test entries and Search with filters including subtype and ! :id: 0e69f5f2-6a0a-480e-8282-fbcc50231908 :setup: Standalone instance :steps: 1. Add 2 entries and create 3 filters 2. Search for entry with filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) 3. Search for entry with filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) 4. Search for entry with filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) 5. Delete the added entries :expectedresults: 1. Operation should be successful 2. Search should be successful 3. Search should be successful 4. Search should not be successful 5. 
Delete the added entries """ # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # enable filter error logging # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] # topology_st.standalone.modify_s(DN_CONFIG, mod) topology_st.standalone.log.info("\n\n######################### ADD ######################\n") # Prepare the entry with cn;fr & cn;en entry_name_fr = '%s fr' % (ENTRY_NAME) entry_name_en = '%s en' % (ENTRY_NAME) entry_name_both = '%s both' % (ENTRY_NAME) entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) entry_both = Entry(entry_dn_both) entry_both.setValues('objectclass', 'top', 'person') entry_both.setValues('sn', entry_name_both) entry_both.setValues('cn', entry_name_both) entry_both.setValues('cn;fr', entry_name_fr) entry_both.setValues('cn;en', entry_name_en) # Prepare the entry with one member entry_name_en_only = '%s en only' % (ENTRY_NAME) entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) entry_en_only = Entry(entry_dn_en_only) entry_en_only.setValues('objectclass', 'top', 'person') entry_en_only.setValues('sn', entry_name_en_only) entry_en_only.setValues('cn', entry_name_en_only) entry_en_only.setValues('cn;en', entry_name_en) topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) topology_st.standalone.add_s(entry_both) topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) topology_st.standalone.add_s(entry_en_only) topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 assert ensure_str(ents[0].sn) == entry_name_en_only 
topology_st.standalone.log.info("Found %s" % ents[0].dn) # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 assert ensure_str(ents[0].sn) == entry_name_en_only topology_st.standalone.log.info("Found %s" % ents[0].dn) # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 0 topology_st.standalone.log.info("Found none") topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") topology_st.standalone.log.info("Try to delete %s " % entry_dn_both) topology_st.standalone.delete_s(entry_dn_both) topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only) topology_st.standalone.delete_s(entry_dn_en_only) log.info('Testcase PASSED') @pytest.mark.bz1615155 def test_extended_search(topology_st): """Test we can search with equality extended matching rule :id: 396942ac-467b-435b-8d9f-e80c7ec4ba6c :setup: Standalone instance :steps: 1. Add a test user with 'sn: ext-test-entry' 2. Search '(cn:de:=ext-test-entry)' 3. Search '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' 4. Search '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' 5. Search '(sn:caseExactMatch:=EXT-TEST-ENTRY)' 6. Search '(sn:caseExactMatch:=ext-test-entry)' 7. Search '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' 8. Search '(sn:caseExactIA5Match:=ext-test-entry)' :expectedresults: 1. This should pass 2. This should return one entry 3. This should return one entry 4. This should return one entry 5. This should return NO entry 6. This should return one entry 7. 
This should return NO entry 8. This should return one entry """ log.info('Running test_filter_escaped...') ATTR_VAL = 'ext-test-entry' USER1_DN = "uid=%s,%s" % (ATTR_VAL, DEFAULT_SUFFIX) try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), 'sn': ATTR_VAL.encode(), 'cn': ATTR_VAL.encode(), 'uid': ATTR_VAL.encode()}))) except ldap.LDAPError as e: log.fatal('test_extended_search: Failed to add test user ' + USER1_DN + ': error ' + e.message['desc']) assert False # filter: '(cn:de:=ext-test-entry)' myfilter = '(cn:de:=%s)' % ATTR_VAL topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 # filter: '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' myfilter = '(cn:caseIgnoreIA5Match:=%s)' % ATTR_VAL.upper() topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 # filter: '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' myfilter = '(cn:caseIgnoreMatch:=%s)' % ATTR_VAL.upper() topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 # filter: '(sn:caseExactMatch:=EXT-TEST-ENTRY)' myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL.upper() topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 0 # filter: '(sn:caseExactMatch:=ext-test-entry)' myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 # filter: '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL.upper() 
topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 0 # filter: '(sn:caseExactIA5Match:=ext-test-entry)' myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL topology_st.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 1 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py000066400000000000000000000440261421664411400325470ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ verify and testing Filter from a search """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain from lib389.idm.user import UserAccounts, UserAccount from lib389.idm.account import Accounts pytestmark = pytest.mark.tier1 FILTER_MWARD = "(uid=mward)" FILTER_L = "(l=sunnyvale)" FILTER_MAIL = "(mail=jreu*)" FILTER_EXAM = "(mail=*exam*)" FILTER_7393 = "(telephonenumber=*7393)" FILTER_408 = "(telephonenumber=*408*3)" FILTER_UID = "(uid=*)" FILTER_PASSWD = "(userpassword=*)" FILTER_FRED = "(fred=*)" FILTER_AAA = "(uid:2.16.840.1.113730.3.3.2.15.1:=>AAA)" FILTER_AAA_ES = "(uid:es:=>AAA)" FILTER_AAA_UID = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" FILTER_100 = "(uid:2.16.840.1.113730.3.3.2.15.1:=>user100)" FILTER_ES_100 = "(uid:es:=>user100)" FILTER_UID_100 = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=user100)" FILTER_UID_1 = "(uid:2.16.840.1.113730.3.3.2.15.1:=<1)" FILTER_UID_ES = "(uid:es:=<1)" FILTER_UID_2 = 
"(uid:2.16.840.1.113730.3.3.2.15.1.1:=1)" FILTER_UID_USER1 = "(uid:2.16.840.1.113730.3.3.2.15.1:= # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap import time from lib389.topologies import topology_st as topology_st_pre from lib389.dirsrv_log import DirsrvAccessLog from lib389._mapped_object import DSLdapObjects from lib389._constants import DEFAULT_SUFFIX from lib389.extensibleobject import UnsafeExtensibleObjects pytestmark = pytest.mark.tier1 def _check_value(inst_cfg, value, exvalue=None): if exvalue is None: exvalue = value inst_cfg.set('nsslapd-verify-filter-schema', value) assert(inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') == exvalue) @pytest.fixture(scope="module") def topology_st(topology_st_pre): raw_objects = UnsafeExtensibleObjects(topology_st_pre.standalone, basedn=DEFAULT_SUFFIX) # Add an object that won't be able to be queried due to invalid attrs. raw_objects.create(properties = { "cn": "test_obj", "a": "a", "b": "b", "uid": "foo" }) return topology_st_pre @pytest.mark.ds50349 def test_filter_validation_config(topology_st): """Test that the new on/warn/off setting can be set and read correctly :id: ac14dad5-5bdf-474f-9936-7ce2d20fb8b6 :setup: Standalone instance :steps: 1. Check the default value of nsslapd-verify-filter-schema 2. Set the value to "on". 3. Read the value is "on". 4. Set the value to "warn". 5. Read the value is "warn". 6. Set the value to "off". 7. Read the value is "off". 8. Delete the value (reset) 9. Check the reset value matches 1. :expectedresults: 1. Value is "on", "off", or "warn". 2. Success 3. Value is "on" 4. Success 5. Value is "warn" 6. Success 7. Value is "off" 8. Success 9. Value is same as from 1. 
""" inst_cfg = topology_st.standalone.config initial_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') # Check legacy values that may have been set _check_value(inst_cfg, "on", "reject-invalid") _check_value(inst_cfg, "warn", "process-safe") _check_value(inst_cfg, "off") # Check the more descriptive values _check_value(inst_cfg, "reject-invalid") _check_value(inst_cfg, "process-safe") _check_value(inst_cfg, "warn-invalid") _check_value(inst_cfg, "off") # This should fail with pytest.raises(ldap.OPERATIONS_ERROR): _check_value(inst_cfg, "thnaounaou") inst_cfg.remove_all('nsslapd-verify-filter-schema') final_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') assert(initial_value == final_value) @pytest.mark.ds50349 def test_filter_validation_enabled(topology_st): """Test that queries which are invalid, are correctly rejected by the server. :id: 05afdbbd-0d7f-4774-958c-2139827fed70 :setup: Standalone instance :steps: 1. Search a well formed query 2. Search a poorly formed query 3. Search a poorly formed complex (and/or) query 4. Test the server can be restarted :expectedresults: 1. No warnings 2. Query is rejected (err) 3. Query is rejected (err) 4. Server restarts """ inst = topology_st.standalone # In case the default has changed, we set the value to warn. inst.config.set("nsslapd-verify-filter-schema", "reject-invalid") raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) # Check a good query has no errors. r = raw_objects.filter("(objectClass=*)") with pytest.raises(ldap.UNWILLING_TO_PERFORM): # Check a bad one DOES emit an error. r = raw_objects.filter("(a=a)") with pytest.raises(ldap.UNWILLING_TO_PERFORM): # Check a bad complex one does emit an error. raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") # Does restart work? 
inst.restart() @pytest.mark.ds50349 def test_filter_validation_warn_safe(topology_st): """Test that queries which are invalid, are correctly marked as "notes=F" in the access log, and return no entries or partial sets. :id: 8b2b23fe-d878-435c-bc84-8c298be4ca1f :setup: Standalone instance :steps: 1. Search a well formed query 2. Search a poorly formed query 3. Search a poorly formed complex (and/or) query :expectedresults: 1. No warnings 2. notes=F is present 3. notes=F is present """ inst = topology_st.standalone # In case the default has changed, we set the value to warn. inst.config.set("nsslapd-verify-filter-schema", "process-safe") # Set the access log to un-buffered so we get it immediately. inst.config.set("nsslapd-accesslog-logbuffering", "off") time.sleep(.5) # Setup the query object. # Now we don't care if there are any results, we only care about good/bad queries. # To do this we have to bypass some of the lib389 magic, and just emit raw queries # to check them. Turns out lib389 is well designed and this just works as expected # if you use a single DSLdapObjects and filter. :) raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) # Find any initial notes=F access_log = DirsrvAccessLog(inst) r_init = access_log.match(".*notes=F.*") # Check a good query has no warnings. r = raw_objects.filter("(objectClass=*)") time.sleep(.5) assert(len(r) > 0) r_s1 = access_log.match(".*notes=F.*") # Should be the same number of log lines IE 0. assert(len(r_init) == len(r_s1)) # Check a bad one DOES emit a warning. r = raw_objects.filter("(a=a)") time.sleep(.5) assert(len(r) == 0) r_s2 = access_log.match(".*notes=F.*") # Should be the greater number of log lines IE +1 assert(len(r_init) + 1 == len(r_s2)) # Check a bad complex one does emit a warning. 
r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") time.sleep(.5) assert(len(r) == 0) r_s3 = access_log.match(".*notes=F.*") # Should be the greater number of log lines IE +2 assert(len(r_init) + 2 == len(r_s3)) # Check that we can still get things when partial r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") time.sleep(.5) assert(len(r) == 1) r_s4 = access_log.match(".*notes=F.*") # Should be the greate number of log lines IE +2 assert(len(r_init) + 3 == len(r_s4)) @pytest.mark.ds50349 def test_filter_validation_warn_unsafe(topology_st): """Test that queries which are invalid, are correctly marked as "notes=F" in the access log, and uses the legacy query behaviour to return unsafe sets. :id: 8b2b23fe-d878-435c-bc84-8c298be4ca1f :setup: Standalone instance :steps: 1. Search a well formed query 2. Search a poorly formed query 3. Search a poorly formed complex (and/or) query :expectedresults: 1. No warnings 2. notes=F is present 3. notes=F is present """ inst = topology_st.standalone # In case the default has changed, we set the value to warn. inst.config.set("nsslapd-verify-filter-schema", "warn-invalid") # Set the access log to un-buffered so we get it immediately. inst.config.set("nsslapd-accesslog-logbuffering", "off") time.sleep(.5) # Setup the query object. # Now we don't care if there are any results, we only care about good/bad queries. # To do this we have to bypass some of the lib389 magic, and just emit raw queries # to check them. Turns out lib389 is well designed and this just works as expected # if you use a single DSLdapObjects and filter. :) raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) # Find any initial notes=F access_log = DirsrvAccessLog(inst) r_init = access_log.match(".*notes=(U,)?F.*") # Check a good query has no warnings. r = raw_objects.filter("(objectClass=*)") time.sleep(.5) assert(len(r) > 0) r_s1 = access_log.match(".*notes=(U,)?F.*") # Should be the same number of log lines IE 0. 
assert(len(r_init) == len(r_s1)) # Check a bad one DOES emit a warning. r = raw_objects.filter("(a=a)") time.sleep(.5) assert(len(r) == 1) # NOTE: Unlike warn-process-safely, these become UNINDEXED and show in the logs. r_s2 = access_log.match(".*notes=(U,)?F.*") # Should be the greater number of log lines IE +1 assert(len(r_init) + 1 == len(r_s2)) # Check a bad complex one does emit a warning. r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") time.sleep(.5) assert(len(r) == 1) r_s3 = access_log.match(".*notes=(U,)?F.*") # Should be the greater number of log lines IE +2 assert(len(r_init) + 2 == len(r_s3)) # Check that we can still get things when partial r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") time.sleep(.5) assert(len(r) == 1) r_s4 = access_log.match(".*notes=(U,)?F.*") # Should be the greater number of log lines IE +2 assert(len(r_init) + 3 == len(r_s4)) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py000066400000000000000000000213201421664411400310020ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ This script will test different type of Filters. 
""" import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.schema import Schema from lib389.idm.account import Accounts pytestmark = pytest.mark.tier1 FILTER_COMBINE = f"(& (| (nsRoleDN=cn=new managed role) (sn=Hall)) (l=sunnyvale))" FILTER_RJ = "(uid=rjense2)" FILTER_CN = "(nsRoleDN=cn=new managed *)" FILTER_CN_MT = f"(& {FILTER_CN} (uid=mtyler))" VALUES_POSITIVE = [ (FILTER_COMBINE, ['*', 'cn'], 'cn'), (FILTER_COMBINE, ['cn', 'cn', 'cn'], 'cn'), (FILTER_COMBINE, ['cn', 'Cn', 'CN'], 'cn'), (FILTER_COMBINE, ['cn', '*'], 'cn'), (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifiersName'), (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'cn'), (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'cn'), (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'modifiersName'), (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'cn'), (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'modifiersName'), (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), (FILTER_RJ, ['*', 'mailquota'], 'mailquota'), (FILTER_RJ, ['mailquota', '*'], 'mailquota'), (FILTER_RJ, ['mailquota'], 'mailquota'), (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'mailquota'), (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), (FILTER_CN, ['cn', 'nsRoleDN'], 'cn'), (FILTER_CN, ['cn', 'nsRoleDN'], 'nsRoleDN'), 
(FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'mailquota'), (FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'mailquota'), (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'modifiersName'), (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'modifiersName')] LIST_OF_USER = ['scarter', 'tmorris', 'kvaughan', 'abergin', 'dmiller', 'gfarmer', 'kwinters', 'trigden', 'cschmith', 'jwallace', 'jwalker', 'tclow', 'rdaugherty', 'jreuter', 'tmason', 'btalbot', 'mward', 'bjablons', 'jmcFarla', 'llabonte', 'jcampaig', 'bhal2', 'alutz', 'achassin', 'hmiller', 'jcampai2', 'lulrich', 'mlangdon', 'striplet', 'gtriplet', 'jfalena', 'speterso', 'ejohnson', 'prigden', 'bwalker', 'kjensen', 'mlott', 'cwallace', 'tpierce', 'rbannist', 'bplante', 'rmills', 'bschneid', 'skellehe', 'brentz', 'dsmith', 'scarte2', 'dthorud', 'ekohler', 'lcampbel', 'tlabonte', 'slee', 'bfree', 'tschneid', 'prose', 'jhunter', 'ashelton', 'mmcinnis', 'falbers', 'mschneid', 'pcruse', 'tkelly', 'gtyler'] @pytest.fixture(scope="module") def _create_test_entries(topo): """ :param topo: :return: Will create users used for this test script . 
""" users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for demo1 in LIST_OF_USER: users_people.create(properties={ 'uid': demo1, 'cn': demo1, 'sn': demo1, 'uidNumber': str(1000), 'gidNumber': '2000', 'homeDirectory': '/home/' + demo1, 'givenname': demo1, 'userpassword': PW_DM }) users_people.create(properties={ 'uid': 'bhall', 'cn': 'Benjamin Hall', 'sn': 'Hall', 'uidNumber': str(1000), 'gidNumber': '2000', 'homeDirectory': '/home/' + 'bhall', 'mail': 'bhall@anuj.com', 'givenname': 'Benjamin', 'ou': ['Product Development', 'People'], 'l': 'sunnyvale', 'telephonenumber': '+1 408 555 6067', 'roomnumber': '2511', 'manager': 'uid=trigden, ou=People, dc=example, dc=com', 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', 'userpassword': PW_DM, }) ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou_ou = ous.create(properties={'ou': 'COS'}) ous = OrganizationalUnits(topo.standalone, ou_ou.dn) ous.create(properties={'ou': 'MailSchemeClasses'}) Schema(topo.standalone).\ add('attributetypes', "( 9.9.8.4 NAME 'emailclass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " "X-ORIGIN 'RFC 2256' )") Schema(topo.standalone).\ add('objectclasses', "( 9.9.8.2 NAME 'mailSchemeUser' DESC " "'User Defined ObjectClass' SUP 'top' MUST " "( objectclass ) MAY (aci $ emailclass) X-ORIGIN 'RFC 2256' )") users_people.create(properties={ 'cn': 'Randy Jensen', 'sn': 'Jensen', 'givenname': 'Randy', 'objectclass': 'top account person organizationalPerson inetOrgPerson mailSchemeUser ' 'mailRecipient posixaccount'.split(), 'l': 'sunnyvale', 'uid': 'rjense2', 'uidNumber': str(1000), 'gidNumber': str(1000), 'homeDirectory': '/home/' + 'rjense2', 'mail': 'rjense2@example.com', 'telephonenumber': '+1 408 555 9045', 'roomnumber': '1984', 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', 'emailclass': 'vpemail', 'mailquota': '600', 'userpassword': PW_DM, }) users_people.create(properties={ 'cn': 'Bjorn 
Talbot', 'sn': 'Talbot', 'givenname': 'Bjorn', 'objectclass': 'top account person organizationalPerson inetOrgPerson posixaccount'.split(), 'ou': ['Product Development', 'People'], 'l': 'Santa Clara', 'uid': 'btalbo2', 'mail': 'btalbo2@example.com', 'telephonenumber': '+1 408 555 4234', 'roomnumber': '1205', 'uidNumber': str(1000), 'gidNumber': str(1000), 'homeDirectory': '/home/' + 'btalbo2', 'manager': 'uid=trigden, ou=People, dc=example,dc=com', 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', 'userpassword': PW_DM }) users_people.create(properties={ 'objectclass': 'top ' 'account ' 'person ' 'organizationalPerson ' 'inetOrgPerson ' 'mailRecipient ' 'mailSchemeUser ' 'posixaccount'.split(), 'cn': 'Matthew Tyler', 'sn': 'Tyler', 'givenname': 'Matthew', 'ou': ['Human Resources', 'People'], 'l': 'Cupertino', 'uid': 'mtyler', 'mail': 'mtyler@example.com', 'telephonenumber': '+1 408 555 7907', 'roomnumber': '2701', 'uidNumber': str(1000), 'gidNumber': str(1000), 'homeDirectory': '/home/' + 'mtyler', 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', 'mailquota': '600', 'userpassword': PW_DM}) @pytest.mark.parametrize("filter_test, condition, filter_out", VALUES_POSITIVE) def test_all_together_positive(topo, _create_test_entries, filter_test, condition, filter_out): """Test filter with positive results. :id: 51924a38-9baa-11e8-b22a-8c16451d917b :parametrized: yes :setup: Standalone Server :steps: 1. Create Filter rules. 2. Try to pass filter rules as per the condition . :expected results: 1. It should pass 2. 
It should pass """ account = Accounts(topo.standalone, DEFAULT_SUFFIX) assert account.filter(filter_test)[0].get_attrs_vals_utf8(condition)[filter_out] if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/filter/vfilter_simple_test.py000066400000000000000000000705331421664411400303020ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK ---- """ verify and testing Filter from a search """ import os import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.topologies import topology_st as topo from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.account import Accounts from lib389.idm.user import UserAccount, UserAccounts from lib389.schema import Schema from lib389.idm.role import ManagedRoles, FilteredRoles pytestmark = pytest.mark.tier1 FILTER_POSTAL = "(postalCode=99999)" FILTER_ADDRESS = "(postalAddress=345 California Av., Mountain View, CA)" FILTER_8888 = "(postalCode:2.16.840.1.113730.3.3.2.7.1:=88888)" FILTER_6666 = "(postalCode:2.16.840.1.113730.3.3.2.7.1.3:=66666)" FILTER_VPE = "(emailclass=vpe*)" FILTER_EMAIL = "(emailclass=*emai*)" FILTER_EMAILQUATA = "(mailquota=*00)" FILTER_QUATA = '(mailquota=*6*0)' FILTER_ROLE = '(nsRole=*)' FILTER_POST = '(postalAddress=*)' FILTER_CLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>AAA)" FILTER_CLASSES = "(emailclass:es:=>AAA)" FILTER_AAA = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" FILTER_VE = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>vpemail)" FILTER_VPEM = "(emailclass:es:=>vpemail)" FILTER_900 = "(mailquota:2.16.840.1.113730.3.3.2.15.1.1:=900)" FILTER_7777 = "(postalCode:de:==77777)" FILTER_FRED = '(fred=*)' FILTER_ECLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=vpemail)" 
FILTER_ECLASS_1 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=<1)" FILTER_ECLASS_2 = "(emailclass:es:=<1)" FILTER_ECLASS_3 = "(emailclass:2.16.840.1.113730.3.3.2.15.1.1:=1)" FILTER_ECLASS_4 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:= 0: return True return False def _allow_machine_account(inst, name): # First we need to get the mapping tree dn mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0] inst.modify_s('cn=replica,%s' % mt.dn, [ (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX)) ]) def test_gssapi_repl(topology_m2): """Test gssapi authenticated replication agreement of two suppliers using KDC :id: 552850aa-afc3-473e-9c39-aae802b46f11 :setup: MMR with two suppliers :steps: 1. Create the locations on each supplier for the other supplier to bind to 2. Set on the cn=replica config to accept the other suppliers mapping under mapping tree 3. Create the replication agreements from M1->M2 and vice versa (M2->M1) 4. Set the replica bind method to sasl gssapi for both agreements 5. Initialize all the agreements 6. Create a user on M1 and check if user is created on M2 7. Create a user on M2 and check if user is created on M1 :expectedresults: 1. Locations should be added successfully 2. Configuration should be added successfully 3. Replication agreements should be added successfully 4. Bind method should be set to sasl gssapi for both agreements 5. Agreements should be initialized successfully 6. Test User should be created on M1 and M2 both 7. Test User should be created on M1 and M2 both """ return supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] # Create the locations on each supplier for the other to bind to. 
_create_machine_ou(supplier1) _create_machine_ou(supplier2) _create_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_1) _create_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_2) _create_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_1) _create_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_2) # Set on the cn=replica config to accept the other suppliers princ mapping under mapping tree _allow_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_2) _allow_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_1) # # Create all the agreements # # Creating agreement from supplier 1 to supplier 2 # Set the replica bind method to sasl gssapi properties = {RA_NAME: r'meTo_$host:$port', RA_METHOD: 'SASL/GSSAPI', RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) if not m1_m2_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m1_m2_agmt) # Creating agreement from supplier 2 to supplier 1 # Set the replica bind method to sasl gssapi properties = {RA_NAME: r'meTo_$host:$port', RA_METHOD: 'SASL/GSSAPI', RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) if not m2_m1_agmt: log.fatal("Fail to create a supplier -> supplier replica agreement") sys.exit(1) log.debug("%s created" % m2_m1_agmt) # Allow the replicas to get situated with the new agreements... time.sleep(5) # # Initialize all the agreements # supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) supplier1.waitForReplInit(m1_m2_agmt) # Check replication is working... 
if supplier1.testReplication(DEFAULT_SUFFIX, supplier2): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False # Add a user to supplier 1 _create_machine_account(supplier1, 'http/one.example.com') # Check it's on 2 time.sleep(5) assert (_check_machine_account(supplier2, 'http/one.example.com')) # Add a user to supplier 2 _create_machine_account(supplier2, 'http/two.example.com') # Check it's on 1 time.sleep(5) assert (_check_machine_account(supplier2, 'http/two.example.com')) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/000077500000000000000000000000001421664411400246135ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/__init__.py000066400000000000000000000000611421664411400267210ustar00rootroot00000000000000""" :Requirement: 389-ds-base: HealthCheck """389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/health_config_test.py000066400000000000000000000367441421664411400310340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os import subprocess from lib389.backend import Backends from lib389.cos import CosTemplates, CosPointerDefinitions from lib389.dbgen import dbgen_users from lib389.idm.account import Accounts from lib389.index import Index from lib389.plugins import ReferentialIntegrityPlugin from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs from lib389.topologies import topology_st from lib389.cli_ctl.health import health_check_run from lib389.paths import Paths pytestmark = pytest.mark.tier1 CMD_OUTPUT = 'No issues found.' 
JSON_OUTPUT = '[]' log = logging.getLogger(__name__) def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): args = FakeArgs() args.instance = instance.serverid args.verbose = instance.verbose args.list_errors = False args.list_checks = False args.check = ['config', 'refint', 'backends', 'monitor-disk-space', 'logs'] args.dry_run = False if json: log.info('Use healthcheck with --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) else: log.info('Use healthcheck without --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) log.info('Clear the log') topology.logcap.flush() @pytest.fixture(scope="function") def setup_ldif(topology_st, request): log.info("Generating LDIF...") ldif_dir = topology_st.standalone.get_ldif_dir() global import_ldif import_ldif = ldif_dir + '/basic_import.ldif' dbgen_users(topology_st.standalone, 5000, import_ldif, DEFAULT_SUFFIX) def fin(): log.info('Delete file') os.remove(import_ldif) request.addfinalizer(fin) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_logging_format_should_be_revised(topology_st): """Check if HealthCheck returns DSCLE0001 code :id: 277d7980-123b-481b-acba-d90921b9f5ac :setup: Standalone instance :steps: 1. Create DS instance 2. Set nsslapd-logging-hr-timestamps-enabled to 'off' 3. 
Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. Set nsslapd-logging-hr-timestamps-enabled to 'on' 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSCLE0001 code and related details 4. Healthcheck reports DSCLE0001 code and related details 5. Success 6. Healthcheck reports no issue found 7. Healthcheck reports no issue found """ RET_CODE = 'DSCLE0001' standalone = topology_st.standalone log.info('Set nsslapd-logging-hr-timestamps-enabled to off') standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'off') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) log.info('Set nsslapd-logging-hr-timestamps-enabled to off') standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'on') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_RI_plugin_is_misconfigured(topology_st): """Check if HealthCheck returns DSRILE0001 code :id: de2e90a2-89fe-472c-acdb-e13cbca5178d :setup: Standalone instance :steps: 1. Create DS instance 2. Configure the instance with Integrity Plugin 3. Set the referint-update-delay attribute of the RI plugin, to a value upper than 0 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option 6. Set the referint-update-delay attribute to 0 7. Use HealthCheck without --json option 8. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Healthcheck reports DSRILE0001 code and related details 5. Healthcheck reports DSRILE0001 code and related details 6. Success 7. 
Healthcheck reports no issue found 8. Healthcheck reports no issue found """ RET_CODE = 'DSRILE0001' standalone = topology_st.standalone plugin = ReferentialIntegrityPlugin(standalone) plugin.disable() plugin.enable() log.info('Set the referint-update-delay attribute to a value upper than 0') plugin.replace('referint-update-delay', '5') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) log.info('Set the referint-update-delay attribute back to 0') plugin.replace('referint-update-delay', '0') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_RI_plugin_missing_indexes(topology_st): """Check if HealthCheck returns DSRILE0002 code :id: 05c55e37-bb3e-48d1-bbe8-29c980f94f10 :setup: Standalone instance :steps: 1. Create DS instance 2. Configure the instance with Integrity Plugin 3. Change the index type of the member attribute index to ‘approx’ 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option 6. Set the index type of the member attribute index to ‘eq’ 7. Use HealthCheck without --json option 8. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Healthcheck reports DSRILE0002 code and related details 5. Healthcheck reports DSRILE0002 code and related details 6. Success 7. Healthcheck reports no issue found 8. 
Healthcheck reports no issue found """ RET_CODE = 'DSRILE0002' MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' standalone = topology_st.standalone log.info('Enable RI plugin') plugin = ReferentialIntegrityPlugin(standalone) plugin.disable() plugin.enable() log.info('Change the index type of the member attribute index to approx') index = Index(topology_st.standalone, MEMBER_DN) index.replace('nsIndexType', 'approx') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) log.info('Set the index type of the member attribute index back to eq') index.replace('nsIndexType', 'eq') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st): """Check if HealthCheck returns DSVIRTLE0001 code :id: 1055173b-21aa-4aaa-9e91-4dc6c5e0c01f :setup: Standalone instance :steps: 1. Create DS instance 2. Create a CoS definition entry 3. Create the matching CoS template entry, with postalcode as virtual attribute 4. Create an index for postalcode 5. Use HealthCheck without --json option 6. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Healthcheck reports DSVIRTLE0001 code and related details 6. 
Healthcheck reports DSVIRTLE0001 code and related details """ RET_CODE = 'DSVIRTLE0001' standalone = topology_st.standalone postal_index_properties = { 'cn': 'postalcode', 'nsSystemIndex': 'False', 'nsIndexType': ['eq', 'sub', 'pres'], } log.info('Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code') cos_pointer_properties = { 'cn': 'cosPointer', 'description': 'cosPointer example', 'cosTemplateDn': 'cn=cosTemplateExample,ou=People,dc=example,dc=com', 'cosAttribute': 'postalcode', } cos_pointer_definitions = CosPointerDefinitions(standalone, DEFAULT_SUFFIX, 'ou=People') cos_pointer_definitions.create(properties=cos_pointer_properties) log.info('Create CoS template') cos_template_properties = { 'cn': 'cosTemplateExample', 'postalcode': '117' } cos_templates = CosTemplates(standalone, DEFAULT_SUFFIX, 'ou=People') cos_templates.create(properties=cos_template_properties) log.info('Create an index for postalcode') backends = Backends(topology_st.standalone) ur_indexes = backends.get('userRoot').get_indexes() ur_indexes.create(properties=postal_index_properties) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") @pytest.mark.xfail(ds_is_older("1.4.2.4"), reason="May fail because of bug 1796050") def test_healthcheck_low_disk_space(topology_st): """Check if HealthCheck returns DSDSLE0001 code :id: 144b335d-077e-430c-9c0e-cd6b0f2f73c1 :setup: Standalone instance :steps: 1. Create DS instance 2. Get the free disk space for / 3. Use fallocate to create a file large enough for the use % be up 90% 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Healthcheck reports DSDSLE0001 code and related details 5. 
Healthcheck reports DSDSLE0001 code and related details """ RET_CODE = 'DSDSLE0001' standalone = topology_st.standalone file = '{}/foo'.format(standalone.ds_paths.log_dir) log.info('Count the disk space to allocate') total_size = int(re.findall(r'\d+', str(os.statvfs(standalone.ds_paths.log_dir)))[2]) * 4096 avail_size = round(int(re.findall(r'\d+', str(os.statvfs(standalone.ds_paths.log_dir)))[3]) * 4096) used_size = total_size - avail_size count_total_percent = total_size * 0.92 final_value = count_total_percent - used_size log.info('Create a file large enough for the use % be up 90%') subprocess.call(['fallocate', '-l', str(round(final_value)), file]) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) log.info('Remove created file') os.remove(file) @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.ds50791 @pytest.mark.bz1843567 @pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") def test_healthcheck_notes_unindexed_search(topology_st, setup_ldif): """Check if HealthCheck returns DSLOGNOTES0001 code :id: b25f7027-d43f-4ec2-ac49-9c9bb285df1d :setup: Standalone instance :steps: 1. Create DS instance 2. Set nsslapd-accesslog-logbuffering to off 3. Import users from created ldif file 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Healthcheck reports DSLOGNOTES0001 5. 
Healthcheck reports DSLOGNOTES0001 """ RET_CODE = 'DSLOGNOTES0001' standalone = topology_st.standalone log.info('Delete the previous access logs') topology_st.standalone.deleteAccessLogs() log.info('Set nsslapd-accesslog-logbuffering to off') standalone.config.set("nsslapd-accesslog-logbuffering", "off") log.info('Stopping the server and running offline import...') standalone.stop() assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=import_ldif) standalone.start() log.info('Use filters to reproduce "notes=A" in access log') accounts = Accounts(standalone, DEFAULT_SUFFIX) accounts.filter('(uid=test*)') log.info('Check that access log contains "notes=A"') assert standalone.ds_access_log.match(r'.*notes=A.*') run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) @pytest.mark.ds50791 @pytest.mark.bz1843567 @pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") def test_healthcheck_notes_unknown_attribute(topology_st, setup_ldif): """Check if HealthCheck returns DSLOGNOTES0002 code :id: 71ccd1d7-3c71-416b-9d2a-27f9f6633101 :setup: Standalone instance :steps: 1. Create DS instance 2. Set nsslapd-accesslog-logbuffering to off 3. Import users from created ldif file 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Healthcheck reports DSLOGNOTES0002 5. 
Healthcheck reports DSLOGNOTES0002 """ RET_CODE = 'DSLOGNOTES0002' standalone = topology_st.standalone log.info('Delete the previous access logs') topology_st.standalone.deleteAccessLogs() log.info('Set nsslapd-accesslog-logbuffering to off') standalone.config.set("nsslapd-accesslog-logbuffering", "off") log.info('Stopping the server and running offline import...') standalone.stop() assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=import_ldif) standalone.start() log.info('Use filters to reproduce "notes=F" in access log') accounts = Accounts(standalone, DEFAULT_SUFFIX) accounts.filter('(unknown=test)') log.info('Check that access log contains "notes=F"') assert standalone.ds_access_log.match(r'.*notes=F.*') run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/health_repl_test.py000066400000000000000000000235451421664411400305240ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.backend import Backend, Backends from lib389.idm.user import UserAccounts from lib389.replica import Changelog, ReplicationManager, Replicas from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs from lib389.topologies import topology_m2, topology_m3 from lib389.cli_ctl.health import health_check_run from lib389.paths import Paths CMD_OUTPUT = 'No issues found.' 
JSON_OUTPUT = '[]' ds_paths = Paths() log = logging.getLogger(__name__) def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): args = FakeArgs() args.instance = instance.serverid args.verbose = instance.verbose args.list_errors = False args.list_checks = False args.check = ['replication', 'backends:userroot:cl_trimming'] args.dry_run = False if json: log.info('Use healthcheck with --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) else: log.info('Use healthcheck without --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) log.info('Clear the log') topology.logcap.flush() def set_changelog_trimming(instance): log.info('Get the changelog enteries') inst_changelog = Changelog(instance, suffix=DEFAULT_SUFFIX) log.info('Set nsslapd-changelogmaxage to 30d') inst_changelog.add('nsslapd-changelogmaxage', '30') @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication_replica_not_reachable(topology_m2): """Check if HealthCheck returns DSREPLLE0005 code :id: d452a564-7b82-4c1a-b331-a71abbd82a10 :setup: Replicated topology :steps: 1. Create a replicated topology 2. On M1, set nsds5replicaport for the replication agreement to an unreachable port on the replica 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. 
On M1, set nsds5replicaport for the replication agreement to a reachable port number 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSREPLLE0005 code and related details 4. Healthcheck reports DSREPLLE0005 code and related details 5. Success 6. Healthcheck reports no issue found 7. Healthcheck reports no issue found """ RET_CODE = 'DSREPLLE0005' M1 = topology_m2.ms['supplier1'] M2 = topology_m2.ms['supplier2'] set_changelog_trimming(M1) log.info('Set nsds5replicaport for the replication agreement to an unreachable port') repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(M1, M2) replica_m1 = Replicas(M1).get(DEFAULT_SUFFIX) agmt_m1 = replica_m1.get_agreements().list()[0] agmt_m1.replace('nsds5replicaport', '4389') run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) log.info('Set nsds5replicaport for the replication agreement to a reachable port') agmt_m1.replace('nsDS5ReplicaPort', '{}'.format(M2.port)) repl.wait_for_replication(M1, M2) run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_changelog_trimming_not_configured(topology_m2): """Check if HealthCheck returns DSCLLE0001 code :id: c2165032-88ba-4978-a4ca-2fecfd8c35d8 :setup: Replicated topology :steps: 1. Create a replicated topology 2. On M1, check that value of nsslapd-changelogmaxage from cn=changelog5,cn=config is None 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. On M1, set nsslapd-changelogmaxage to 30d 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. 
Healthcheck reports DSCLLE0001 code and related details 4. Healthcheck reports DSCLLE0001 code and related details (json) 5. Success 6. Healthcheck reports no issue found 7. Healthcheck reports no issue found (json) """ M1 = topology_m2.ms['supplier1'] RET_CODE = 'DSCLLE0001' log.info('Get the changelog entries for M1') changelog_m1 = Changelog(M1, suffix=DEFAULT_SUFFIX) log.info('Check nsslapd-changelogmaxage value') if changelog_m1.get_attr_val('nsslapd-changelogmaxage') is not None: changelog_m1.remove_all('nsslapd-changelogmaxage') time.sleep(3) run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) set_changelog_trimming(M1) run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication_presence_of_conflict_entries(topology_m2): """Check if HealthCheck returns DSREPLLE0002 code :id: 43abc6c6-2075-42eb-8fa3-aa092ff64cba :setup: Replicated topology :steps: 1. Create a replicated topology 2. Create conflict entries : different entries renamed to the same dn 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSREPLLE0002 code and related details 4. 
Healthcheck reports DSREPLLE0002 code and related details """ RET_CODE = 'DSREPLLE0002' M1 = topology_m2.ms['supplier1'] M2 = topology_m2.ms['supplier2'] repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(M1, M2) topology_m2.pause_all_replicas() log.info("Create conflict entries") test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) user_num = 1000 test_users_m1.create_test_user(user_num, 2000) test_users_m2.create_test_user(user_num, 2000) topology_m2.resume_all_replicas() repl.test_replication_topology(topology_m2) run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) def test_healthcheck_non_replicated_suffixes(topology_m2): """Check if backend lint function unexpectedly throws exception :id: f922edf8-c527-4802-9f42-0b75bf97098a :setup: 2 MMR topology :steps: 1. Create a new suffix: cn=changelog 2. Call healthcheck (there should not be any exceptions raised) :expectedresults: 1. Success 2. Success """ inst = topology_m2.ms['supplier1'] # Create second suffix backends = Backends(inst) backends.create(properties={'nsslapd-suffix': "cn=changelog", 'name': 'changelog'}) # Call healthcheck args = FakeArgs() args.instance = inst.serverid args.verbose = inst.verbose args.list_errors = False args.list_checks = False args.check = ['backends'] args.dry_run = False args.json = False health_check_run(inst, topology_m2.logcap.log, args) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication_out_of_sync_broken(topology_m3): """Check if HealthCheck returns DSREPLLE0001 code :id: b5ae7cae-de0f-4206-95a4-f81538764bea :setup: 3 MMR topology :steps: 1. Create a 3 suppliers full-mesh topology, on M2 and M3 don’t set nsds5BeginReplicaRefresh:start 2. Perform modifications on M1 3. Use HealthCheck without --json option 4. 
Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSREPLLE0001 code and related details 4. Healthcheck reports DSREPLLE0001 code and related details """ RET_CODE = 'DSREPLLE0001' M1 = topology_m3.ms['supplier1'] M2 = topology_m3.ms['supplier2'] M3 = topology_m3.ms['supplier3'] log.info('Break supplier2 and supplier3') replicas = Replicas(M2) replica = replicas.list()[0] replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') replicas = Replicas(M3) replica = replicas.list()[0] replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') log.info('Perform update on supplier1') test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) test_users_m1.create_test_user(1005, 2000) run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/health_security_test.py000066400000000000000000000310021421664411400314140ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os import subprocess import distro import time from datetime import * from lib389.config import Encryption from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs from lib389.topologies import topology_st from lib389.cli_ctl.health import health_check_run from lib389.paths import Paths CMD_OUTPUT = 'No issues found.' 
JSON_OUTPUT = '[]' ds_paths = Paths() libfaketime = pytest.importorskip('libfaketime') libfaketime.reexec_if_needed() log = logging.getLogger(__name__) def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): args = FakeArgs() args.instance = instance.serverid args.verbose = instance.verbose args.list_errors = False args.list_checks = False args.check = ['config', 'encryption', 'tls', 'fschecks'] args.dry_run = False if json: log.info('Use healthcheck with --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) else: log.info('Use healthcheck without --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) log.info('Clear the log') topology.logcap.flush() @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_insecure_pwd_hash_configured(topology_st): """Check if HealthCheck returns DSCLE0002 code :id: 6baf949c-a5eb-4f4e-83b4-8302e677758a :setup: Standalone instance :steps: 1. Create DS instance 2. Configure an insecure passwordStorageScheme (as SHA) for the instance 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2_SHA256 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. 
Healthcheck reports DSCLE0002 code and related details 4. Healthcheck reports DSCLE0002 code and related details 5. Success 6. Healthcheck reports no issue found 7. Healthcheck reports no issue found """ RET_CODE = 'DSCLE0002' standalone = topology_st.standalone log.info('Configure an insecure passwordStorageScheme (SHA)') standalone.config.set('passwordStorageScheme', 'SHA') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) if is_fips(): log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to SSHA512 in FIPS mode') standalone.config.set('passwordStorageScheme', 'SSHA512') standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA512') else: log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2_SHA256') standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') standalone.config.set('nsslapd-rootpwstoragescheme', 'PBKDF2_SHA256') run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_min_allowed_tls_version_too_low(topology_st): """Check if HealthCheck returns DSELE0001 code :id: a4be3390-9508-4827-8f82-e4e21081caab :setup: Standalone instance :steps: 1. Create DS instance 2. Set the TLS minimum version to TLS1.0 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. Set the TLS minimum version to TLS1.2 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSELE0001 code and related details 4. Healthcheck reports DSELE0001 code and related details 5. Success 6. Healthcheck reports no issue found 7. 
Healthcheck reports no issue found """ RET_CODE = 'DSELE0001' HIGHER_VS = 'TLS1.2' SMALL_VS = 'TLS1.0' RHEL = 'Red Hat Enterprise Linux' standalone = topology_st.standalone standalone.enable_tls() # We have to update-crypto-policies to LEGACY, otherwise we can't set TLS1.0 log.info('Updating crypto policies') assert subprocess.check_call(['update-crypto-policies', '--set', 'LEGACY']) == 0 log.info('Set the TLS minimum version to TLS1.0') enc = Encryption(standalone) enc.replace('sslVersionMin', SMALL_VS) standalone.restart() run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) log.info('Set the TLS minimum version to TLS1.2') enc.replace('sslVersionMin', HIGHER_VS) standalone.restart() run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) if RHEL in distro.linux_distribution(): log.info('Set crypto-policies back to DEFAULT') assert subprocess.check_call(['update-crypto-policies', '--set', 'DEFAULT']) == 0 @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_resolvconf_bad_file_perm(topology_st): """Check if HealthCheck returns DSPERMLE0001 code :id: 8572b9e9-70e7-49e9-b745-864f6f2468a8 :setup: Standalone instance :steps: 1. Create DS instance 2. Change the /etc/resolv.conf file permissions to 444 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. set /etc/resolv.conf permissions to 644 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSPERMLE0001 code and related details 4. Healthcheck reports DSPERMLE0001 code and related details 5. Success 6. Healthcheck reports no issue found 7. 
Healthcheck reports no issue found """ RET_CODE = 'DSPERMLE0001' standalone = topology_st.standalone log.info('Change the /etc/resolv.conf file permissions to 444') os.chmod('/etc/resolv.conf', 0o444) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) log.info('Change the /etc/resolv.conf file permissions to 644') os.chmod('/etc/resolv.conf', 0o644) run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_pwdfile_bad_file_perm(topology_st): """Check if HealthCheck returns DSPERMLE0002 code :id: ec137d66-bad6-4eed-90bd-fc1d572bbe1f :setup: Standalone instance :steps: 1. Create DS instance 2. Change the /etc/dirsrv/slapd-xxx/pwdfile.txt permissions to 000 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. Change the /etc/dirsrv/slapd-xxx/pwdfile.txt permissions to 400 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSPERMLE0002 code and related details 4. Healthcheck reports DSPERMLE0002 code and related details 5. Success 6. Healthcheck reports no issue found 7. 
Healthcheck reports no issue found """ RET_CODE = 'DSPERMLE0002' standalone = topology_st.standalone cert_dir = standalone.ds_paths.cert_dir log.info('Change the /etc/dirsrv/slapd-{}/pwdfile.txt permissions to 000'.format(standalone.serverid)) os.chmod('{}/pwdfile.txt'.format(cert_dir), 0o000) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) log.info('Change the /etc/dirsrv/slapd-{}/pwdfile.txt permissions to 400'.format(standalone.serverid)) os.chmod('{}/pwdfile.txt'.format(cert_dir), 0o400) run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_certif_expiring_within_30d(topology_st): """Check if HealthCheck returns DSCERTLE0001 code :id: c2165032-88ba-4978-a4ca-2fecfd8c35d8 :setup: Standalone instance :steps: 1. Create DS instance 2. Use libfaketime to tell the process the date is within 30 days before certificate expiration 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSCERTLE0001 code and related details 4. 
Healthcheck reports DSCERTLE0001 code and related details """ RET_CODE = 'DSCERTLE0001' standalone = topology_st.standalone standalone.enable_tls() # Cert is valid two years from today, so we count the date that is within 30 days before certificate expiration date_future = datetime.now() + timedelta(days=701) with libfaketime.fake_time(date_future): time.sleep(1) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) # Try again with real time just to make sure no issues were found run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_certif_expired(topology_st): """Check if HealthCheck returns DSCERTLE0002 code :id: ceff2c22-62c0-4fd9-b737-930a88458d68 :setup: Standalone instance :steps: 1. Create DS instance 2. Use libfaketime to tell the process the date is after certificate expiration 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSCERTLE0002 code and related details 4. 
Healthcheck reports DSCERTLE0002 code and related details """ RET_CODE = 'DSCERTLE0002' standalone = topology_st.standalone standalone.enable_tls() # Cert is valid two years from today, so we count the date that is after expiration date_future = datetime.now() + timedelta(days=731) with libfaketime.fake_time(date_future): time.sleep(1) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) # Try again with real time just to make sure no issues were found run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/health_sync_test.py000066400000000000000000000100661421664411400305300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import os import time from datetime import * from lib389.idm.user import UserAccounts from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs from lib389.topologies import topology_m3 from lib389.cli_ctl.health import health_check_run from lib389.paths import Paths ds_paths = Paths() log = logging.getLogger(__name__) def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): args = FakeArgs() args.instance = instance.serverid args.verbose = instance.verbose args.list_errors = False args.list_checks = False args.check = ['replication'] args.dry_run = False if json: log.info('Use healthcheck with --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) else: log.info('Use healthcheck without --json option') args.json = json health_check_run(instance, topology.logcap.log, args) assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) log.info('Clear the log') topology.logcap.flush() # This test is in separate file because it is timeout specific @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): """Check if HealthCheck returns DSREPLLE0003 code :id: 8305000d-ba4d-4c00-8331-be0e8bd92150 :setup: 3 MMR topology :steps: 1. 
Create a 3 suppliers full-mesh topology, all replicas being synchronized 2. Stop M1 3. Perform an update on M2 and M3. 4. Check M2 and M3 are synchronized. 5. From M2, reinitialize the M3 agreement 6. Stop M2 and M3 7. Restart M1 8. Start M3 9. Use HealthCheck without --json option 10. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Healthcheck reports DSREPLLE0003 code and related details 10. Healthcheck reports DSREPLLE0003 code and related details """ RET_CODE = 'DSREPLLE0003' M1 = topology_m3.ms['supplier1'] M2 = topology_m3.ms['supplier2'] M3 = topology_m3.ms['supplier3'] log.info('Stop supplier1') M1.stop() log.info('Perform update on supplier2 and supplier3') test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) test_users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) test_users_m2.create_test_user(1000, 2000) for user_num in range(1001, 3000): test_users_m3.create_test_user(user_num, 2000) time.sleep(2) log.info('Stop M2 and M3') M2.stop() M3.stop() log.info('Start M1 first, then M2, so that M2 acquires M1') M1.start() M2.start() time.sleep(2) log.info('Start M3 which should not be able to acquire M1 since M2 is updating it') M3.start() time.sleep(2) run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py000066400000000000000000000415041421664411400304730ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.backend import Backends from lib389.mappingTree import MappingTrees from lib389.replica import Changelog5, Changelog from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs from lib389.topologies import topology_st, topology_no_sample, topology_m2 from lib389.cli_ctl.health import health_check_run from lib389.paths import Paths CMD_OUTPUT = 'No issues found.' JSON_OUTPUT = '[]' CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) ds_paths = Paths() log = logging.getLogger(__name__) def run_healthcheck_and_flush_log(topology, instance, searched_code=None, json=False, searched_code2=None, list_checks=False, list_errors=False, check=None, searched_list=None): args = FakeArgs() args.instance = instance.serverid args.verbose = instance.verbose args.list_errors = list_errors args.list_checks = list_checks args.check = check args.dry_run = False args.json = json log.info('Use healthcheck with --json == {} option'.format(json)) health_check_run(instance, topology.logcap.log, args) if searched_list is not None: for item in searched_list: assert topology.logcap.contains(item) log.info('Healthcheck returned searched item: %s' % item) else: assert topology.logcap.contains(searched_code) log.info('Healthcheck returned searched code: %s' % searched_code) if searched_code2 is not None: assert topology.logcap.contains(searched_code2) log.info('Healthcheck returned searched code: %s' % searched_code2) log.info('Clear the log') topology.logcap.flush() def set_changelog_trimming(instance): log.info('Set nsslapd-changelogmaxage to 30d') if ds_supports_new_changelog(): cl = Changelog(instance, DEFAULT_SUFFIX) else: cl = Changelog5(instance) cl.replace('nsslapd-changelogmaxage', '30') def test_healthcheck_disabled_suffix(topology_st): """Test that we report when a suffix is disabled :id: 49ebce72-7e7b-4eff-8bd9-8384d12251b4 :setup: Standalone Instance :steps: 1. 
Disable suffix 2. Use HealthCheck without --json option 3. Use HealthCheck with --json option :expectedresults: 1. Success 2. HealthCheck should return code DSBLE0002 3. HealthCheck should return code DSBLE0002 """ RET_CODE = 'DSBLE0002' mts = MappingTrees(topology_st.standalone) mt = mts.get(DEFAULT_SUFFIX) mt.replace("nsslapd-state", "disabled") run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=True) # reset the suffix state mt.replace("nsslapd-state", "backend") @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_standalone(topology_st): """Check functionality of HealthCheck Tool on standalone instance with no errors :id: 4844b446-3939-4fbd-b14b-293b20bb8be0 :setup: Standalone instance :steps: 1. Create DS instance 2. Use HealthCheck without --json option 3. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success """ standalone = topology_st.standalone run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT,json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50746 @pytest.mark.bz1816851 @pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") def test_healthcheck_list_checks(topology_st): """Check functionality of HealthCheck Tool with --list-checks option :id: 44b1d8d3-b94a-4c2d-9233-ebe876802803 :setup: Standalone instance :steps: 1. Create DS instance 2. Set list_checks to True 3. Run HealthCheck :expectedresults: 1. Success 2. Success 3. 
Success """ output_list = ['config:hr_timestamp', 'config:passwordscheme', 'backends:userroot:cl_trimming', 'backends:userroot:mappingtree', 'backends:userroot:search', 'backends:userroot:virt_attrs', 'encryption:check_tls_version', 'fschecks:file_perms', 'refint:attr_indexes', 'refint:update_delay', 'monitor-disk-space:disk_space', 'replication:agmts_status', 'replication:conflicts', 'dseldif:nsstate', 'tls:certificate_expiration', 'logs:notes'] standalone = topology_st.standalone run_healthcheck_and_flush_log(topology_st, standalone, json=False, list_checks=True, searched_list=output_list) @pytest.mark.ds50746 @pytest.mark.bz1816851 @pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") def test_healthcheck_list_errors(topology_st): """Check functionality of HealthCheck Tool with --list-errors option :id: 295c07c0-a939-4d5e-b3a6-b4c9d0da3897 :setup: Standalone instance :steps: 1. Create DS instance 2. Set list_errors to True 3. Run HealthCheck :expectedresults: 1. Success 2. Success 3. 
Success """ output_list = ['DSBLE0001 :: Possibly incorrect mapping tree', 'DSBLE0002 :: Unable to query backend', 'DSBLE0003 :: Uninitialized backend database', 'DSCERTLE0001 :: Certificate about to expire', 'DSCERTLE0002 :: Certificate expired', 'DSCLE0001 :: Different log timestamp format', 'DSCLE0002 :: Weak passwordStorageScheme', 'DSCLLE0001 :: Changelog trimming not configured', 'DSDSLE0001 :: Low disk space', 'DSELE0001 :: Weak TLS protocol version', 'DSLOGNOTES0001 :: Unindexed Search', 'DSLOGNOTES0002 :: Unknown Attribute In Filter', 'DSPERMLE0001 :: Incorrect file permissions', 'DSPERMLE0002 :: Incorrect security database file permissions', 'DSREPLLE0001 :: Replication agreement not set to be synchronized', 'DSREPLLE0002 :: Replication conflict entries found', 'DSREPLLE0003 :: Unsynchronized replication agreement', 'DSREPLLE0004 :: Unable to get replication agreement status', 'DSREPLLE0005 :: Replication consumer not reachable', 'DSRILE0001 :: Referential integrity plugin may be slower', 'DSRILE0002 :: Referential integrity plugin configured with unindexed attribute', 'DSSKEWLE0001 :: Medium time skew', 'DSSKEWLE0002 :: Major time skew', 'DSSKEWLE0003 :: Extensive time skew', 'DSVIRTLE0001 :: Virtual attribute indexed'] standalone = topology_st.standalone run_healthcheck_and_flush_log(topology_st, standalone, json=False, list_errors=True, searched_list=output_list) @pytest.mark.ds50746 @pytest.mark.bz1816851 @pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") def test_healthcheck_check_option(topology_st): """Check functionality of HealthCheck Tool with --check option :id: ee382d6f-8bec-4236-ace4-4700d19dc9fd :setup: Standalone instance :steps: 1. Create DS instance 2. Set check to value from list 3. Run HealthCheck :expectedresults: 1. Success 2. Success 3. 
Success """ output_list = ['config:hr_timestamp', 'config:passwordscheme', 'backends:userroot:cl_trimming', 'backends:userroot:mappingtree', 'backends:userroot:search', 'backends:userroot:virt_attrs', 'encryption:check_tls_version', 'fschecks:file_perms', 'refint:attr_indexes', 'refint:update_delay', 'monitor-disk-space:disk_space', 'replication:agmts_status', 'replication:conflicts', 'dseldif:nsstate', 'tls:certificate_expiration', 'logs:notes'] standalone = topology_st.standalone for item in output_list: pattern = 'Checking ' + item log.info('Check {}'.format(item)) run_healthcheck_and_flush_log(topology_st, standalone, searched_code=pattern, json=False, check=[item], searched_code2=CMD_OUTPUT) run_healthcheck_and_flush_log(topology_st, standalone, searched_code=JSON_OUTPUT, json=True, check=[item]) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_standalone_tls(topology_st): """Check functionality of HealthCheck Tool on TLS enabled standalone instance with no errors :id: 4844b446-3939-4fbd-b14b-293b20bb8be0 :setup: Standalone instance :steps: 1. Create DS instance 2. Enable TLS 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone standalone.enable_tls() run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT,json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication(topology_m2): """Check functionality of HealthCheck Tool on replication instance with no errors :id: 9ee6d491-d6d7-4c2c-ac78-70d08f054166 :setup: 2 MM topology :steps: 1. Create a two suppliers replication topology 2. Set nsslapd-changelogmaxage to 30d 3. Use HealthCheck without --json option 4. 
Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Success """ M1 = topology_m2.ms['supplier1'] M2 = topology_m2.ms['supplier2'] # If we don't set changelog trimming, we will get error DSCLLE0001 set_changelog_trimming(M1) set_changelog_trimming(M2) log.info('Run healthcheck for supplier1') run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) log.info('Run healthcheck for supplier2') run_healthcheck_and_flush_log(topology_m2, M2, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M2, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication_tls(topology_m2): """Check functionality of HealthCheck Tool on replication instance with no errors :id: 9ee6d491-d6d7-4c2c-ac78-70d08f054166 :setup: 2 MM topology :steps: 1. Create a two suppliers replication topology 2. Enable TLS 3. Set nsslapd-changelogmaxage to 30d 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ M1 = topology_m2.ms['supplier1'] M2 = topology_m2.ms['supplier2'] M1.enable_tls() M2.enable_tls() log.info('Run healthcheck for supplier1') run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) log.info('Run healthcheck for supplier2') run_healthcheck_and_flush_log(topology_m2, M2, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_m2, M2, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") @pytest.mark.xfail(ds_is_older("1.4.3"),reason="Might fail because of bz1835619") def test_healthcheck_backend_missing_mapping_tree(topology_st): """Check if HealthCheck returns DSBLE0001 and DSBLE0003 code :id: 4c83ffcf-01a4-4ec8-a3d2-01022b566225 :setup: Standalone instance :steps: 1. Create DS instance 2. Disable the dc=example,dc=com backend suffix entry in the mapping tree 3. Use HealthCheck without --json option 4. Use HealthCheck with --json option 5. Enable the dc=example,dc=com backend suffix entry in the mapping tree 6. Use HealthCheck without --json option 7. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Healthcheck reports DSBLE0001 and DSBLE0003 codes and related details 4. Healthcheck reports DSBLE0001 and DSBLE0003 codes and related details 5. Success 6. Healthcheck reports no issue found 7. 
Healthcheck reports no issue found """ RET_CODE1 = 'DSBLE0001' RET_CODE2 = 'DSBLE0003' standalone = topology_st.standalone log.info('Delete the dc=example,dc=com backend suffix entry in the mapping tree') mts = MappingTrees(standalone) mt = mts.get(DEFAULT_SUFFIX) mt.delete() run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE1, json=False, searched_code2=RET_CODE2) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE1, json=True, searched_code2=RET_CODE2) log.info('Create the dc=example,dc=com backend suffix entry') mts.create(properties={ 'cn': DEFAULT_SUFFIX, 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRoot', }) run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) @pytest.mark.ds50873 @pytest.mark.bz1796343 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") @pytest.mark.xfail(reason="Will fail because of bz1837315. Set proper version after bug is fixed") def test_healthcheck_unable_to_query_backend(topology_st): """Check if HealthCheck returns DSBLE0002 code :id: 716b1ff1-94bd-4780-98b8-96ff8ef21e30 :setup: Standalone instance :steps: 1. Create DS instance 2. Create a new root suffix and database 3. Disable new suffix 4. Use HealthCheck without --json option 5. Use HealthCheck with --json option :expectedresults: 1. Success 2. Success 3. Success 4. HealthCheck should return code DSBLE0002 5. 
HealthCheck should return code DSBLE0002 """ RET_CODE = 'DSBLE0002' NEW_SUFFIX = 'dc=test,dc=com' NEW_BACKEND = 'userData' standalone = topology_st.standalone log.info('Create new suffix') backends = Backends(standalone) backends.create(properties={ 'cn': NEW_BACKEND, 'nsslapd-suffix': NEW_SUFFIX, }) log.info('Disable the newly created suffix') mts = MappingTrees(standalone) mt_new = mts.get(NEW_SUFFIX) mt_new.replace('nsslapd-state', 'disabled') run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) log.info('Enable the suffix again and check if nothing is broken') mt_new.replace('nsslapd-state', 'backend') run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) @pytest.mark.ds50873 @pytest.mark.bz1796343 @pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_database_not_initialized(topology_no_sample): """Check if HealthCheck returns DSBLE0003 code :id: 716b1ff1-94bd-4780-98b8-96ff8ef21e30 :setup: Standalone instance :steps: 1. Create DS instance without example entries 2. Use HealthCheck without --json option 3. Use HealthCheck with --json option :expectedresults: 1. Success 2. HealthCheck should return code DSBLE0003 3. 
HealthCheck should return code DSBLE0003 """ RET_CODE = 'DSBLE0003' standalone = topology_no_sample.standalone run_healthcheck_and_flush_log(topology_no_sample, standalone, RET_CODE, json=False) run_healthcheck_and_flush_log(topology_no_sample, standalone, RET_CODE, json=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/import/000077500000000000000000000000001421664411400236625ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/import/__init__.py000066400000000000000000000000661421664411400257750ustar00rootroot00000000000000""" :Requirement: 389-ds-base: DataBase Import """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/import/import_test.py000066400000000000000000000420531421664411400266110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ Will test Import (Offline/Online) """ import os import pytest import time import glob import logging from lib389.topologies import topology_st as topo from lib389._constants import DEFAULT_SUFFIX, TaskWarning from lib389.dbgen import dbgen_users from lib389.tasks import ImportTask from lib389.index import Indexes from lib389.monitor import Monitor from lib389.backend import Backends from lib389.config import LDBMConfig from lib389.utils import ds_is_newer from lib389.idm.user import UserAccount from lib389.idm.account import Accounts pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _generate_ldif(topo, no_no): """ Will generate the ldifs """ ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' if os.path.isfile(import_ldif): pass else: dbgen_users(topo.standalone, no_no, import_ldif, DEFAULT_SUFFIX) def _check_users_before_test(topo, no_no): """ Will check no user before test. 
""" accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) assert len(accounts.filter('(uid=*)')) < no_no def _search_for_user(topo, no_n0): """ Will make sure that users are imported """ accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) assert len(accounts.filter('(uid=*)')) == no_n0 @pytest.fixture(scope="function") def _import_clean(request, topo): def finofaci(): accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) for i in accounts.filter('(uid=*)'): UserAccount(topo.standalone, i.dn).delete() ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' if os.path.exists(import_ldif): os.remove(import_ldif) syntax_err_ldif = ldif_dir + '/syntax_err.dif' if os.path.exists(syntax_err_ldif): os.remove(syntax_err_ldif) request.addfinalizer(finofaci) def _import_offline(topo, no_no): """ Will import ldifs offline """ _check_users_before_test(topo, no_no) ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' # Generate ldif _generate_ldif(topo, no_no) # Offline import topo.standalone.stop() t1 = time.time() if not topo.standalone.ldif2db('userRoot', None, None, None, import_ldif): assert False total_time = time.time() - t1 topo.standalone.start() _search_for_user(topo, no_no) return total_time def _import_online(topo, no_no): """ Will import ldifs online """ _check_users_before_test(topo, no_no) ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' _generate_ldif(topo, no_no) # Online import_task = ImportTask(topo.standalone) import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) # Wait a bit till the task is created and available for searching time.sleep(0.5) # Good as place as any to quick test the task has some expected attributes if ds_is_newer('1.4.1.2'): assert import_task.present('nstaskcreated') assert import_task.present('nstasklog') assert import_task.present('nstaskcurrentitem') assert import_task.present('nstasktotalitems') assert 
import_task.present('ttl') import_task.wait() topo.standalone.searchAccessLog('ADD dn="cn=import') topo.standalone.searchErrorsLog('import userRoot: Import complete.') _search_for_user(topo, no_no) def _create_bogus_ldif(topo): """ Will create bogus ldifs """ ldif_dir = topo.standalone.get_ldif_dir() line1 = r'dn: cn=Eladio \"A\"\, Santabarbara\, (A\, B\, C),ou=Accounting, dc=example,dc=com' line2 = """objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson cn: Eladio "A", Santabarbara, (A, B, C) cn: Eladio Santabarbara sn: Santabarbara givenName: Eladio ou: Accounting""" with open(f'{ldif_dir}/bogus.dif', 'w') as out: out.write(f'{line1}{line2}') out.close() import_ldif1 = ldif_dir + '/bogus.ldif' return import_ldif1 def _create_syntax_err_ldif(topo): """ Create an ldif file, which contains an entry that violates syntax check """ ldif_dir = topo.standalone.get_ldif_dir() line1 = """dn: dc=example,dc=com objectClass: top objectClass: domain dc: example dn: ou=groups,dc=example,dc=com objectClass: top objectClass: organizationalUnit ou: groups dn: uid=JHunt,ou=groups,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetOrgPerson objectclass: inetUser cn: James Hunt sn: Hunt uid: JHunt givenName: """ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out: out.write(f'{line1}') os.chmod(out.name, 0o777) out.close() import_ldif1 = ldif_dir + '/syntax_err.ldif' return import_ldif1 def test_import_with_index(topo, _import_clean): """ Add an index, then import via cn=tasks :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 :setup: Standalone Instance :steps: 1. Creating the room number index 2. Importing online 3. Import is done -- verifying that it worked :expected results: 1. Operation successful 2. Operation successful 3. 
Operation successful """ place = topo.standalone.dbdir assert not glob.glob(f'{place}/userRoot/roomNumber.db*', recursive=True) # Creating the room number index indexes = Indexes(topo.standalone) indexes.create(properties={ 'cn': 'roomNumber', 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}) topo.standalone.restart() # Importing online _import_online(topo, 5) # Import is done -- verifying that it worked assert glob.glob(f'{place}/userRoot/roomNumber.db*', recursive=True) def test_online_import_with_warning(topo, _import_clean): """ Import an ldif file with syntax errors, verify skipped entry warning code :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 :setup: Standalone Instance :steps: 1. Create standalone Instance 2. Create an ldif file with an entry that violates syntax check (empty givenname) 3. Online import of troublesome ldif file :expected results: 1. Successful import with skipped entry warning """ topo.standalone.restart() import_task = ImportTask(topo.standalone) import_ldif1 = _create_syntax_err_ldif(topo) # Importing the offending ldif file - online import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX) # There is just a single entry in this ldif import_task.wait(5) # Check for the task nsTaskWarning attr, make sure its set to skipped entry code assert import_task.present('nstaskwarning') assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn() def test_crash_on_ldif2db(topo, _import_clean): """ Delete the cn=monitor entry for an LDBM backend instance. Doing this will cause the DS to re-create that entry the next time it starts up. :id: aecad390-9352-11ea-8a31-8c16451d917b :setup: Standalone Instance :steps: 1. Delete the cn=monitor entry for an LDBM backend instance 2. Restart the server and verify that the LDBM monitor entry was re-created. :expected results: 1. Operation successful 2. Operation successful """ # Delete the cn=monitor entry for an LDBM backend instance. 
Doing this will # cause the DS to re-create that entry the next time it starts up. monitor = Monitor(topo.standalone) monitor.delete() # Restart the server and verify that the LDBM monitor entry was re-created. _import_offline(topo, 5) @pytest.mark.bz185477 def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_clean): """Should reject import of entries that's missing parent suffix :id: 27195cea-9c0e-11ea-800b-8c16451d917b :setup: Standalone Instance :steps: 1. Import the offending LDIF data - offline 2. Violates schema, ending line :expected results: 1. Operation successful 2. Operation Fail """ import_ldif1 = _create_bogus_ldif(topo) # Import the offending LDIF data - offline topo.standalone.stop() topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) # which violates schema, ending line topo.standalone.searchErrorsLog('import_producer - import userRoot: Skipping entry ' '"dc=example,dc=com" which violates schema') topo.standalone.start() def test_ldif2db_syntax_check(topo, _import_clean): """ldif2db should return a warning when a skipped entry has occured. :id: 85e75670-42c5-4062-9edc-7f117c97a06f :setup: 1. Standalone Instance 2. Ldif entry that violates syntax check rule (empty givenname) :steps: 1. Create an ldif file which violates the syntax checking rule 2. Stop the server and import ldif file with ldif2db :expected results: 1. ldif2db import returns a warning to signify skipped entries """ import_ldif1 = _create_syntax_err_ldif(topo) # Import the offending LDIF data - offline topo.standalone.stop() ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY topo.standalone.start() def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): """Report during startup if nsslapd-cachememsize is too small :id: 1aa8cbda-9c0e-11ea-9297-8c16451d917b :setup: Standalone Instance :steps: 1. Set nsslapd-cache-autosize to 0 2. Change cachememsize 3. 
Check that cachememsize is sufficiently small 4. Import some users to make id2entry.db big 5. Warning message should be there in error logs :expected results: 1. Operation successful 2. Operation successful 3. Operation successful 4. Operation successful 5. Operation successful """ config = LDBMConfig(topo.standalone) backend = Backends(topo.standalone).list()[0] # Set nsslapd-cache-autosize to 0 config.replace('nsslapd-cache-autosize', '0') # Change cachememsize backend.replace('nsslapd-cachememsize', '1') # Check that cachememsize is sufficiently small assert int(backend.get_attr_val_utf8('nsslapd-cachememsize')) < 1500000 # Import some users to make id2entry.db big _import_offline(topo, 20) # warning message should look like assert topo.standalone.searchErrorsLog('INFO - ldbm_instance_config_cachememsize_set - ' 'force a minimal value 512000') @pytest.fixture(scope="function") def _toggle_private_import_mem(request, topo): config = LDBMConfig(topo.standalone) config.replace_many( ('nsslapd-db-private-import-mem', 'on'), ('nsslapd-import-cache-autosize', '0')) def finofaci(): # nsslapd-import-cache-autosize: off and # nsslapd-db-private-import-mem: off config.replace_many( ('nsslapd-db-private-import-mem', 'off')) request.addfinalizer(finofaci) #unstable or unstatus tests, skipped for now #@pytest.mark.flaky(max_runs=2, min_passes=1) def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): """With nsslapd-db-private-import-mem: on is faster import. :id: 3044331c-9c0e-11ea-ac9f-8c16451d917b :setup: Standalone Instance :steps: 1. Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 2. Measure offline import time duration total_time1 3. Now nsslapd-db-private-import-mem:off 4. Measure offline import time duration total_time2 5. total_time1 < total_time2 6. Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 7. Measure offline import time duration total_time1 8. Now nsslapd-db-private-import-mem:off 9. 
Measure offline import time duration total_time2 10. total_time1 < total_time2 :expected results: 1. Operation successful 2. Operation successful 3. Operation successful 4. Operation successful 5. Operation successful 6. Operation successful 7. Operation successful 8. Operation successful 9. Operation successful 10. Operation successful """ # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 config = LDBMConfig(topo.standalone) # Measure offline import time duration total_time1 total_time1 = _import_offline(topo, 1000) # Now nsslapd-db-private-import-mem:off config.replace('nsslapd-db-private-import-mem', 'off') accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) for i in accounts.filter('(uid=*)'): UserAccount(topo.standalone, i.dn).delete() # Measure offline import time duration total_time2 total_time2 = _import_offline(topo, 1000) # total_time1 < total_time2 log.info("total_time1 = %f" % total_time1) log.info("total_time2 = %f" % total_time2) assert total_time1 < total_time2 # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 config.replace_many( ('nsslapd-db-private-import-mem', 'on'), ('nsslapd-import-cache-autosize', '-1')) for i in accounts.filter('(uid=*)'): UserAccount(topo.standalone, i.dn).delete() # Measure offline import time duration total_time1 total_time1 = _import_offline(topo, 1000) # Now nsslapd-db-private-import-mem:off config.replace('nsslapd-db-private-import-mem', 'off') for i in accounts.filter('(uid=*)'): UserAccount(topo.standalone, i.dn).delete() # Measure offline import time duration total_time2 total_time2 = _import_offline(topo, 1000) # total_time1 < total_time2 log.info("toral_time1 = %f" % total_time1) log.info("total_time2 = %f" % total_time2) assert total_time1 < total_time2 @pytest.mark.bz175063 def test_entry_with_escaped_characters_fails_to_import_and_index(topo, _import_clean): """If missing entry_id is found, skip it and continue reading the primary db to be re indexed. 
:id: 358c938c-9c0e-11ea-adbc-8c16451d917b :setup: Standalone Instance :steps: 1. Import the example data from ldif. 2. Remove some of the other entries that were successfully imported. 3. Now re-index the database. 4. Should not return error. :expected results: 1. Operation successful 2. Operation successful 3. Operation successful 4. Operation successful """ # Import the example data from ldif _import_offline(topo, 10) count = 0 # Remove some of the other entries that were successfully imported for user1 in [user for user in Accounts(topo.standalone, DEFAULT_SUFFIX).list() if user.dn.startswith('uid')]: if count <= 2: UserAccount(topo.standalone, user1.dn).delete() count += 1 # Now re-index the database topo.standalone.stop() topo.standalone.db2index() topo.standalone.start() # Should not return error. assert not topo.standalone.searchErrorsLog('error') assert not topo.standalone.searchErrorsLog('foreman fifo error') def test_import_perf_after_failure(topo): """Make an import fail by specifying the wrong LDIF file name, then try the import with the correct name. Make sure the import performance is what we expect. :id: d21dc67f-475e-402a-be9e-3eeb9181c156 :setup: Standalone Instance :steps: 1. Build LDIF file 2. Import invalid LDIF filename 3. Import valid LDIF filename 4. Import completes in a timely manner :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/perf_import.ldif' bad_import_ldif = ldif_dir + '/perf_import_typo.ldif' # Build LDIF file dbgen_users(topo.standalone, 30000, import_ldif, DEFAULT_SUFFIX) # Online import which fails import_task = ImportTask(topo.standalone) import_task.import_suffix_from_ldif(ldiffile=bad_import_ldif, suffix=DEFAULT_SUFFIX) import_task.wait() # Valid online import time.sleep(1) import_task = ImportTask(topo.standalone) import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) import_task.wait(30) # If things go wrong import takes a lot longer than this assert import_task.is_complete() # Restart server topo.standalone.restart() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/import/regression_test.py000066400000000000000000000356201421664411400274610ustar00rootroot00000000000000# Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # from decimal import * import ldap import logging import os import pytest import threading import time from lib389.backend import Backends from lib389.properties import TASK_WAIT from lib389.topologies import topology_st as topo from lib389.dbgen import dbgen_users from lib389._constants import DEFAULT_SUFFIX from lib389.tasks import * from lib389.idm.user import UserAccounts from lib389.idm.directorymanager import DirectoryManager from lib389.dbgen import * from lib389.utils import * pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) TEST_SUFFIX1 = "dc=importest1,dc=com" TEST_BACKEND1 = "importest1" TEST_SUFFIX2 = "dc=importest2,dc=com" TEST_BACKEND2 = "importest2" TEST_DEFAULT_SUFFIX = "dc=default,dc=com" TEST_DEFAULT_NAME = "default" class AddDelUsers(threading.Thread): def __init__(self, inst): threading.Thread.__init__(self) self.daemon = True self.inst = inst self._should_stop = False self._ran = False def run(self): # Add 1000 entries log.info('Run.') conn = DirectoryManager(self.inst.standalone).bind() time.sleep(30) log.info('Adding users.') for i in range(1000): user = UserAccounts(conn, DEFAULT_SUFFIX) users = user.create_test_user(uid=i) users.delete() self._ran = True if self._should_stop: break if not self._should_stop: raise RuntimeError('We finished too soon.') conn.close() def stop(self): self._should_stop = True def has_started(self): return self._ran def test_replay_import_operation(topo): """ Check after certain failed import operation, is it possible to replay an import operation :id: 5f5ca532-8e18-4f7b-86bc-ac585215a473 :feature: Import :setup: Standalone instance :steps: 1. Export the backend into an ldif file 2. Perform high load of operation on the server (Add/Del users) 3. Perform an import operation 4. 
Again perform an import operation (same as 3) :expectedresults: 1. It should be successful 2. It should be successful 3. It should be unsuccessful, should give OPERATIONS_ERROR 4. It should be successful now """ log.info("Exporting LDIF online...") ldif_dir = topo.standalone.get_ldif_dir() export_ldif = ldif_dir + '/export.ldif' r = ExportTask(topo.standalone) r.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) r.wait() add_del_users1 = AddDelUsers(topo) add_del_users1.start() log.info("Importing LDIF online, should raise operation error.") trials = 0 while not add_del_users1.has_started() and trials < 10: trials += 1 time.sleep(1) r = ImportTask(topo.standalone) try: r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) except ldap.OPERATIONS_ERROR: break log.info(f'Looping. Tried {trials} times so far.') add_del_users1.stop() add_del_users1.join() log.info("Importing LDIF online") r = ImportTask(topo.standalone) r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) def test_import_be_default(topo): """ Create a backend using the name "default". previously this name was used int :id: 8e507beb-e917-4330-8cac-1ff0eee10508 :feature: Import :setup: Standalone instance :steps: 1. Create a test suffix using the be name of "default" 2. Create an ldif for the "default" backend 3. Import ldif 4. Verify all entries were imported :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ log.info('Adding suffix:{} and backend: {}...'.format(TEST_DEFAULT_SUFFIX, TEST_DEFAULT_NAME)) backends = Backends(topo.standalone) backends.create(properties={'nsslapd-suffix': TEST_DEFAULT_SUFFIX, 'name': TEST_DEFAULT_NAME}) log.info('Create LDIF file and import it...') ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, 'default.ldif') dbgen_users(topo.standalone, 5, ldif_file, TEST_DEFAULT_SUFFIX) log.info('Stopping the server and running offline import...') topo.standalone.stop() assert topo.standalone.ldif2db(TEST_DEFAULT_NAME, None, None, None, ldif_file) topo.standalone.start() log.info('Verifying entry count after import...') entries = topo.standalone.search_s(TEST_DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") assert len(entries) > 1 log.info('Test PASSED') def test_del_suffix_import(topo): """Adding a database entry fails if the same database was deleted after an import :id: 652421ef-738b-47ed-80ec-2ceece6b5d77 :feature: Import :setup: Standalone instance :steps: 1. Create a test suffix and add few entries 2. Stop the server and do offline import using ldif2db 3. Delete the suffix backend 4. Add a new suffix with the same database name 5. 
Check if adding the same database name is a success :expectedresults: Adding database with the same name should be successful """ log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX1, TEST_BACKEND1)) backends = Backends(topo.standalone) backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, 'name': TEST_BACKEND1}) log.info('Create LDIF file and import it') ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, 'suffix_del1.ldif') dbgen_users(topo.standalone, 10, ldif_file, TEST_SUFFIX1) log.info('Stopping the server and running offline import') topo.standalone.stop() assert topo.standalone.ldif2db(TEST_BACKEND1, TEST_SUFFIX1, None, None, ldif_file) topo.standalone.start() log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) backend.delete() log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND1)) backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, 'name': TEST_BACKEND1}) def test_del_suffix_backend(topo): """Adding a database entry fails if the same database was deleted after an import :id: ac702c35-74b6-434e-8e30-316433f3e91a :feature: Import :setup: Standalone instance :steps: 1. Create a test suffix and add entries 2. Stop the server and do online import using ldif2db 3. Delete the suffix backend 4. Add a new suffix with the same database name 5. 
Restart the server and check the status :expectedresults: Adding database with the same name should be successful and the server should not hang """ log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX2, TEST_BACKEND2)) backends = Backends(topo.standalone) backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, 'name': TEST_BACKEND2}) log.info('Create LDIF file and import it') ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, 'suffix_del2.ldif') dbgen_users(topo.standalone, 10, ldif_file, TEST_SUFFIX2) topo.standalone.tasks.importLDIF(suffix=TEST_SUFFIX2, input_file=ldif_file, args={TASK_WAIT: True}) log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) backend.delete() log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND2)) backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, 'name': TEST_BACKEND2}) log.info('Checking if server can be restarted after re-adding the same database') topo.standalone.restart() assert not topo.standalone.detectDisorderlyShutdown() @pytest.mark.bz1406101 @pytest.mark.ds49071 def test_import_duplicate_dn(topo): """Import ldif with duplicate DNs, should not log error "unable to flush" :id: dce2b898-119d-42b8-a236-1130f58bff17 :setup: Standalone instance, ldif file with duplicate entries :steps: 1. Create a ldif file with duplicate entries 2. Import ldif file to DS 3. Check error log file, it should not log "unable to flush" 4. Check error log file, it should log "Duplicated DN detected" :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ standalone = topo.standalone log.info('Delete the previous error logs') standalone.deleteErrorLogs() log.info('Create import file') l = """dn: dc=example,dc=com objectclass: top objectclass: domain dc: example dn: ou=myDups00001,dc=example,dc=com objectclass: top objectclass: organizationalUnit ou: myDups00001 dn: ou=myDups00001,dc=example,dc=com objectclass: top objectclass: organizationalUnit ou: myDups00001 """ ldif_dir = standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, 'data.ldif') with open(ldif_file, "w") as fd: fd.write(l) fd.close() os.chmod(ldif_file, 0o777) log.info('Import ldif with duplicate entry') assert standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) log.info('Restart the server to flush the logs') standalone.restart() log.info('Error log should not have "unable to flush" message') assert not standalone.ds_error_log.match('.*unable to flush.*') log.info('Error log should have "Duplicated DN detected" message') assert standalone.ds_error_log.match('.*Duplicated DN detected.*') @pytest.mark.bz1749595 @pytest.mark.tier2 @pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1") def test_large_ldif2db_ancestorid_index_creation(topo): """Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance :id: fe7f78f6-6e60-425d-ad47-b39b67e29113 :setup: Standalone instance :steps: 1. Delete the previous errors log to start from a fresh one 2. Create test suffix and backend 3. Create a large nested ldif file 4. Stop the server 5. Run an offline import 6. Restart the server 7. Check in the errors log that an independant ancestorid IDs sorting is done 8. Get the log of the starting of the ancestorid indexing process 9. Get the log of the end of the ancestorid indexing process 10. Get the start and end time for ancestorid index creation from these logs 11. 
Calculate the duration of the ancestorid indexing process :expectedresults: 1. Success 2. Test suffix and backend successfully created 3. ldif file successfully created 4. Success 5. Import is successfully performed 6. Success 7. Log of ancestorid sorting start and end are present 8. Log of the beginning of gathering ancestorid is found 9. Log of the final ancestorid index creation is found 10. Start and end times are successfully extracted 11. The duration of the ancestorid index creation process should be less than 10s """ ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(topo.standalone.ds_paths.ldif_dir, 'large_nested.ldif') # Have a reasonable balance between the need for a large ldif file to import and the time of test execution # total number of users num_users = 100000 # Choose a limited number of users per node to get as much as possible non-leaf entries node_limit = 5 # top suffix suffix = 'o=test' # backend backend = 'test' log.info('Delete the previous errors logs') topo.standalone.deleteErrorLogs() log.info('Add suffix:{} and backend: {}...'.format(suffix, backend)) backends = Backends(topo.standalone) backends.create(properties={'nsslapd-suffix': suffix, 'name': backend}) props = { 'numUsers' : num_users, 'nodeLimit' : node_limit, 'suffix' : suffix } instance = topo.standalone log.info('Create a large nested ldif file using dbgen : %s' % ldif_file) dbgen_nested_ldif(instance, ldif_file, props) log.info('Stop the server and run offline import...') topo.standalone.stop() assert topo.standalone.ldif2db(backend, None, None, None, ldif_file) log.info('Starting the server') topo.standalone.start() log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present') start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1] assert len(start_sort_str) > 0 log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present') end_sort_str = 
str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1] assert len(end_sort_str) > 0 log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"') start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1] assert len(start_ancestorid_indexing_op_str) > 0 log.info('parse the error logs for the line with "Created ancestorid index"') end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1] assert len(end_ancestorid_indexing_op_str) > 0 log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings') # Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...' # We are getting the sec.nanosec part of the date, '27.245967313' in the above example start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3] end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3] log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation') etime = (Decimal(end_time) - Decimal(start_time)) # The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node # Should be adjusted if these numbers are modified in the test assert etime <= 10 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s {}".format(CURRENT_FILE)) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/indexes/000077500000000000000000000000001421664411400240075ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/indexes/__init__.py000066400000000000000000000000561421664411400261210ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Indexes """ 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/indexes/regression_test.py000066400000000000000000000170031421664411400276010ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import os import pytest import ldap from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX from lib389.index import Indexes from lib389.backend import Backends, DatabaseConfig from lib389.idm.user import UserAccounts from lib389.idm.group import Groups, Group from lib389.topologies import topology_st as topo from lib389.utils import ds_is_older from lib389.plugins import MemberOfPlugin pytestmark = pytest.mark.tier1 @pytest.fixture(scope="function") def add_a_group_with_users(request, topo): """ Add a group and users, which are members of this group. """ groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn=None) group = groups.create(properties={'cn': 'test_group'}) users_list = [] users_num = 100 users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) for num in range(users_num): USER_NAME = f'test_{num}' user = users.create(properties={ 'uid': USER_NAME, 'sn': USER_NAME, 'cn': USER_NAME, 'uidNumber': f'{num}', 'gidNumber': f'{num}', 'homeDirectory': f'/home/{USER_NAME}' }) users_list.append(user) group.add_member(user.dn) def fin(): """ Removes group and users. 
""" # If the server crashed, start it again to do the cleanup if not topo.standalone.status(): topo.standalone.start() for user in users_list: user.delete() group.delete() request.addfinalizer(fin) @pytest.fixture(scope="function") def set_small_idlistscanlimit(request, topo): """ Set nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer """ db_cfg = DatabaseConfig(topo.standalone) old_idlistscanlimit = db_cfg.get_attr_vals_utf8('nsslapd-idlistscanlimit') db_cfg.set([('nsslapd-idlistscanlimit', '100')]) topo.standalone.restart() def fin(): """ Set nsslapd-idlistscanlimit back to the default value """ # If the server crashed, start it again to do the cleanup if not topo.standalone.status(): topo.standalone.start() db_cfg.set([('nsslapd-idlistscanlimit', old_idlistscanlimit)]) topo.standalone.restart() request.addfinalizer(fin) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented") def test_reindex_task_creates_abandoned_index_file(topo): """ Recreating an index for the same attribute but changing the case of for example 1 letter, results in abandoned indexfile :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6 :customerscenario: True :setup: Standalone instance :steps: 1. Create a user object with additional attributes: objectClass: mozillaabpersonalpha mozillaCustom1: xyz 2. Add an index entry mozillacustom1 3. Reindex the backend 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db 5. Remove the index 6. Notice the mozillacustom1.db is removed 7. Recreate the index but now use the exact case as mentioned in the schema 8. Reindex the backend 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz) 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz) 12. Restart the instance 13. 
Notice that an ldapsearch does not return a result(mozillacustom1=xyz) 14. Check that an ldapsearch does not return a result (mozillacustom1=xyz) 15. Check that an ldapsearch returns the results (mozillaCustom1=xyz) 16. Reindex the backend 17. Notice the second indexfile for this attribute 18. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db 19. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db :expectedresults: 1. Should Success. 2. Should Success. 3. Should Success. 4. Should Success. 5. Should Success. 6. Should Success. 7. Should Success. 8. Should Success. 9. Should Success. 10. Should Success. 11. Should Success. 12. Should Success. 13. Should Success. 14. Should Success. 15. Should Success. 16. Should Success. 17. Should Success. 18. Should Success. 19. Should Success. """ inst = topo.standalone attr_name = "mozillaCustom1" attr_value = "xyz" users = UserAccounts(inst, DEFAULT_SUFFIX) user = users.create_test_user() user.add("objectClass", "mozillaabpersonalpha") user.add(attr_name, attr_value) backends = Backends(inst) backend = backends.get(DEFAULT_BENAME) indexes = backend.get_indexes() index = indexes.create(properties={ 'cn': attr_name.lower(), 'nsSystemIndex': 'false', 'nsIndexType': ['eq', 'pres'] }) backend.reindex() time.sleep(3) assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") index.delete() assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") index = indexes.create(properties={ 'cn': attr_name, 'nsSystemIndex': 'false', 'nsIndexType': ['eq', 'pres'] }) backend.reindex() time.sleep(3) assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db") entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") assert len(entries) > 0 
inst.restart() entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") assert len(entries) > 0 backend.reindex() time.sleep(3) assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db") @pytest.mark.bz1905450 def test_unindexed_internal_search_crashes_server(topo, add_a_group_with_users, set_small_idlistscanlimit): """ An internal unindexed search was able to crash the server due to missing logging function. :id: 2d0e4070-96d6-46e5-b2c8-9495925e3e87 :customerscenario: True :setup: Standalone instance :steps: 1. Add a group with users 2. Change nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer 3. Enable memberOf plugin 4. Restart the instance 5. Run memberOf fixup task 6. Wait for the task to complete :expectedresults: 1. Should succeed 2. Should succeed 3. Should succeed 4. Should succeed 5. Should succeed 6. Server should not crash """ inst = topo.standalone memberof = MemberOfPlugin(inst) memberof.enable() inst.restart() task = memberof.fixup(DEFAULT_SUFFIX) task.wait() assert inst.status() if __name__ == "__main__": # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ldapi/000077500000000000000000000000001421664411400234415ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ldapi/__init__.py000066400000000000000000000000541421664411400255510ustar00rootroot00000000000000""" :Requirement: 389-ds-base: ldapi """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/ldapi/ldapi_test.py000066400000000000000000000130611421664411400261440ustar00rootroot00000000000000import logging import pytest import os import subprocess from lib389._constants import DEFAULT_SUFFIX, DN_DM from lib389.idm.user import UserAccounts from lib389.ldapi import 
LDAPIMapping, LDAPIFixedMapping from lib389.topologies import topology_st as topo from lib389.tasks import LDAPIMappingReloadTask def test_ldapi_authdn_attr_rewrite(topo, request): """Test LDAPI Authentication DN mapping feature :id: e8d68979-4b3d-4e2d-89ed-f9bad827718c :setup: Standalone Instance :steps: 1. Set LDAPI configuration 2. Create LDAP user 3. Create OS user 4. Create entries under cn=config for auto bind subtree and mapping entry 5. Do an LDAPI ldapsearch as the OS user 6. OS user was mapped expected LDAP entry 7. Do search using root & LDAPI :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ LINUX_USER = "ldapi_test_lib389_user" LINUX_USER2 = "ldapi_test_lib389_user2" LINUX_USER3 = "ldapi_test_lib389_user3" LINUX_PWD = "5ecret_137" LDAP_ENTRY_DN = "uid=test_ldapi,ou=people,dc=example,dc=com" LDAP_ENTRY_DN2 = "uid=test_ldapi2,ou=people,dc=example,dc=com" LDAP_ENTRY_DN3 = "uid=test_ldapi3,ou=people,dc=example,dc=com" LDAPI_AUTH_CONTAINER = "cn=auto_bind,cn=config" def fin(): # Remove the OS users for user in [LINUX_USER, LINUX_USER2, LINUX_USER3]: try: subprocess.run(['userdel', '-r', user]) except: pass request.addfinalizer(fin) # Must be root if os.geteuid() != 0: return # Perform config tasks topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') topo.standalone.config.set('nsslapd-ldapiDNMappingBase', 'cn=auto_bind,cn=config') topo.standalone.config.set('nsslapd-ldapimaptoentries', 'on') topo.standalone.config.set('nsslapd-ldapiuidnumbertype', 'uidNumber') topo.standalone.config.set('nsslapd-ldapigidnumbertype', 'gidNumber') ldapi_socket_raw = topo.standalone.config.get_attr_val_utf8('nsslapd-ldapifilepath') ldapi_socket = ldapi_socket_raw.replace('/', '%2F') # Create LDAP users users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user_properties = { 'uid': 'test_ldapi', 'cn': 'test_ldapi', 'sn': 'test_ldapi', 'uidNumber': '2020', 'gidNumber': '2020', 'userpassword': 'password', 
'description': 'userdesc', 'homeDirectory': '/home/test_ldapi'} users.create(properties=user_properties) user_properties = { 'uid': 'test_ldapi2', 'cn': 'test_ldapi2', 'sn': 'test_ldapi2', 'uidNumber': '2021', 'gidNumber': '2021', 'userpassword': 'password', 'description': 'userdesc', 'homeDirectory': '/home/test_ldapi2'} users.create(properties=user_properties) user_properties = { 'uid': 'test_ldapi3', 'cn': 'test_ldapi3', 'sn': 'test_ldapi3', 'uidNumber': '2023', 'gidNumber': '2023', 'userpassword': 'password', 'description': 'userdesc', 'homeDirectory': '/home/test_ldapi3'} users.create(properties=user_properties) # Create OS users subprocess.run(['useradd', '-u', '5001', '-p', LINUX_PWD, LINUX_USER]) subprocess.run(['useradd', '-u', '5002', '-p', LINUX_PWD, LINUX_USER2]) # Create some mapping entries ldapi_mapping = LDAPIMapping(topo.standalone, LDAPI_AUTH_CONTAINER) ldapi_mapping.create_mapping(name='entry_map1', username='dummy1', ldap_dn='uid=dummy1,dc=example,dc=com') ldapi_mapping.create_mapping(name='entry_map2', username=LINUX_USER, ldap_dn=LDAP_ENTRY_DN) ldapi_mapping.create_mapping(name='entry_map3', username='dummy2', ldap_dn='uid=dummy3,dc=example,dc=com') # Restart server for config to take effect, and clear the access log topo.standalone.deleteAccessLogs(restart=True) # Bind as OS user using ldapsearch ldapsearch_cmd = f'ldapsearch -b \'\' -s base -Y EXTERNAL -H ldapi://{ldapi_socket}' os.system(f'su {LINUX_USER} -c "{ldapsearch_cmd}"') # Check access log assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN}".*') # Bind as Root DN just to make sure it still works assert os.system(ldapsearch_cmd) == 0 assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{DN_DM}".*') # Create some fixed mapping ldapi_fixed_mapping = LDAPIFixedMapping(topo.standalone, LDAPI_AUTH_CONTAINER) ldapi_fixed_mapping.create_mapping("fixed", "5002", "5002", ldap_dn=LDAP_ENTRY_DN2) topo.standalone.deleteAccessLogs(restart=True) # Bind as OS user 
using ldapsearch os.system(f'su {LINUX_USER2} -c "{ldapsearch_cmd}"') # Check access log assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN2}".*') # Add 3rd user, and test reload task subprocess.run(['useradd', '-u', '5003', '-p', LINUX_PWD, LINUX_USER3]) ldapi_fixed_mapping.create_mapping("reload", "5003", "5003", ldap_dn=LDAP_ENTRY_DN3) reload_task = LDAPIMappingReloadTask(topo.standalone).create() reload_task.wait(timeout=20) os.system(f'su {LINUX_USER3} -c "{ldapsearch_cmd}"') assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN3}".*') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/000077500000000000000000000000001421664411400233625ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/__init__.py000066400000000000000000000000551421664411400254730ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Lib389 """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/config_compare_test.py000066400000000000000000000026571421664411400277600ustar00rootroot00000000000000import os import pytest from lib389.topologies import topology_i2 from lib389.config import Config pytestmark = pytest.mark.tier1 def test_config_compare(topology_i2): """ Compare test between cn=config of two different Directory Server intance. :id: 7b3e17d6-41ca-4926-bc3b-8173dd912a61 :setup: two isolated directory servers :steps: 1. Compare if cn=config is the same :expectedresults: 1. It should be the same (excluding unique id attrs) """ st1_config = topology_i2.ins.get('standalone1').config st2_config = topology_i2.ins.get('standalone2').config # 'nsslapd-port' attribute is expected to be same in cn=config comparison, # but they are different in our testing environment # as we are using 2 DS instances running, both running simultaneously. 
# Hence explicitly adding 'nsslapd-port' to compare_exclude. st1_config._compare_exclude.append('nsslapd-port') st2_config._compare_exclude.append('nsslapd-port') st1_config._compare_exclude.append('nsslapd-secureport') st2_config._compare_exclude.append('nsslapd-secureport') st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret') st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret') assert Config.compare(st1_config, st2_config) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/dsldapobject/000077500000000000000000000000001421664411400260205ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py000066400000000000000000000000001421664411400301170ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py000066400000000000000000000162741421664411400321500ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import ldap import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st from lib389.idm.group import Groups, Group pytestmark = pytest.mark.tier1 ################################################################################# # This is a series of test cases to assert that various DN construction scenarios # work as expected in lib389. # # DSLdapObjects are designed to allow explicit control, or to "safely assume" # so that ldap concepts aren't as confusing. 
# You can thus construct an object with a DN that is: # * defined by you expliticly # * derived from properties of the object automatically # # There are also two paths to construction: from the pluralised factory style # builder, or from the singular. The factory style has very few extra parts # but it's worth testing anyway. # # In no case do we derive a multi value rdn due to their complexity. # def test_mul_explicit_rdn(topology_st): """Test that with multiple cn and an explicit rdn, we use the rdn :id: b39ef204-45c0-4a74-9b59-b4ac1199d78c :setup: standalone instance :steps: 1. Create with mulitple cn and rdn :expectedresults: 1. Create success """ # Create with an explicit rdn value, given to the properties/rdn gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) gp = gps.create('cn=test_mul_explicit_rdn', properties={ 'cn': ['test_mul_explicit_rdn', 'other_cn_test_mul_explicit_rdn'], }) assert gp.dn.lower() == f'cn=test_mul_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_mul_derive_single_dn(topology_st): """Test that with single cn we derive rdn correctly. :id: f34f271a-ca57-4aa0-905a-b5392ce06c79 :setup: standalone instance :steps: 1. Create with single cn :expectedresults: 1. Create success """ gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) gp = gps.create(properties={ 'cn': ['test_mul_derive_single_dn'], }) assert gp.dn.lower() == f'cn=test_mul_derive_single_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_mul_derive_mult_dn(topology_st): """Test that with multiple cn we derive rdn correctly. :id: 1e1f5483-bfad-4f73-9dfb-aec54d08b268 :setup: standalone instance :steps: 1. Create with multiple cn :expectedresults: 1. 
Create success """ gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) gp = gps.create(properties={ 'cn': ['test_mul_derive_mult_dn', 'test_mul_derive_single_dn'], }) assert gp.dn.lower() == f'cn=test_mul_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_sin_explicit_dn(topology_st): """Test explicit dn with create :id: 2d812225-243b-4f87-85ad-d403a4ae0267 :setup: standalone instance :steps: 1. Create with explicit dn :expectedresults: 1. Create success """ expect_dn = f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}' gp = Group(topology_st.standalone, dn=expect_dn) gp.create(properties={ 'cn': ['test_sin_explicit_dn'], }) assert gp.dn.lower() == expect_dn.lower() gp.delete() def test_sin_explicit_rdn(topology_st): """Test explicit rdn with create. :id: a2c14e50-8086-4edb-9088-3f4a8e875c3a :setup: standalone instance :steps: 1. Create with explicit rdn :expectedresults: 1. Create success """ gp = Group(topology_st.standalone) gp.create(rdn='cn=test_sin_explicit_rdn', basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={ 'cn': ['test_sin_explicit_rdn'], }) assert gp.dn.lower() == f'cn=test_sin_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_sin_derive_single_dn(topology_st): """Derive the dn from a single cn :id: d7597016-214c-4fbd-8b48-71eb16ea9ede :setup: standalone instance :steps: 1. Create with a single cn (no dn, no rdn) :expectedresults: 1. Create success """ gp = Group(topology_st.standalone) gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={ 'cn': ['test_sin_explicit_dn'], }) assert gp.dn.lower() == f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_sin_derive_mult_dn(topology_st): """Derive the dn from multiple cn :id: 0a1a7132-a08f-4b56-ae52-30c8ca59cfaf :setup: standalone instance :steps: 1. Create with multiple cn :expectedresults: 1. 
Create success """ gp = Group(topology_st.standalone) gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={ 'cn': ['test_sin_derive_mult_dn', 'other_test_sin_derive_mult_dn'], }) assert gp.dn.lower() == f'cn=test_sin_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp.delete() def test_sin_invalid_no_basedn(topology_st): """Test that with insufficent data, create fails. :id: a710b81c-cb74-4632-97b3-bdbcccd40954 :setup: standalone instance :steps: 1. Create with no basedn (no rdn derivation will work) :expectedresults: 1. Create fails """ gp = Group(topology_st.standalone) # No basedn, so we can't derive the full dn from this. with pytest.raises(ldap.UNWILLING_TO_PERFORM): gp.create(properties={ 'cn': ['test_sin_invalid_no_basedn'], }) def test_sin_invalid_no_rdn(topology_st): """Test that with no cn, rdn derivation fails. :id: c3bb28f8-db59-4d8a-8920-169879ef702b :setup: standalone instance :steps: 1. Create with no cn :expectedresults: 1. Create fails """ gp = Group(topology_st.standalone) with pytest.raises(ldap.UNWILLING_TO_PERFORM): # Note lack of rdn derivable type (cn) AND no rdn gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={ 'member': ['test_sin_explicit_dn'], }) def test_sin_non_present_rdn(topology_st): """Test that with an rdn not present in attributes, create succeeds in some cases. :id: a5d9cb24-8907-4622-ac85-90407a66e00a :setup: standalone instance :steps: 1. Create with an rdn not in properties :expectedresults: 1. Create success """ # Test that creating something with an rdn not present in the properties works # NOTE: I think that this is 389-ds making this work, NOT lib389. gp1 = Group(topology_st.standalone) gp1.create(rdn='cn=test_sin_non_present_rdn', basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={ 'cn': ['other_test_sin_non_present_rdn'], }) assert gp1.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp1.delete() # Now, test where there is no cn. 
lib389 is blocking this today, but # 50259 will change this. gp2 = Group(topology_st.standalone) gp2.create(rdn='cn=test_sin_non_present_rdn', basedn=f'ou=groups,{DEFAULT_SUFFIX}', properties={}) assert gp2.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() gp2.delete() 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/000077500000000000000000000000001421664411400241335ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/__init__.py000066400000000000000000000000001421664411400262320ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/account_test.py000066400000000000000000000021571421664411400272050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os import pytest from lib389.idm.user import UserAccounts, Account from lib389.topologies import topology_st as topo from lib389._constants import DEFAULT_SUFFIX def test_account_delete(topo): """ Test that delete function is working with Accounts/Account :id: 9b036f14-5144-4862-b18c-a6d91b7a1620 :setup: Standalone instance :steps: 1. Create a test user. 2. Delete the test user using Account class object. :expectedresults: 1. Operation should be successful 2. 
Operation should be successful """ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) users.create_test_user(uid=1001) account = Account(topo.standalone, f'uid=test_user_1001,ou=People,{DEFAULT_SUFFIX}') account.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py000066400000000000000000000027701421664411400306300ustar00rootroot00000000000000import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccounts, UserAccount from lib389.topologies import topology_i2 pytestmark = pytest.mark.tier1 def test_user_compare_i2(topology_i2): """ Compare test between users of two different Directory Server intances. :id: f0ffaf59-e2c2-41ec-9f26-e9b1ef287463 :setup: two isolated directory servers :steps: 1. Add an identical user to each server 2. Compare if the users are "the same" :expectedresults: 1. Users are added 2. 
The users are reported as the same """ st1_users = UserAccounts(topology_i2.ins.get('standalone1'), DEFAULT_SUFFIX) st2_users = UserAccounts(topology_i2.ins.get('standalone2'), DEFAULT_SUFFIX) # Create user user_properties = { 'uid': 'testuser', 'cn': 'testuser', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser' } st1_users.create(properties=user_properties) st1_testuser = st1_users.get('testuser') st2_users.create(properties=user_properties) st2_testuser = st2_users.get('testuser') st1_testuser._compare_exclude.append('entryuuid') st2_testuser._compare_exclude.append('entryuuid') assert UserAccount.compare(st1_testuser, st2_testuser) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py000066400000000000000000000030141421664411400314470ustar00rootroot00000000000000import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.replica import ReplicationManager from lib389.idm.user import UserAccounts, UserAccount from lib389.topologies import topology_m2 pytestmark = pytest.mark.tier1 def test_user_compare_m2Repl(topology_m2): """ User compare test between users of supplier to supplier replicaton topology. :id: 7c243bea-4075-4304-864d-5b789d364871 :setup: 2 supplier MMR :steps: 1. Add a user to m1 2. Wait for replication 3. Compare if the user is the same :expectedresults: 1. User is added 2. Replication success 3. 
The user is the same """ rm = ReplicationManager(DEFAULT_SUFFIX) m1 = topology_m2.ms.get('supplier1') m2 = topology_m2.ms.get('supplier2') m1_users = UserAccounts(m1, DEFAULT_SUFFIX) m2_users = UserAccounts(m2, DEFAULT_SUFFIX) # Create 1st user user1_properties = { 'uid': 'testuser', 'cn': 'testuser', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser' } m1_users.create(properties=user1_properties) m1_testuser = m1_users.get('testuser') rm.wait_for_replication(m1, m2) m2_testuser = m2_users.get('testuser') assert UserAccount.compare(m1_testuser, m2_testuser) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py000066400000000000000000000043041421664411400307370ustar00rootroot00000000000000import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.idm.group import Groups from lib389.idm.user import UserAccounts, UserAccount from lib389.topologies import topology_st as topology pytestmark = pytest.mark.tier1 def test_user_compare(topology): """ Testing compare function :id: 26f2dea9-be1e-48ca-bcea-79592823390c :setup: Standalone instance :steps: 1. Testing comparison of two different users. 2. Testing comparison of 'str' object with itself. 3. Testing comparison of user with similar user (different object id). 4. Testing comparison of user with group. :expectedresults: 1. Should fail to compare 2. Should raise value error 3. Should be the same despite uuid difference 4. 
Should fail to compare """ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) groups = Groups(topology.standalone, DEFAULT_SUFFIX) # Create 1st user user1_properties = { 'uid': 'testuser1', 'cn': 'testuser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser1' } users.create(properties=user1_properties) testuser1 = users.get('testuser1') # Create 2nd user user2_properties = { 'uid': 'testuser2', 'cn': 'testuser2', 'sn': 'user', 'uidNumber': '1001', 'gidNumber': '2002', 'homeDirectory': '/home/testuser2' } users.create(properties=user2_properties) testuser2 = users.get('testuser2') # create group group_properties = { 'cn' : 'group1', 'description' : 'testgroup' } testuser1_copy = users.get("testuser1") group = groups.create(properties=group_properties) assert UserAccount.compare(testuser1, testuser2) is False with pytest.raises(ValueError): UserAccount.compare("test_str_object","test_str_object") assert UserAccount.compare(testuser1, testuser1_copy) assert UserAccount.compare(testuser1, group) is False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/logging/000077500000000000000000000000001421664411400237765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/logging/__init__.py000066400000000000000000000001161421664411400261050ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Logging Configurations """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/logging/logging_config_test.py000066400000000000000000000057251421664411400303730ustar00rootroot00000000000000import logging import pytest import os import ldap from lib389._constants import * from lib389.topologies import topology_st as topo DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: 
logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) big_value = "1111111111111111111111111111111111111111111" pytestmark = pytest.mark.tier1 @pytest.mark.parametrize("attr, invalid_vals, valid_vals", [ ("logexpirationtime", ["-2", "0"], ["1", "-1"]), ("maxlogsize", ["-2", "0"], ["100", "-1"]), ("logmaxdiskspace", ["-2", "0"], ["100", "-1"]), ("logminfreediskspace", ["-2", "0"], ["100", "-1"]), ("mode", ["888", "778", "77", "7777"], ["777", "000", "600"]), ("maxlogsperdir", ["-1", "0"], ["1", "20"]), ("logrotationsynchour", ["-1", "24"], ["0", "23"]), ("logrotationsyncmin", ["-1", "60"], ["0", "59"]), ("logrotationtime", ["-2", "0"], ["100", "-1"]) ]) def test_logging_digit_config(topo, attr, invalid_vals, valid_vals): """Validate logging config settings :id: a0ef30e5-538b-46fa-9762-01a4435a15e9 :parametrized: yes :setup: Standalone Instance :steps: 1. Test log expiration time 2. Test log max size 3. Test log max disk space 4. Test log min disk space 5. Test log mode 6. Test log max number of logs 7. Test log rotation hour 8. Test log rotation minute 9. Test log rotation time :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. 
Success """ accesslog_attr = "nsslapd-accesslog-{}".format(attr) auditlog_attr = "nsslapd-auditlog-{}".format(attr) auditfaillog_attr = "nsslapd-auditfaillog-{}".format(attr) errorlog_attr = "nsslapd-errorlog-{}".format(attr) # Test each log for attr in [accesslog_attr, auditlog_attr, auditfaillog_attr, errorlog_attr]: # Invalid values for invalid_val in invalid_vals: with pytest.raises(ldap.LDAPError): topo.standalone.config.set(attr, invalid_val) # Invalid high value with pytest.raises(ldap.LDAPError): topo.standalone.config.set(attr, big_value) # Non digits with pytest.raises(ldap.LDAPError): topo.standalone.config.set(attr, "abc") # Valid values for valid_val in valid_vals: topo.standalone.config.set(attr, valid_val) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_tree/000077500000000000000000000000001421664411400250225ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_tree/__init__.py000066400000000000000000000000631421664411400271320ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Mapping Tree """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py000066400000000000000000000034711421664411400305260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import ldap import logging import pytest import os from lib389._constants import * from lib389.topologies import topology_st as topo from lib389.mappingTree import MappingTrees pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_invalid_mt(topo): """Test that you can not add a new suffix/mapping tree that does not already have the backend entry created. :id: caabd407-f541-4695-b13f-8f92af1112a0 :setup: Standalone Instance :steps: 1. Create a new suffix that specifies an existing backend which has a different suffix. 2. Create a suffix that has no backend entry at all. :expectedresults: 1. Should fail with UNWILLING_TO_PERFORM 1. Should fail with UNWILLING_TO_PERFORM """ bad_suffix = 'dc=does,dc=not,dc=exist' mts = MappingTrees(topo.standalone) properties = { 'cn': bad_suffix, 'nsslapd-state': 'backend', 'nsslapd-backend': 'userroot', } with pytest.raises(ldap.UNWILLING_TO_PERFORM): mts.create(properties=properties) properties = { 'cn': bad_suffix, 'nsslapd-state': 'backend', 'nsslapd-backend': 'notCreatedRoot', } with pytest.raises(ldap.UNWILLING_TO_PERFORM): mts.create(properties=properties) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) be_del_and_default_naming_attr_test.py000066400000000000000000000062111421664411400344770ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_treeimport logging import pytest import os from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_m1 as topo from lib389.backend import Backends from lib389.encrypted_attributes import EncryptedAttrs pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: 
logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) SECOND_SUFFIX = 'o=namingcontext' THIRD_SUFFIX = 'o=namingcontext2' def test_be_delete(topo): """Test that we can delete a backend that contains replication configuration and encrypted attributes. The default naming context should also be updated to reflect the next available suffix :id: 5208f897-7c95-4925-bad0-9ceb95fee678 :setup: Supplier Instance :steps: 1. Create second backend/suffix 2. Add an encrypted attribute to the default suffix 3. Delete default suffix 4. Check the nsslapd-defaultnamingcontext is updated 5. Delete the last backend 6. Check the namingcontext has not changed 7. Add new backend 8. Set default naming context 9. Verify the naming context is correct :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success """ inst = topo.ms["supplier1"] # Create second suffix backends = Backends(inst) default_backend = backends.get(DEFAULT_SUFFIX) new_backend = backends.create(properties={'nsslapd-suffix': SECOND_SUFFIX, 'name': 'namingRoot'}) # Add encrypted attribute entry under default suffix encrypt_attrs = EncryptedAttrs(inst, basedn='cn=encrypted attributes,{}'.format(default_backend.dn)) encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) # Delete default suffix default_backend.delete() # Check that the default naming context is set to the new/second suffix default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') assert default_naming_ctx == SECOND_SUFFIX # delete new backend, but the naming context should not change new_backend.delete() # Check that the default naming context is still set to the new/second suffix default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') assert default_naming_ctx == SECOND_SUFFIX # Add new backend new_backend = 
backends.create(properties={'nsslapd-suffix': THIRD_SUFFIX, 'name': 'namingRoot2'}) # manaully set naming context inst.config.set('nsslapd-defaultnamingcontext', THIRD_SUFFIX) # Verify naming context is correct default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') assert default_naming_ctx == THIRD_SUFFIX if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_tree/mt_cursed_test.py000066400000000000000000000261741421664411400304320ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import time from lib389.topologies import topology_st from lib389.backend import Backends, Backend from lib389.mappingTree import MappingTrees from lib389.idm.domain import Domain from lib389.configurations.sample import create_base_domain @pytest.fixture(scope="function") def topology(topology_st): bes = Backends(topology_st.standalone) bes.delete_all_dangerous() mts = MappingTrees(topology_st.standalone) assert len(mts.list()) == 0 return topology_st def create_backend(inst, rdn, suffix): # We only support dc= in this test. assert suffix.startswith('dc=') be1 = Backend(inst) be1.create(properties={ 'cn': rdn, 'nsslapd-suffix': suffix, }, create_mapping_tree=False ) # Now we temporarily make the MT for this node so we can add the base entry. mts = MappingTrees(inst) mt = mts.create(properties={ 'cn': suffix, 'nsslapd-state': 'backend', 'nsslapd-backend': rdn, }) # Create the domain entry create_base_domain(inst, suffix) # Now delete the mt mt.delete() return be1 def test_mapping_tree_inverted(topology): """Test the results of an inverted parent suffix definition in the configuration. 
For more details see: https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html :id: 024c4960-3aac-4d05-bc51-963dfdeb16ca :setup: Standalone instance (no backends) :steps: 1. Add two backends without mapping trees. 2. Add the mapping trees with inverted parent-suffix definitions. 3. Attempt to search the definitions :expectedresults: 1. Success 2. Success 3. The search suceed and can see validly arranged entries. """ inst = topology.standalone # First create two Backends, without mapping trees. be1 = create_backend(inst, 'userRootA', 'dc=example,dc=com') be2 = create_backend(inst, 'userRootB', 'dc=straya,dc=example,dc=com') # Okay, now we create the mapping trees for these backends, and we *invert* them in the parent config setting mts = MappingTrees(inst) mtb = mts.create(properties={ 'cn': 'dc=straya,dc=example,dc=com', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootB', }) mta = mts.create(properties={ 'cn': 'dc=example,dc=com', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootA', 'nsslapd-parent-suffix': 'dc=straya,dc=example,dc=com' }) dc_ex = Domain(inst, dn='dc=example,dc=com') assert dc_ex.exists() dc_st = Domain(inst, dn='dc=straya,dc=example,dc=com') assert dc_st.exists() # Restart and check again inst.restart() assert dc_ex.exists() assert dc_st.exists() def test_mapping_tree_nonexist_parent(topology): """Test a backend whos mapping tree definition has a non-existant parent-suffix For more details see: https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html :id: 7a9a09bd-7604-48f7-93cb-abff9e0d0131 :setup: Standalone instance (no backends) :steps: 1. Add one backend without mapping tree 2. Configure the mapping tree with a non-existant parent suffix 3. Attempt to search the backend :expectedresults: 1. Success 2. Success 3. The search suceed and can see validly entries. 
""" inst = topology.standalone be1 = create_backend(inst, 'userRootC', 'dc=test,dc=com') mts = MappingTrees(inst) mta = mts.create(properties={ 'cn': 'dc=test,dc=com', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootC', 'nsslapd-parent-suffix': 'dc=com' }) # In this case the MT is never joined properly to the hierachy because the parent suffix # doesn't exist. The config is effectively ignored. That means that it can't be searched! dc_ex = Domain(inst, dn='dc=test,dc=com') assert dc_ex.exists() # Restart and check again. inst.restart() assert dc_ex.exists() # Two same length (dc=example,dc=com dc=abcdefg,dc=abc) def test_mapping_tree_same_length(topology): inst = topology.standalone # First create two Backends, without mapping trees. be1 = create_backend(inst, 'userRootA', 'dc=example,dc=com') be2 = create_backend(inst, 'userRootB', 'dc=abcdefg,dc=hij') # Okay, now we create the mapping trees for these backends, and we *invert* them in the parent config setting mts = MappingTrees(inst) mtb = mts.create(properties={ 'cn': 'dc=example,dc=com', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootA', }) mta = mts.create(properties={ 'cn': 'dc=abcdefg,dc=hij', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootB', }) dc_ex = Domain(inst, dn='dc=example,dc=com') assert dc_ex.exists() dc_ab = Domain(inst, dn='dc=abcdefg,dc=hij') assert dc_ab.exists() # Restart and check again inst.restart() assert dc_ex.exists() assert dc_ab.exists() # Flipped DC comps (dc=exmaple,dc=com dc=com,dc=example) def test_mapping_tree_flipped_components(topology): inst = topology.standalone # First create two Backends, without mapping trees. 
def test_mapping_tree_weird_nesting(topology):
    """Suffixes whose components mirror and nest each other
    (dc=example,dc=com, dc=com,dc=example, dc=com,dc=example,dc=com)
    must each resolve to their own backend, before and after a restart.
    """
    inst = topology.standalone

    layout = [
        ('dc=example,dc=com', 'userRootA'),
        ('dc=com,dc=example', 'userRootB'),
        ('dc=com,dc=example,dc=com', 'userRootC'),
    ]

    # Create the backends, then the (flat) mapping trees.
    for suffix, bename in layout:
        create_backend(inst, bename, suffix)

    mts = MappingTrees(inst)
    for suffix, bename in layout:
        mts.create(properties={
            'cn': suffix,
            'nsslapd-state': 'backend',
            'nsslapd-backend': bename,
        })

    # Every base entry must be searchable now and after a restart.
    domains = [Domain(inst, dn=suffix) for suffix, _ in layout]
    for domain in domains:
        assert domain.exists()

    inst.restart()
    for domain in domains:
        assert domain.exists()
test_mapping_tree_mixed_length(topology): inst = topology.standalone # First create two Backends, without mapping trees. be1 = create_backend(inst, 'userRootA', 'dc=myserver') be1 = create_backend(inst, 'userRootB', 'dc=m') be1 = create_backend(inst, 'userRootC', 'dc=a,dc=b,dc=c,dc=d,dc=e') be1 = create_backend(inst, 'userRootD', 'dc=example,dc=com') be1 = create_backend(inst, 'userRootE', 'dc=myldap') mts = MappingTrees(inst) mts.create(properties={ 'cn': 'dc=myserver', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootA', }) mts.create(properties={ 'cn': 'dc=m', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootB', }) mts.create(properties={ 'cn': 'dc=a,dc=b,dc=c,dc=d,dc=e', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootC', }) mts.create(properties={ 'cn': 'dc=example,dc=com', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootD', }) mts.create(properties={ 'cn': 'dc=myldap', 'nsslapd-state': 'backend', 'nsslapd-backend': 'userRootE', }) dc_a = Domain(inst, dn='dc=myserver') assert dc_a.exists() dc_b = Domain(inst, dn='dc=m') assert dc_b.exists() dc_c = Domain(inst, dn='dc=a,dc=b,dc=c,dc=d,dc=e') assert dc_c.exists() dc_d = Domain(inst, dn='dc=example,dc=com') assert dc_d.exists() dc_e = Domain(inst, dn='dc=myldap') assert dc_e.exists() inst.restart() assert dc_a.exists() assert dc_b.exists() assert dc_c.exists() assert dc_d.exists() assert dc_e.exists() # 50 suffixes, shallow nest (dc=example,dc=com, then dc=00 -> dc=50) def test_mapping_tree_many_shallow(topology): inst = topology.standalone dcs = [ ('dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % x) for x in range(0,50) ] for (dc, bename) in dcs: create_backend(inst, bename, dc) mts = MappingTrees(inst) for (dc, bename) in dcs: mts.create(properties={ 'cn': dc, 'nsslapd-state': 'backend', 'nsslapd-backend': bename, }) dc_asserts = [ Domain(inst, dn=dc[0]) for dc in dcs ] for dc_a in dc_asserts: assert dc_a.exists() inst.restart() for dc_a in dc_asserts: assert dc_a.exists() # 50 
suffixes, deeper nesting (dc=example,dc=com, dc=00 -> dc=10 and dc=a,dc=b,dc=c,dc=d,dc=XX,dc=example,dc=com) def test_mapping_tree_many_deep_nesting(topology): inst = topology.standalone be_count = 0 dcs = [] for x in range(0, 10): dcs.append(('dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % be_count)) be_count += 1 # Now add some children. for x in range(0,10): dcs.append(('dc=nest,dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % be_count)) be_count += 1 # Now add nested children for x in range(0,10): for y in range(0,5): dcs.append(('dc=y%s,dc=nest,dc=x%s,dc=example,dc=com' % (y, x), 'userRoot%s' % be_count)) be_count += 1 for (dc, bename) in dcs: create_backend(inst, bename, dc) mts = MappingTrees(inst) for (dc, bename) in dcs: mts.create(properties={ 'cn': dc, 'nsslapd-state': 'backend', 'nsslapd-backend': bename, }) dc_asserts = [ Domain(inst, dn=dc[0]) for dc in dcs ] for dc_a in dc_asserts: assert dc_a.exists() inst.restart() for dc_a in dc_asserts: assert dc_a.exists() referral_during_tot_init_test.py000066400000000000000000000041551421664411400334440ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/mapping_tree# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import ldap import pytest from lib389.topologies import topology_m2 from lib389._constants import (DEFAULT_SUFFIX) from lib389.agreement import Agreements from lib389.idm.user import (TEST_USER_PROPERTIES, UserAccounts) from lib389.dbgen import dbgen_users from lib389.utils import ds_is_older pytestmark = pytest.mark.tier1 @pytest.mark.skipif(ds_is_older("1.4.0.0"), reason="Not implemented") def test_referral_during_tot(topology_m2): supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] users = UserAccounts(supplier2, DEFAULT_SUFFIX) u = users.create(properties=TEST_USER_PROPERTIES) u.set('userPassword', 'password') binddn = u.dn bindpw = 'password' # Create a bunch of entries on supplier1 ldif_dir = supplier1.get_ldif_dir() import_ldif = ldif_dir + '/ref_during_tot_import.ldif' dbgen_users(supplier1, 10000, import_ldif, DEFAULT_SUFFIX) supplier1.stop() supplier1.ldif2db(bename=None, excludeSuffixes=None, encrypt=False, suffixes=[DEFAULT_SUFFIX], import_file=import_ldif) supplier1.start() # Recreate the user on m1 also, so that if the init finishes first ew don't lose the user on m2 users = UserAccounts(supplier1, DEFAULT_SUFFIX) u = users.create(properties=TEST_USER_PROPERTIES) u.set('userPassword', 'password') # Now export them to supplier2 agmts = Agreements(supplier1) agmts.list()[0].begin_reinit() # While that's happening try to bind as a user to supplier 2 # This should trigger the referral code. referred = False for i in range(0, 100): conn = ldap.initialize(supplier2.toLDAPURL()) conn.set_option(ldap.OPT_REFERRALS, False) try: conn.simple_bind_s(binddn, bindpw) conn.unbind_s() except ldap.REFERRAL: referred = True break # Means we never go a referral, should not happen! assert referred # Done. 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memberof_plugin/000077500000000000000000000000001421664411400255225ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memberof_plugin/__init__.py000066400000000000000000000000661421664411400276350ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Memberof Plugin """ memberof_include_scopes_test.py000066400000000000000000000106121421664411400337270ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memberof_plugin# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2022 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os import ldap from lib389.utils import ensure_str from lib389.topologies import topology_st as topo from lib389._constants import * from lib389.plugins import MemberOfPlugin from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.group import Group, Groups from lib389.idm.nscontainer import nsContainers SUBTREE_1 = 'cn=sub1,%s' % SUFFIX SUBTREE_2 = 'cn=sub2,%s' % SUFFIX SUBTREE_3 = 'cn=sub3,%s' % SUFFIX def add_container(inst, dn, name): """Creates container entry""" conts = nsContainers(inst, dn) cont = conts.create(properties={'cn': name}) return cont def add_member_and_group(server, cn, group_cn, subtree): users = UserAccounts(server, subtree, rdn=None) users.create(properties={'uid': f'test_{cn}', 'cn': f'test_{cn}', 'sn': f'test_{cn}', 'description': 'member', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser'}) group = Groups(server, subtree, rdn=None) group.create(properties={'cn': group_cn, 'member': f'uid=test_{cn},{subtree}', 'description': 'group'}) def check_membership(server, user_dn=None, group_dn=None, find_result=True): ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) found = False if ent.hasAttr('memberof'): for val in 
def test_multiple_scopes(topo):
    """Specify memberOf works when multiple include scopes are defined

    :id: fbcd70cc-c83d-4c79-bd5b-2d8f017545ae
    :setup: Standalone Instance
    :steps:
        1. Set multiple include scopes
        2. Test members added to both scopes are correctly updated
        3. Test user outside of scope was not updated
        4. Set exclude scope
        5. Move user into excluded subtree and check the membership is correct
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    inst = topo.standalone

    # Enable the plugin with two include scopes
    memberof = MemberOfPlugin(inst)
    memberof.enable()
    for scope in (SUBTREE_1, SUBTREE_2):
        memberof.add('memberOfEntryScope', scope)
    inst.restart()

    # Containers, then one member+group pair per subtree
    for container in ('sub1', 'sub2', 'sub3'):
        add_container(inst, SUFFIX, container)
    for member, group, subtree in (('m1', 'g1', SUBTREE_1),
                                   ('m2', 'g2', SUBTREE_2),
                                   ('m3', 'g3', SUBTREE_3)):
        add_member_and_group(inst, member, group, subtree)

    # Users inside either include scope got their memberOf values ...
    check_membership(inst, f'uid=test_m1,{SUBTREE_1}', f'cn=g1,{SUBTREE_1}', True)
    check_membership(inst, f'uid=test_m2,{SUBTREE_2}', f'cn=g2,{SUBTREE_2}', True)

    # ... while the out-of-scope user in sub3 was left untouched
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g1,{SUBTREE_1}', False)
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g2,{SUBTREE_2}', False)
    check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g3,{SUBTREE_3}', False)

    # Add an exclude scope
    excluded_subtree = 'cn=exclude,%s' % SUFFIX
    excluded_user = f"uid=test_m1,{excluded_subtree}"
    included_user = f"uid=test_m1,{SUBTREE_1}"
    group_dn = f'cn=g1,{SUBTREE_1}'
    add_container(inst, SUFFIX, 'exclude')
    memberof.add('memberOfEntryScopeExcludeSubtree', excluded_subtree)

    # Moving the user into the excluded subtree must strip its memberOf
    # value and remove it from the group
    user = UserAccount(topo.standalone, dn=included_user)
    user.rename("uid=test_m1", newsuperior=excluded_subtree)

    check_membership(inst, excluded_user, group_dn, False)
    group = Group(topo.standalone, dn=group_dn)
    assert not group.present("member", excluded_user)
    assert not group.present("member", included_user)
""" users_list = [] users = UserAccounts(topo_m2.ms["supplier1"], suffix, rdn=None) log.info('Adding %d users' % users_num) for num in sample(list(range(1000)), users_num): num_ran = int(round(num)) USER_NAME = 'test%05d' % num_ran user = users.create(properties={ 'uid': USER_NAME, 'sn': USER_NAME, 'cn': USER_NAME, 'uidNumber': '%s' % num_ran, 'gidNumber': '%s' % num_ran, 'homeDirectory': '/home/%s' % USER_NAME, 'mail': '%s@redhat.com' % USER_NAME, 'userpassword': 'pass%s' % num_ran, }) users_list.append(user) return users_list def config_memberof(server): # Configure fractional to prevent total init to send memberof memberof = MemberOfPlugin(server) memberof.enable() memberof.set_autoaddoc('nsMemberOf') server.restart() ents = server.agreement.list(suffix=DEFAULT_SUFFIX) for ent in ents: log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ent.dn) server.agreement.setProperties(agmnt_dn=ents[0].dn, properties={RA_FRAC_EXCLUDE: '(objectclass=*) $ EXCLUDE memberOf', RA_FRAC_EXCLUDE_TOTAL_UPDATE: '(objectclass=*) $ EXCLUDE '}) def send_updates_now(server): ents = server.agreement.list(suffix=DEFAULT_SUFFIX) for ent in ents: server.agreement.pause(ent.dn) server.agreement.resume(ent.dn) def _find_memberof(server, member_dn, group_dn): # To get the specific server's (M1, C1 and H1) user and group user = UserAccount(server, member_dn) assert user.exists() group = Group(server, group_dn) assert group.exists() # test that the user entry should have memberof attribute with specified group dn value assert group._dn.lower() in user.get_attr_vals_utf8_l('memberOf') @pytest.mark.bz1352121 def test_memberof_with_repl(topo): """Test that we allowed to enable MemberOf plugin in dedicated consumer :id: ef71cd7c-e792-41bf-a3c0-b3b38391cbe5 :setup: 1 Supplier - 1 Hub - 1 Consumer :steps: 1. Configure replication to EXCLUDE memberof 2. Enable memberof plugin 3. Create users/groups 4. Make user_0 member of group_0 5. Checks that user_0 is memberof group_0 on M,H,C 6. 
Make group_0 member of group_1 (nest group) 7. Checks that user_0 is memberof group_0 and group_1 on M,H,C 8. Check group_0 is memberof group_1 on M,H,C 9. Remove group_0 from group_1 10. Check group_0 and user_0 are NOT memberof group_1 on M,H,C 11. Remove user_0 from group_0 12. Check user_0 is not memberof group_0 and group_1 on M,H,C 13. Disable memberof on C 14. make user_0 member of group_1 15. Checks that user_0 is memberof group_0 on M,H but not on C 16. Enable memberof on C 17. Checks that user_0 is memberof group_0 on M,H but not on C 18. Run memberof fixup task 19. Checks that user_0 is memberof group_0 on M,H,C :expectedresults: 1. Configuration should be successful 2. Plugin should be enabled 3. Users and groups should be created 4. user_0 should be member of group_0 5. user_0 should be memberof group_0 on M,H,C 6. group_0 should be member of group_1 7. user_0 should be memberof group_0 and group_1 on M,H,C 8. group_0 should be memberof group_1 on M,H,C 9. group_0 from group_1 removal should be successful 10. group_0 and user_0 should not be memberof group_1 on M,H,C 11. user_0 from group_0 remove should be successful 12. user_0 should not be memberof group_0 and group_1 on M,H,C 13. memberof should be disabled on C 14. user_0 should be member of group_1 15. user_0 should be memberof group_0 on M,H and should not on C 16. Enable memberof on C should be successful 17. user_0 should be memberof group_0 on M,H should not on C 18. memberof fixup task should be successful 19. 
user_0 should be memberof group_0 on M,H,C """ M1 = topo.ms["supplier1"] H1 = topo.hs["hub1"] C1 = topo.cs["consumer1"] # Step 1 & 2 M1.config.enable_log('audit') config_memberof(M1) M1.restart() H1.config.enable_log('audit') config_memberof(H1) H1.restart() C1.config.enable_log('audit') config_memberof(C1) C1.restart() #Declare lists of users and groups test_users = [] test_groups = [] # Step 3 # In for loop create users and add them in the user list # it creates user_0 to user_9 (range is fun) for i in range(10): CN = '%s%d' % (USER_CN, i) users = UserAccounts(M1, SUFFIX) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN}) testuser = users.create(properties=user_props) time.sleep(2) test_users.append(testuser) # In for loop create groups and add them to the group list # it creates group_0 to group_2 (range is fun) for i in range(3): CN = '%s%d' % (GROUP_CN, i) groups = Groups(M1, SUFFIX) testgroup = groups.create(properties={'cn': CN}) time.sleep(2) test_groups.append(testgroup) # Step 4 # Now start testing by adding differnt user to differn group if not ds_is_older('1.3.7'): test_groups[0].remove('objectClass', 'nsMemberOf') member_dn = test_users[0].dn grp0_dn = test_groups[0].dn grp1_dn = test_groups[1].dn test_groups[0].add_member(member_dn) time.sleep(2) # Step 5 for i in [M1, H1, C1]: _find_memberof(i, member_dn, grp0_dn) # Step 6 test_groups[1].add_member(test_groups[0].dn) time.sleep(2) # Step 7 for i in [grp0_dn, grp1_dn]: for inst in [M1, H1, C1]: _find_memberof(inst, member_dn, i) # Step 8 for i in [M1, H1, C1]: _find_memberof(i, grp0_dn, grp1_dn) # Step 9 test_groups[1].remove_member(test_groups[0].dn) time.sleep(2) # Step 10 # For negative testcase, we are using assertionerror for inst in [M1, H1, C1]: for i in [grp0_dn, member_dn]: with pytest.raises(AssertionError): _find_memberof(inst, i, grp1_dn) # Step 11 test_groups[0].remove_member(member_dn) time.sleep(2) # Step 12 for inst in [M1, H1, C1]: for 
grp in [grp0_dn, grp1_dn]: with pytest.raises(AssertionError): _find_memberof(inst, member_dn, grp) # Step 13 C1.plugins.disable(name=PLUGIN_MEMBER_OF) C1.restart() # Step 14 test_groups[0].add_member(member_dn) time.sleep(2) # Step 15 for i in [M1, H1]: _find_memberof(i, member_dn, grp0_dn) with pytest.raises(AssertionError): _find_memberof(C1, member_dn, grp0_dn) # Step 16 memberof = MemberOfPlugin(C1) memberof.enable() C1.restart() # Step 17 for i in [M1, H1]: _find_memberof(i, member_dn, grp0_dn) with pytest.raises(AssertionError): _find_memberof(C1, member_dn, grp0_dn) # Step 18 memberof.fixup(SUFFIX) # have to sleep instead of task.wait() because the task opens a thread and exits time.sleep(5) # Step 19 for i in [M1, H1, C1]: _find_memberof(i, member_dn, grp0_dn) @pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented") def test_scheme_violation_errors_logged(topo_m2): """Check that ERR messages are verbose enough, if a member entry doesn't have the appropriate objectclass to support 'memberof' attribute :id: e2af0aaa-447e-4e85-a5ce-57ae66260d0b :setup: Standalone instance :steps: 1. Enable memberofPlugin and set autoaddoc to nsMemberOf 2. Restart the instance 3. Add a user without nsMemberOf attribute 4. Create a group and add the user to the group 5. Check that user has memberOf attribute 6. Check the error log for ".*oc_check_allowed_sv.*USER_DN.*memberOf.*not allowed.*" and ".*schema violation caught - repair operation.*" patterns :expectedresults: 1. Should be successful 2. Should be successful 3. Should be successful 4. Should be successful 5. User should have the attribute 6. 
Errors should be logged """ inst = topo_m2.ms["supplier1"] memberof = MemberOfPlugin(inst) memberof.enable() memberof.set_autoaddoc('nsMemberOf') inst.restart() users = UserAccounts(inst, SUFFIX) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': USER_CN, 'cn': USER_CN, 'sn': USER_CN}) testuser = users.create(properties=user_props) testuser.remove('objectclass', 'nsMemberOf') groups = Groups(inst, SUFFIX) testgroup = groups.create(properties={'cn': GROUP_CN}) testgroup.add('member', testuser.dn) user_memberof_attr = testuser.get_attr_val_utf8('memberof') assert user_memberof_attr log.info('memberOf attr value - {}'.format(user_memberof_attr)) pattern = ".*oc_check_allowed_sv.*{}.*memberOf.*not allowed.*".format(testuser.dn.lower()) log.info("pattern = %s" % pattern) assert inst.ds_error_log.match(pattern) pattern = ".*schema violation caught - repair operation.*" assert inst.ds_error_log.match(pattern) @pytest.mark.bz1192099 def test_memberof_with_changelog_reset(topo_m2): """Test that replication does not break, after DS stop-start, due to changelog reset :id: 60c11636-55a1-4704-9e09-2c6bcc828de4 :setup: 2 Suppliers :steps: 1. On M1 and M2, Enable memberof 2. On M1, add 999 entries allowing memberof 3. On M1, add a group with these 999 entries as members 4. Stop M1 in between, when add the group memerof is called and before it is finished the add, so step 4 should be executed after memberof has started and before the add has finished 5. Check that replication is working fine :expectedresults: 1. memberof should be enabled 2. Entries should be added 3. Add operation should start 4. M1 should be stopped 5. 
Replication should be working fine """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] log.info("Configure memberof on M1 and M2") memberof = MemberOfPlugin(m1) memberof.enable() memberof.set_autoaddoc('nsMemberOf') m1.restart() memberof = MemberOfPlugin(m2) memberof.enable() memberof.set_autoaddoc('nsMemberOf') m2.restart() log.info("On M1, add 999 test entries allowing memberof") users_list = add_users(topo_m2, 999, DEFAULT_SUFFIX) log.info("On M1, add a group with these 999 entries as members") dic_of_attributes = {'cn': ensure_bytes('testgroup'), 'objectclass': ensure_list_bytes(['top', 'groupOfNames'])} for user in users_list: dic_of_attributes.setdefault('member', []) dic_of_attributes['member'].append(user.dn) log.info('Adding the test group using async function') groupdn = 'cn=testgroup,%s' % DEFAULT_SUFFIX m1.add(Entry((groupdn, dic_of_attributes))) #shutdown the server in-between adding the group m1.stop() #start the server m1.start() log.info("Check the log messages for error") error_msg = "ERR - NSMMReplicationPlugin - ruv_compare_ruv" assert not m1.ds_error_log.match(error_msg) log.info("Check that the replication is working fine both ways, M1 <-> M2") repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication_topology(topo_m2) def add_container(inst, dn, name, sleep=False): """Creates container entry""" conts = nsContainers(inst, dn) cont = conts.create(properties={'cn': name}) if sleep: time.sleep(1) return cont def add_member(server, cn, subtree): dn = subtree users = UserAccounts(server, dn, rdn=None) users.create(properties={'uid': 'test_%s' % cn, 'cn': "%s" % cn, 'sn': 'SN', 'description': 'member', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser' }) def add_group(server, cn, subtree): group = Groups(server, subtree, rdn=None) group.create(properties={'cn': "%s" % cn, 'member': ['uid=test_m1,%s' % SUBTREE_1, 'uid=test_m2,%s' % SUBTREE_1], 'description': 'group'}) def rename_entry(server, cn, from_subtree, 
def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
    """Assert that *user_dn* does (find_result=True) or does not
    (find_result=False) have a memberOf value equal to *group_dn*,
    logging every memberOf value that is inspected along the way.
    """
    assert server
    assert user_dn
    assert group_dn
    ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
    found = False
    if ent.hasAttr('memberof'):
        for val in ent.getValues('memberof'):
            # Keep the per-value trace; it is what makes this the "ext" helper
            server.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
            if ensure_str(val) == group_dn:
                found = True
                break
    if find_result:
        assert found
    else:
        assert not found
New group name should be present in memberof attribute of user """ inst = topology_st.standalone log.info('Enable memberof plugin and set the scope as cn=sub1,dc=example,dc=com') memberof = MemberOfPlugin(inst) memberof.enable() memberof.replace('memberOfEntryScope', SUBTREE_1) inst.restart() add_container(inst, SUFFIX, 'sub1') add_container(inst, SUFFIX, 'sub2') add_member(inst, 'm1', SUBTREE_1) add_member(inst, 'm2', SUBTREE_1) add_group(inst, 'g1', SUBTREE_1) add_group(inst, 'g2', SUBTREE_2) # _check_memberof dn1 = '%s,%s' % ('uid=test_m1', SUBTREE_1) dn2 = '%s,%s' % ('uid=test_m2', SUBTREE_1) g1 = '%s,%s' % ('cn=g1', SUBTREE_1) g2 = '%s,%s' % ('cn=g2', SUBTREE_2) _find_memberof_ext(inst, dn1, g1, True) _find_memberof_ext(inst, dn2, g1, True) _find_memberof_ext(inst, dn1, g2, False) _find_memberof_ext(inst, dn2, g2, False) rename_entry(inst, 'cn=g2', SUBTREE_2, SUBTREE_1) g2n = '%s,%s' % ('cn=g2-new', SUBTREE_1) _find_memberof_ext(inst, dn1, g1, True) _find_memberof_ext(inst, dn2, g1, True) _find_memberof_ext(inst, dn1, g2n, True) _find_memberof_ext(inst, dn2, g2n, True) def _config_memberof_entrycache_on_modrdn_failure(server): server.plugins.enable(name=PLUGIN_MEMBER_OF) peoplebase = 'ou=people,%s' % SUFFIX MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', b'on'), (ldap.MOD_REPLACE, 'memberOfEntryScope', peoplebase.encode()), (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) def _disable_auto_oc_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) @pytest.mark.ds49967 def test_entrycache_on_modrdn_failure(topology_st): """This test checks that when a modrdn fails, the destination entry is not returned by a search This could happen in case the destination entry remains in the entry cache :id: 
a4d8ac0b-2448-406a-9dc2-5a72851e30b6 :setup: Standalone Instance :steps: 1. configure memberof to only scope ou=people,SUFFIX 2. Creates 10 users 3. Create groups0 (in peoplebase) that contain user0 and user1 4. Check user0 and user1 have memberof=group0.dn 5. Create group1 (OUT peoplebase) that contain user0 and user1 6. Check user0 and user1 have NOT memberof=group1.dn 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn 8. Create group2 (OUT peoplebase) that contain user2 and user3. Group2 contains a specific description value 9. Check user2 and user3 have NOT memberof=group2.dn 10. configure memberof so that added objectclass does not allow 'memberof' attribute 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) 12. Search all groups and check that the group, having the specific description value, has the original DN of group2.dn :expectedresults: 1. should succeed 2. should succeed 3. should succeed 4. should succeed 5. should succeed 6. should succeed 7. should succeed 8. should succeed 9. should succeed 10. should succeed 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. 12. 
should succeed """ # only scopes peoplebase _config_memberof_entrycache_on_modrdn_failure(topology_st.standalone) topology_st.standalone.restart(timeout=10) # create 10 users peoplebase = 'ou=people,%s' % SUFFIX for i in range(10): cn = 'user%d' % i dn = 'cn=%s,%s' % (cn, peoplebase) log.fatal('Adding user (%s): ' % dn) topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], 'sn': 'user_%s' % cn, 'description': 'add on standalone'}))) # Check that members of group0 (in the scope) have 'memberof group0_dn = 'cn=group_in0,%s' % peoplebase topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user0,%s' % peoplebase, 'cn=user1,%s' % peoplebase, ], 'description': 'mygroup'}))) # Check the those entries have memberof with group0 for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) if val.lower() == group0_dn.encode().lower(): found = True break assert found # Create a group1 out of the scope group1_dn = 'cn=group_out1,%s' % SUFFIX topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user0,%s' % peoplebase, 'cn=user1,%s' % peoplebase, ], 'description': 'mygroup'}))) # Check the those entries have not memberof with group1 for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) if val.lower() == group1_dn.encode().lower(): found = True break assert not found # move group1 into the scope and check user0 and user1 are memberof group1 topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) new_group1_dn = 'cn=group_in1,%s' % peoplebase for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) if val.lower() == new_group1_dn.encode().lower(): found = True break assert found # Create a group2 out of the scope with a SPECIFIC description value entry_description = "this is to check that the entry having this description has the appropriate DN" group2_dn = 'cn=group_out2,%s' % SUFFIX topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user2,%s' % peoplebase, 'cn=user3,%s' % peoplebase, ], 'description': entry_description}))) # Check the those entries have not memberof with group2 for i in (2, 3): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert not ent.hasAttr('memberof') # memberof will not add the missing objectclass _disable_auto_oc_memberof(topology_st.standalone) topology_st.standalone.restart(timeout=10) # move group2 into the scope and check it fails try: topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") assert False except ldap.OBJECT_CLASS_VIOLATION: pass # retrieve the entry having the specific description value # 
check that the entry DN is the original group2 DN ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=gr*)') found = False for ent in ents: topology_st.standalone.log.info("retrieve: %s with desc=%s" % (ent.dn, ent.getValue('description'))) if ent.getValue('description') == entry_description.encode(): found = True assert ent.dn == group2_dn assert found def _config_memberof_silent_memberof_failure(server): _config_memberof_entrycache_on_modrdn_failure(server) def test_silent_memberof_failure(topology_st): """This test checks that if during a MODRDN, the memberof plugin fails then MODRDN also fails :id: 095aee01-581c-43dd-a241-71f9631a18bb :setup: Standalone Instance :steps: 1. configure memberof to only scope ou=people,SUFFIX 2. Do some cleanup and Creates 10 users 3. Create groups0 (IN peoplebase) that contain user0 and user1 4. Check user0 and user1 have memberof=group0.dn 5. Create group1 (OUT peoplebase) that contain user0 and user1 6. Check user0 and user1 have NOT memberof=group1.dn 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn 8. Create group2 (OUT peoplebase) that contain user2 and user3. 9. Check user2 and user3 have NOT memberof=group2.dn 10. configure memberof so that added objectclass does not allow 'memberof' attribute 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) 12. Check user2 and user3 have NOT memberof=group2.dn 13. ADD group3 (IN peoplebase) with user4 and user5 members and check add failed OPERATIONS_ERROR (because memberof failed) 14. Check user4 and user5 have NOT memberof=group2.dn :expectedresults: 1. should succeed 2. should succeed 3. should succeed 4. should succeed 5. should succeed 6. should succeed 7. should succeed 8. should succeed 9. should succeed 10. should succeed 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. 12. should succeed 13. 
should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members 14. should succeed """ # only scopes peoplebase _config_memberof_silent_memberof_failure(topology_st.standalone) topology_st.standalone.restart(timeout=10) # first do some cleanup peoplebase = 'ou=people,%s' % SUFFIX for i in range(10): cn = 'user%d' % i dn = 'cn=%s,%s' % (cn, peoplebase) topology_st.standalone.delete_s(dn) topology_st.standalone.delete_s('cn=group_in0,%s' % peoplebase) topology_st.standalone.delete_s('cn=group_in1,%s' % peoplebase) topology_st.standalone.delete_s('cn=group_out2,%s' % SUFFIX) # create 10 users for i in range(10): cn = 'user%d' % i dn = 'cn=%s,%s' % (cn, peoplebase) log.fatal('Adding user (%s): ' % dn) topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], 'sn': 'user_%s' % cn, 'description': 'add on standalone'}))) # Check that members of group0 (in the scope) have 'memberof group0_dn = 'cn=group_in0,%s' % peoplebase topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user0,%s' % peoplebase, 'cn=user1,%s' % peoplebase, ], 'description': 'mygroup'}))) # Check the those entries have memberof with group0 for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) if val.lower() == group0_dn.encode().lower(): found = True break assert found # Create a group1 out of the scope group1_dn = 'cn=group_out1,%s' % SUFFIX topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user0,%s' % peoplebase, 'cn=user1,%s' % peoplebase, ], 'description': 'mygroup'}))) # Check the those entries have not memberof with group1 for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) if val.lower() == group1_dn.encode().lower(): found = True break assert not found # move group1 into the scope and check user0 and user1 are memberof group1 topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) new_group1_dn = 'cn=group_in1,%s' % peoplebase for i in range(2): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert ent.hasAttr('memberof') found = False for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) if val.lower() == new_group1_dn.encode().lower(): found = True break assert found # Create a group2 out of the scope group2_dn = 'cn=group_out2,%s' % SUFFIX topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user2,%s' % peoplebase, 'cn=user3,%s' % peoplebase, ], 'description': 'mygroup'}))) # Check the those entries have not memberof with group2 for i in (2, 3): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) assert not ent.hasAttr('memberof') # memberof will not add the missing objectclass _disable_auto_oc_memberof(topology_st.standalone) topology_st.standalone.restart(timeout=10) # move group2 into the scope and check it fails try: topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") assert False except ldap.OBJECT_CLASS_VIOLATION: pass # Check the those entries have not memberof for i in (2, 3): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) assert not ent.hasAttr('memberof') # Create a group3 in the scope group3_dn = 'cn=group3_in,%s' % peoplebase try: topology_st.standalone.add_s(Entry((group3_dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=user4,%s' % peoplebase, 'cn=user5,%s' % peoplebase, ], 'description': 'mygroup'}))) topology_st.standalone.log.info("This is unexpected, ADD should fail as the member entry have not the appropriate objectclass") assert False except ldap.OBJECT_CLASS_VIOLATION: pass except ldap.OPERATIONS_ERROR: pass # Check the those entries do 
not have memberof for i in (4, 5): user_dn = 'cn=user%d,%s' % (i, peoplebase) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) assert not ent.hasAttr('memberof') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memory_leaks/000077500000000000000000000000001421664411400250375ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py000066400000000000000000000131031421664411400314340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.replica import Replicas, Replica from lib389.tasks import * from lib389.utils import * from lib389.paths import Paths from lib389.topologies import topology_m2 from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG) from lib389.properties import (REPLICA_PURGE_DELAY, REPLICA_PURGE_INTERVAL) from lib389.idm.user import UserAccounts pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ds_paths = Paths() @pytest.fixture(scope="module") def topology_setup(topology_m2): """Configure the topology with purge parameters and enable audit logging - configure replica purge delay and interval on supplier1 and supplier2 - enable audit log on supplier1 and supplier2 - restart supplier1 and supplier2 """ m1 = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] replica1 = 
Replicas(m1).get(DEFAULT_SUFFIX) replica2 = Replicas(m2).get(DEFAULT_SUFFIX) replica1.set('nsDS5ReplicaPurgeDelay','5') replica2.set('nsDS5ReplicaPurgeDelay','5') assert replica1.present('nsDS5ReplicaPurgeDelay') assert replica2.present('nsDS5ReplicaPurgeDelay') replica1.display_attr('nsDS5ReplicaPurgeDelay') replica2.display_attr('nsDS5ReplicaPurgeDelay') replica1.set('nsDS5ReplicaTombstonePurgeInterval', '5') replica2.set('nsDS5ReplicaTombstonePurgeInterval', '5') assert replica1.present('nsDS5ReplicaTombstonePurgeInterval') assert replica2.present('nsDS5ReplicaTombstonePurgeInterval') replica1.display_attr('nsDS5ReplicaTombstonePurgeInterval') replica2.display_attr('nsDS5ReplicaTombstonePurgeInterval') m1.config.set('nsslapd-auditlog-logging-enabled', 'on') m2.config.set('nsslapd-auditlog-logging-enabled', 'on') m1.restart() m2.restart() @pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") @pytest.mark.ds48226 @pytest.mark.bz1243970 @pytest.mark.bz1262363 def test_MMR_double_free(topology_m2, topology_setup, timeout=5): """Reproduce conditions where a double free occurs and check it does not make the server crash :id: 91580b1c-ad10-49bc-8aed-402edac59f46 :setup: replicated topology - purge delay and purge interval are configured :steps: 1. create an entry on supplier1 2. modify the entry with description add 3. check the entry is correctly replicated on supplier2 4. stop supplier2 5. delete the entry's description on supplier1 6. stop supplier1 7. start supplier2 8. delete the entry's description on supplier2 9. add an entry's description on supplier2 10. wait the purge delay duration 11. add again an entry's description on supplier2 :expectedresults: 1. entry exists on supplier1 2. modification is effective 3. entry exists on supplier2 and modification is effective 4. supplier2 is stopped 5. description is removed from entry on supplier1 6. supplier1 is stopped 7. supplier2 is started - not synchronized with supplier1 8. 
description is removed from entry on supplier2 (same op should be performed too by replication mecanism) 9. description to entry is added on supplier2 10. Purge delay has expired - changes are erased 11. description to entry is added again on supplier2 """ name = 'test_entry' entry_m1 = UserAccounts(topology_m2.ms["supplier1"], DEFAULT_SUFFIX) entry = entry_m1.create(properties={ 'uid': name, 'sn': name, 'cn': name, 'uidNumber': '1001', 'gidNumber': '1001', 'homeDirectory': '/home/test_entry', 'userPassword': 'test_entry_pwd' }) log.info('First do an update that is replicated') entry.add('description', '5') log.info('Check the update in the replicated entry') entry_m2 = UserAccounts(topology_m2.ms["supplier2"], DEFAULT_SUFFIX) success = 0 for i in range(0, timeout): try: entry_repl = entry_m2.get(name) out = entry_repl.display_attr('description') if len(out) > 0: success = 1 break except: time.sleep(1) assert success log.info('Stop M2 so that it will not receive the next update') topology_m2.ms["supplier2"].stop(10) log.info('Perform a del operation that is not replicated') entry.remove('description', '5') log.info("Stop M1 so that it will keep del '5' that is unknown from supplier2") topology_m2.ms["supplier1"].stop(10) log.info('start M2 to do the next updates') topology_m2.ms["supplier2"].start() log.info("del 'description' by '5'") entry_repl.remove('description', '5') log.info("add 'description' by '5'") entry_repl.add('description', '5') log.info('sleep of purge delay so that the next update will purge the CSN_7') time.sleep(6) log.info("add 'description' by '6' that purge the state info") entry_repl.add('description', '6') log.info('Restart supplier1') topology_m2.ms["supplier1"].start(30) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memory_leaks/__init__.py000066400000000000000000000000701421664411400271450ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Test Memory Leaks """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/memory_leaks/range_search_test.py000066400000000000000000000043751421664411400311020ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.paths import Paths from lib389.topologies import topology_st from lib389._constants import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) ds_paths = Paths() @pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") def test_range_search(topology_st): """Add 100 entries, and run a range search. When we encounter an error we still need to disable valgrind before exiting :id: aadccf78-a2a8-48cc-8769-4764c7966189 :setup: Standalone instance, Retro changelog file, Enabled Valgrind if the system doesn't have asan :steps: 1. Add 100 test entries 2. Issue a range search with a changenumber filter 3. There should be no leak :expectedresults: 1. 100 test entries should be added 2. Search should be successful 3. 
Success """ log.info('Running test_range_search...') topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) topology_st.standalone.restart() success = True # Add 100 test entries for idx in range(1, 100): idx = str(idx) USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX try: topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), 'uid': 'user' + idx}))) except ldap.LDAPError as e: log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc']) success = False time.sleep(1) # Issue range search assert success entries = topology_st.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(&(changenumber>=74)(changenumber<=84))') assert entries if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/migration/000077500000000000000000000000001421664411400243415ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/migration/__init__.py000066400000000000000000000000651421664411400264530ustar00rootroot00000000000000""" :Requirement: 389-ds-base: DataBase Import """389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/migration/export_data_test.py000066400000000000000000000052031421664411400302640ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest import os from lib389._constants import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") def test_export_data_from_source_host(topology_st): """Prepare export file for migration using a single instance of Directory Server :id: 47f97d87-60f7-4f80-a72b-e7daa1de0061 :setup: Standalone :steps: 1. Add a test user with employeeNumber and telephoneNumber 2. Add a test user with escaped DN 3. Create export file 4. Check if values of searched attributes are present in exported file :expectedresults: 1. Success 2. Success 3. Success 4. Success """ standalone = topology_st.standalone output_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") log.info("Add a test user") users = UserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.create(properties=TEST_USER_PROPERTIES) test_user.add('employeeNumber', '1000') test_user.add('telephoneNumber', '1234567890') assert test_user.present('employeeNumber', value='1000') assert test_user.present('telephoneNumber', value='1234567890') log.info("Creating user with escaped DN") users.create(properties={ 'uid': '\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/', 'cn': 'tuser2', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/tuser2', }) log.info("Exporting LDIF offline...") standalone.stop() standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=None, encrypt=None, repl_data=None, outputfile=output_file) standalone.start() log.info("Check that value of attribute is present in the exported file") with open(output_file, 'r') as ldif_file: ldif = ldif_file.read() assert 'employeeNumber: 1000' in ldif assert 
'telephoneNumber: 1234567890' in ldif assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/migration/import_data_test.py000066400000000000000000000045331421664411400302620ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest import os from lib389._constants import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts pytestmark = pytest.mark.tier3 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") def test_import_data_to_target_host(topology_st): """Import file created in export_data_test.py using a single instance of Directory Server :id: 7e896b0c-6838-49c7-8e1d-5e8114f5fb02 :setup: Standalone :steps: 1. Check that attribute values are present in input file 2. Import input file 3. Check imported user data :expectedresults: 1. Success 2. Success 3. 
Success """ standalone = topology_st.standalone input_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") log.info("Check that value of attribute is present in the exported file") with open(input_file, 'r') as ldif_file: ldif = ldif_file.read() assert 'employeeNumber: 1000' in ldif assert 'telephoneNumber: 1234567890' in ldif assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif log.info('Stopping the server and running offline import...') standalone.stop() assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, import_file=input_file) standalone.start() log.info("Check imported user data") users = UserAccounts(standalone, DEFAULT_SUFFIX) test_user = users.get('testuser') assert test_user.present('employeeNumber', value='1000') assert test_user.present('telephoneNumber', value='1234567890') test_user = users.get('\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') assert test_user.present('cn', value='tuser2') assert test_user.present('uid', value='\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/monitor/000077500000000000000000000000001421664411400240375ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/monitor/__init__.py000066400000000000000000000001031421664411400261420ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Status - Performance Monitor """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py000066400000000000000000000277751421664411400310210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest import datetime import subprocess from multiprocessing import Process, Queue from lib389 import pid_from_file from lib389.utils import ldap, os from lib389._constants import DEFAULT_SUFFIX, ReplicaRole from lib389.cli_base import LogCapture from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.tasks import AccessLog from lib389.backend import Backends from lib389.ldclt import Ldclt from lib389.dbgen import dbgen_users from lib389.tasks import ImportTask from lib389.index import Indexes from lib389.plugins import AttributeUniquenessPlugin from lib389.config import BDB_LDBMConfig from lib389.monitor import MonitorLDBM from lib389.topologies import create_topology, _remove_ssca_db from lib389.utils import ds_is_older pytestmark = pytest.mark.tier2 db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False), reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. 
" "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.") DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _kill_ns_slapd(inst): pid = str(pid_from_file(inst.ds_paths.pid_file)) cmd = ['kill', '-9', pid] subprocess.Popen(cmd, stdout=subprocess.PIPE) @pytest.fixture(scope="function") def topology_st_fn(request): """Create DS standalone instance for each test case""" topology = create_topology({ReplicaRole.STANDALONE: 1}) def fin(): # Kill the hanging process at the end of test to prevent failures in the following tests if DEBUGGING: [_kill_ns_slapd(inst) for inst in topology] else: [_kill_ns_slapd(inst) for inst in topology] assert _remove_ssca_db(topology) [inst.stop() for inst in topology if inst.exists()] [inst.delete() for inst in topology if inst.exists()] request.addfinalizer(fin) topology.logcap = LogCapture() return topology @pytest.fixture(scope="function") def setup_attruniq_index_be_import(topology_st_fn): """Enable Attribute Uniqueness, disable indexes and import 120000 entries to the default backend """ inst = topology_st_fn.standalone inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') inst.config.set('nsslapd-plugin-logging', 'on') inst.restart() attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config") attruniq.create(properties={'cn': 'attruniq'}) for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: attruniq.add_unique_attribute(cn) attruniq.add_unique_subtree(DEFAULT_SUFFIX) attruniq.enable_all_subtrees() attruniq.enable() indexes = Indexes(inst) for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: indexes.ensure_state(properties={ 'cn': cn, 'nsSystemIndex': 'false', 'nsIndexType': 'none'}) bdb_config = 
BDB_LDBMConfig(inst) bdb_config.replace("nsslapd-db-locks", "130000") inst.restart() ldif_dir = inst.get_ldif_dir() import_ldif = ldif_dir + '/perf_import.ldif' # Valid online import import_task = ImportTask(inst) dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew") import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) import_task.wait() assert import_task.is_complete() def create_user_wrapper(q, users): try: users.create_test_user() except Exception as ex: q.put(ex) def spawn_worker_thread(function, users, log, timeout, info): log.info(f"Starting the thread - {info}") q = Queue() p = Process(target=function, args=(q,users,)) p.start() log.info(f"Waiting for {timeout} seconds for the thread to finish") p.join(timeout) if p.is_alive(): log.info("Killing the thread as it's still running") p.terminate() p.join() raise RuntimeError(f"Function call was aborted: {info}") result = q.get() if isinstance(result, Exception): raise result else: return result @db_locks_monitoring_ack @pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")]) def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold): """Test that when all of the locks are exhausted the instance still working and database is not corrupted :id: 299108cc-04d8-4ddc-b58e-99157fccd643 :customerscenario: True :parametrized: yes :setup: Standalone instance with Attr Uniq plugin and user indexes disabled :steps: 1. Set nsslapd-db-locks to 11000 2. Check that we stop acquiring new locks when the threshold is reached 3. Check that we can regulate a pause interval for DB locks monitoring thread 4. Make sure the feature works for different backends on the same suffix :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ inst = topology_st_fn.standalone ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com' backends = Backends(inst) backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX, 'name': ADDITIONAL_SUFFIX[-3:]}) ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ous.create(properties={'ou': 'newpeople'}) bdb_config = BDB_LDBMConfig(inst) bdb_config.replace("nsslapd-db-locks", "11000") # Restart server inst.restart() for lock_enabled in ["on", "off"]: for lock_pause in ["100", "500", "1000"]: bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled) bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold) bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) inst.restart() if lock_enabled == "off": raised_exception = (RuntimeError, ldap.SERVER_DOWN) else: raised_exception = ldap.OPERATIONS_ERROR users = UserAccounts(inst, DEFAULT_SUFFIX) with pytest.raises(raised_exception): spawn_worker_thread(create_user_wrapper, users, log, 30, f"Adding user with monitoring enabled='{lock_enabled}'; " f"threshold='{lock_threshold}'; pause='{lock_pause}'.") # Restart because we already run out of locks and the next unindexed searches will fail eventually if lock_enabled == "off": _kill_ns_slapd(inst) inst.restart() users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None) with pytest.raises(raised_exception): spawn_worker_thread(create_user_wrapper, users, log, 30, f"Adding user with monitoring enabled='{lock_enabled}'; " f"threshold='{lock_threshold}'; pause='{lock_pause}'.") # In case feature is disabled - restart for the clean up if lock_enabled == "off": _kill_ns_slapd(inst) inst.restart() @db_locks_monitoring_ack def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import): """Test that DB lock pause setting increases the wait interval value for the monitoring thread :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6 :customerscenario: True :setup: Standalone instance with Attr Uniq plugin and user indexes 
disabled :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%) 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds) 3. Make sure that the pause is successfully increased a few times in a row :expectedresults: 1. Success 2. Success 3. Success """ inst = topology_st_fn.standalone bdb_config = BDB_LDBMConfig(inst) bdb_config.replace("nsslapd-db-locks", "20000") lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause") assert lock_pause == 500 lock_pause = "10000" bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) # Restart server inst.restart() lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled") lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold") assert lock_enabled == "on" assert lock_threshold == 90 users = UserAccounts(inst, DEFAULT_SUFFIX) start = datetime.datetime.now() with pytest.raises(ldap.OPERATIONS_ERROR): spawn_worker_thread(create_user_wrapper, users, log, 30, f"Adding user with monitoring enabled='{lock_enabled}'; " f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'") end = datetime.datetime.now() time_delta = end - start if time_delta.seconds < 9: raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. " f"Finished the execution in {time_delta.seconds} seconds") # In case something has failed - restart for the clean up inst.restart() @pytest.mark.ds4623 @pytest.mark.bz1812286 @pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") @pytest.mark.parametrize("invalid_value", [("0"), ("1"), ("42"), ("68"), ("69"), ("96"), ("120")]) def test_invalid_threshold_range(topology_st_fn, invalid_value): """Test that setting nsslapd-db-locks-monitoring-threshold to 60 % is rejected :id: e4551de1-8582-4c13-b59d-3d5ec4701457 :customerscenario: True :parametrized: yes :setup: Standalone instance :steps: 1. 
Set nsslapd-db-locks-monitoring-threshold to 60 % 2. Check if exception message contains info about invalid value range :expectedresults: 1. Exception is raised 2. Success """ inst = topology_st_fn.standalone bdb_config = BDB_LDBMConfig(inst) msg = 'threshold is indicated as a percentage and it must lie in range of 70 and 95' try: bdb_config.replace("nsslapd-db-locks-monitoring-threshold", invalid_value) except ldap.OPERATIONS_ERROR as e: log.info('Got expected error: {}'.format(str(e))) assert msg in str(e) @pytest.mark.ds4623 @pytest.mark.bz1812286 @pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") @pytest.mark.parametrize("locks_invalid", [("0"), ("1"), ("9999"), ("10000")]) def test_invalid_db_locks_value(topology_st_fn, locks_invalid): """Test that setting nsslapd-db-locks to 0 is rejected :id: bbb40279-d622-4f36-a129-c54f963f494a :customerscenario: True :parametrized: yes :setup: Standalone instance :steps: 1. Set nsslapd-db-locks to 0 2. Check if exception message contains info about invalid value :expectedresults: 1. Exception is raised 2. Success """ inst = topology_st_fn.standalone bdb_config = BDB_LDBMConfig(inst) msg = 'Invalid value for nsslapd-db-locks ({}). 
Must be greater than 10000'.format(locks_invalid) try: bdb_config.replace("nsslapd-db-locks", locks_invalid) except ldap.UNWILLING_TO_PERFORM as e: log.info('Got expected error: {}'.format(str(e))) assert msg in str(e) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/monitor/monitor_test.py000066400000000000000000000137521421664411400271470ustar00rootroot00000000000000import logging import pytest import os from lib389.monitor import * from lib389.backend import Backends, DatabaseConfig from lib389._constants import * from lib389.topologies import topology_st as topo from lib389._mapped_object import DSLdapObjects pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_monitor(topo): """This test is to display monitor attributes to check the performace :id: f7c8a815-07cf-4e67-9574-d26a0937d3db :setup: Single instance :steps: 1. Get the cn=monitor connections attributes 2. Print connections attributes 3. Get the cn=monitor version 4. Print cn=monitor version 5. Get the cn=monitor threads attributes 6. Print cn=monitor threads attributes 7. Get cn=monitor backends attributes 8. Print cn=monitor backends attributes 9. Get cn=monitor operations attributes 10. Print cn=monitor operations attributes 11. Get cn=monitor statistics attributes 12. Print cn=monitor statistics attributes :expectedresults: 1. cn=monitor attributes should be fetched and printed successfully. 
""" #define the monitor object from Monitor class in lib389 monitor = Monitor(topo.standalone) #get monitor connections connections = monitor.get_connections() log.info('connection: {0[0]}, currentconnections: {0[1]}, totalconnections: {0[2]}'.format(connections)) #get monitor version version = monitor.get_version() log.info('version :: %s' %version) #get monitor threads threads = monitor.get_threads() log.info('threads: {0[0]},currentconnectionsatmaxthreads: {0[1]},maxthreadsperconnhits: {0[2]}'.format(threads)) #get monitor backends backend = monitor.get_backends() log.info('nbackends: {0[0]}, backendmonitordn: {0[1]}'.format(backend)) #get monitor operations operations = monitor.get_operations() log.info('opsinitiated: {0[0]}, opscompleted: {0[1]}'.format(operations)) #get monitor stats stats = monitor.get_statistics() log.info('dtablesize: {0[0]},readwaiters: {0[1]},entriessent: {0[2]},bytessent: {0[3]},currenttime: {0[4]},starttime: {0[5]}'.format(stats)) def test_monitor_ldbm(topo): """This test is to check if we are getting the correct monitor entry :id: e62ba369-32f5-4b03-8865-f597a5bb6a70 :setup: Single instance :steps: 1. Get the backend library (bdb, ldbm, etc) 2. Get the database monitor 3. Check for expected attributes in output 4. Check for expected DB library specific attributes :expectedresults: 1. Success 2. Success 3. Success 4. Success """ # Are we using BDB? 
db_config = DatabaseConfig(topo.standalone) db_lib = db_config.get_db_lib() # Get the database monitor entry monitor = MonitorLDBM(topo.standalone).get_status() # Check that known attributes exist (only NDN cache stats) assert 'normalizeddncachehits' in monitor # Check for library specific attributes if db_lib == 'bdb': assert 'dbcachehits' in monitor assert 'nsslapd-db-configured-locks' in monitor elif db_lib == 'lmdb': pass else: # Unknown - the server would probably fail to start but check it anyway log.fatal(f'Unknown backend library: {db_lib}') assert False def test_monitor_backend(topo): """This test is to check if we are getting the correct backend monitor entry :id: 27b0534f-a18c-4c95-aa2b-936bc1886a7b :setup: Single instance :steps: 1. Get the backend library (bdb, ldbm, etc) 2. Get the backend monitor 3. Check for expected attributes in output 4. Check for expected DB library specific attributes :expectedresults: 1. Success 2. Success 3. Success 4. Success """ # Are we using BDB? db_config = DatabaseConfig(topo.standalone) db_lib = db_config.get_db_lib() # Get the backend monitor be = Backends(topo.standalone).list()[0] monitor = be.get_monitor().get_status() # Check for expected attributes assert 'entrycachehits' in monitor assert 'dncachehits' in monitor # Check for library specific attributes if db_lib == 'bdb': assert 'dbfilename-0' in monitor elif db_lib == 'lmdb': pass else: # Unknown - the server would probably fail to start but check it anyway log.fatal(f'Unknown backend library: {db_lib}') assert False @pytest.mark.bz1843550 @pytest.mark.ds4153 @pytest.mark.bz1903539 @pytest.mark.ds4528 def test_num_subordinates_with_monitor_suffix(topo): """This test is to compare the numSubordinates value on the root entry with the actual number of direct subordinate(s). :id: fdcfe0ac-33c3-4252-bf38-79819ec58a51 :setup: Single instance :steps: 1. Create sample entries and perform a search with basedn as cn=monitor, filter as "(objectclass=*)" and scope as base. 
2. Extract the numSubordinates value. 3. Perform another search with basedn as cn=monitor, filter as "(|(objectclass=*)(objectclass=ldapsubentry))" and scope as one. 4. Compare numSubordinates value with the number of sub-entries. :expectedresults: 1. Success 2. Success 3. Success 4. Should be same """ raw_objects = DSLdapObjects(topo.standalone, basedn='cn=monitor') filter1 = raw_objects.filter("(objectclass=*)", scope=0) num_subordinates_val = filter1[0].get_attr_val_int('numSubordinates') filter2 = raw_objects.filter("(|(objectclass=*)(objectclass=ldapsubentry))",scope=1) assert len(filter2) == num_subordinates_val if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/openldap_2_389/000077500000000000000000000000001421664411400247765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/openldap_2_389/__init__.py000066400000000000000000000000641421664411400271070ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Test OpenLDAP """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/openldap_2_389/migrate_hdb_test.py000066400000000000000000000027531421664411400306630ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.topologies import topology_st from lib389.password_plugins import PBKDF2Plugin from lib389.utils import ds_is_older from lib389.migrate.openldap.config import olConfig from lib389.migrate.openldap.config import olOverlayType from lib389.migrate.plan import Migration pytestmark = pytest.mark.tier1 DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/4539/') @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") def test_migrate_openldap_hdb(topology_st): """Attempt a migration with HDB and no overlay configuration folder. :id: 377dbdee-7138-47d9-a518-9e0b0f4d8622 :setup: Data directory with an openldap config with HDB database. :steps: 1. Parse the configuration 2. Execute a full migration plan :expectedresults: 1. Success 2. Success """ inst = topology_st.standalone config_path = os.path.join(DATADIR1, 'slapd.d') config = olConfig(config_path) ldifs = {} migration = Migration(inst, config.schema, config.databases, ldifs) print("==== migration plan ====") print(migration.__unicode__()) print("==== end migration plan ====") migration.execute_plan() # End test, should suceed with no exceptions. 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/openldap_2_389/migrate_test.py000066400000000000000000000102431421664411400300370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.topologies import topology_st from lib389.password_plugins import PBKDF2Plugin from lib389.utils import ds_is_older from lib389.migrate.openldap.config import olConfig from lib389.migrate.openldap.config import olOverlayType from lib389.migrate.plan import Migration # from lib389.migrate.plan import * pytestmark = pytest.mark.tier1 DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/1/') @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") def test_parse_openldap_slapdd(): """Test parsing an example openldap configuration. We should be able to at least determine the backends, what overlays they have, and some other minimal amount. :id: b0061ab0-fff4-45c6-b6c6-171ca3d2dfbc :setup: Data directory with an openldap config directory. :steps: 1. Parse the openldap configuration :expectedresults: 1. Success """ config_path = os.path.join(DATADIR1, 'slapd.d') config = olConfig(config_path) # Do we have databases? assert len(config.databases) == 2 # Check that we unpacked uid eq,pres,sub correctly. assert len(config.databases[0].index) == 4 assert ('objectClass', 'eq') in config.databases[0].index assert ('uid', 'eq') in config.databases[0].index assert ('uid', 'pres') in config.databases[0].index assert ('uid', 'sub') in config.databases[0].index # Did our schema parse? assert any(['suseModuleConfiguration' in x.names for x in config.schema.classes]) @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") def test_migrate_openldap_slapdd(topology_st): """ :id: e9748040-90a0-4d69-bdde-007104f75cc5 :setup: Data directory with an openldap config directory. :steps: 1. Parse the configuration 2. Execute a full migration plan :expectedresults: 1. Success 2. 
Success """ inst = topology_st.standalone config_path = os.path.join(DATADIR1, 'slapd.d') config = olConfig(config_path) ldifs = { "dc=example,dc=com": os.path.join(DATADIR1, 'example_com.slapcat.ldif'), "dc=example,dc=net": os.path.join(DATADIR1, 'example_net.slapcat.ldif'), } migration = Migration(inst, config.schema, config.databases, ldifs) print("==== migration plan ====") print(migration.__unicode__()) print("==== end migration plan ====") migration.execute_plan() # Check the BE's are there # Check plugins # Check the schema # Check a user can bind @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") def test_migrate_openldap_slapdd_skip_elements(topology_st): """ :id: d5e16aeb-6810-423b-b5e0-f89e0596292e :setup: Data directory with an openldap config directory. :steps: 1. Parse the configuration 2. Execute a migration with skipped elements :expectedresults: 1. Success 2. Success """ inst = topology_st.standalone config_path = os.path.join(DATADIR1, 'slapd.d') config = olConfig(config_path) ldifs = { "dc=example,dc=com": os.path.join(DATADIR1, 'example_com.slapcat.ldif'), } # 1.3.6.1.4.1.5322.13.1.1 is namedObject, so check that isn't there migration = Migration(inst, config.schema, config.databases, ldifs, skip_schema_oids=['1.3.6.1.4.1.5322.13.1.1'], skip_overlays=[olOverlayType.UNIQUE], ) print("==== migration plan ====") print(migration.__unicode__()) print("==== end migration plan ====") migration.execute_plan() # Check that the overlay ISNT there # Check the schema that SHOULDNT be there. 
# # how to convert the config # # # How to slapcat # # openldap_2_389 --config /etc/openldap/slapd.d --ldif "path" # # # --confirm # --ignore-overlay=X # --ignore-schema-oid=X # --no-overlays # --no-passwords # --no-schema # --no-indexes # # # # # Add skip overlay # Add password Strip # check userPasswords 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py000066400000000000000000000056041421664411400317660ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.topologies import topology_st from lib389.utils import ds_is_older from lib389.idm.user import nsUserAccounts from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") def test_migrate_openldap_password_hash(topology_st): """Test import of an openldap password value into the directory and assert it can bind. :id: e4898e0d-5d18-4765-9249-84bcbf862fde :setup: Standalone Instance :steps: 1. Import a hash 2. Attempt a bind 3. Goto 1 :expectedresults: 1. Success 2. Success 3. 
Success """ inst = topology_st.standalone inst.config.set('nsslapd-allow-hashed-passwords', 'on') # You generate these with: # slappasswd -s password -o module-load=/usr/lib64/openldap/pw-argon2.so -h {ARGON2} pwds = [ '{CRYPT}ZZKRwXSu3tt8s', '{SSHA}jdALDtX0+MVMkRsX0ilHz0O6Uos95D4s', '{MD5}X03MO1qnZdYdgyfeuILPmQ==', '{SMD5}RnexgcsjdBHMQ1yhB7+sD+a+qDI=', '{SHA}W6ph5Mm5Pz8GgiULbPgzG37mj9g=', '{SHA256}XohImNooBHFR0OVvjcYpJ3NgPQ1qq73WKhHvch0VQtg=', '{SSHA256}covFryM35UrKB3gMYxtYpQYTHbTn5kFphjcNHewfj581SLJwjA9jew==', '{SHA384}qLZLq9CsqRpZvbt3YbQh1PK7OCgNOnW6DyHyvrxFWD1EbFmGYMlM5oDEfRnDB4On', '{SSHA384}kNjTWdmyy2G1IgJF8WrOpq0N//Yc2Ec5TIQYceuiuHQXRXpC1bfnMqyOx0NxrSREjBWDwUpqXjo=', '{SHA512}sQnzu7wkTrgkQZF+0G1hi5AI3Qmzvv0bXgc5THBqi7mAsdd4Xll27ASbRt9fEyavWi6m0QP9B8lThf+rDKy8hg==', '{SSHA512}+7A8kA32q4mCBao4Cbatdyzl5imVwJ62ZAE7UOTP4pfrF90E9R2LabOfJFzx6guaYhTmUEVK2wRKC8bToqspdeTluX2d1BX2', # Need to check -- '{PBKDF2}10000$IlfapjA351LuDSwYC0IQ8Q$saHqQTuYnjJN/tmAndT.8mJt.6w', '{PBKDF2-SHA1}10000$ZBEH6B07rgQpJSikyvMU2w$TAA03a5IYkz1QlPsbJKvUsTqNV', '{PBKDF2-SHA256}10000$henZGfPWw79Cs8ORDeVNrQ$1dTJy73v6n3bnTmTZFghxHXHLsAzKaAy8SksDfZBPIw', '{PBKDF2-SHA512}10000$Je1Uw19Bfv5lArzZ6V3EPw$g4T/1sqBUYWl9o93MVnyQ/8zKGSkPbKaXXsT8WmysXQJhWy8MRP2JFudSL.N9RklQYgDPxPjnfum/F2f/TrppA', # '{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$IyTQMsvzB2JHDiWx8fq7Ew$VhYOA7AL0kbRXI5g2kOyyp8St1epkNj7WZyUY4pAIQQ', ] accounts = nsUserAccounts(inst, basedn=DEFAULT_SUFFIX) account = accounts.create(properties={ 'uid': 'pw_migrate_test_user', 'cn': 'pw_migrate_test_user', 'displayName': 'pw_migrate_test_user', 'uidNumber': '12345', 'gidNumber': '12345', 'homeDirectory': '/var/empty', }) for pwhash in pwds: inst.log.debug(f"Attempting -> {pwhash}") account.set('userPassword', pwhash) nconn = account.bind('password') 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/paged_results/000077500000000000000000000000001421664411400252115ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/paged_results/__init__.py000066400000000000000000000000731421664411400273220ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Simple Paged Results """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/paged_results/paged_results_test.py000066400000000000000000001341071421664411400314710ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import socket from random import sample import pytest from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DN_LDBM, DN_DM, DEFAULT_SUFFIX from lib389._controls import SSSRequestControl from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organization import Organization from lib389.idm.organizationalunit import OrganizationalUnit from lib389.backend import Backends from lib389._mapped_object import DSLdapObject pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) TEST_USER_PWD = 'simplepaged_test' NEW_SUFFIX_1_NAME = 'test_parent' NEW_SUFFIX_1 = 'o={}'.format(NEW_SUFFIX_1_NAME) NEW_SUFFIX_2_NAME = 'child' NEW_SUFFIX_2 = 'ou={},{}'.format(NEW_SUFFIX_2_NAME, NEW_SUFFIX_1) NEW_BACKEND_1 = 'parent_base' NEW_BACKEND_2 = 'child_base' OLD_HOSTNAME = socket.gethostname() socket.sethostname('localhost') HOSTNAME = socket.gethostname() IP_ADDRESS = socket.gethostbyname(HOSTNAME) OLD_IP_ADDRESS = 
socket.gethostbyname(OLD_HOSTNAME) @pytest.fixture(scope="module") def create_user(topology_st, request): """User for binding operation""" log.info('Adding user simplepaged_test') new_uri = topology_st.standalone.ldapuri.replace(OLD_HOSTNAME, HOSTNAME) topology_st.standalone.ldapuri = new_uri users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create(properties={ 'uid': 'simplepaged_test', 'cn': 'simplepaged_test', 'sn': 'simplepaged_test', 'uidNumber': '1234', 'gidNumber': '1234', 'homeDirectory': '/home/simplepaged_test', 'userPassword': TEST_USER_PWD, }) # Now add the ACI so simplepage_test can read the users ... ACI_BODY = ensure_bytes('(targetattr= "uid || sn || dn")(version 3.0; acl "Allow read for user"; allow (read,search,compare) userdn = "ldap:///all";)') topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)]) def fin(): log.info('Deleting user simplepaged_test') user.delete() request.addfinalizer(fin) return user @pytest.fixture(scope="module") def new_suffixes(topology_st): """Add two suffixes with backends, one is a parent of the another """ log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1)) bes = Backends(topology_st.standalone) bes.create(properties={ 'cn': 'NEW_BACKEND_1', 'nsslapd-suffix': NEW_SUFFIX_1, }) # Create the root objects with their ACI log.info('Adding ACI to allow our test user to search') ACI_TARGET = '(targetattr != "userPassword || aci")' ACI_ALLOW = '(version 3.0; acl "Enable anonymous access";allow (read, search, compare)' ACI_SUBJECT = '(userdn = "ldap:///anyone");)' ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT o_1 = Organization(topology_st.standalone, NEW_SUFFIX_1) o_1.create(properties={ 'o': NEW_SUFFIX_1_NAME, 'aci': ACI_BODY, }) log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_2, NEW_BACKEND_2)) be_2 = bes.create(properties={ 'cn': 'NEW_BACKEND_2', 'nsslapd-suffix': NEW_SUFFIX_2, }) # We have to adjust the MT to say that BE_1 is 
a parent. mt = be_2.get_mapping_tree() mt.set_parent(NEW_SUFFIX_1) ou_2 = OrganizationalUnit(topology_st.standalone, NEW_SUFFIX_2) ou_2.create(properties={ 'ou': NEW_SUFFIX_2_NAME }) def add_users(topology_st, users_num, suffix): """Add users to the default suffix Return the list of added user DNs. """ users_list = [] users = UserAccounts(topology_st.standalone, suffix, rdn=None) log.info('Adding %d users' % users_num) for num in sample(range(1000), users_num): num_ran = int(round(num)) USER_NAME = 'test%05d' % num_ran user = users.create(properties={ 'uid': USER_NAME, 'sn': USER_NAME, 'cn': USER_NAME, 'uidNumber': '%s' % num_ran, 'gidNumber': '%s' % num_ran, 'homeDirectory': '/home/%s' % USER_NAME, 'mail': '%s@redhat.com' % USER_NAME, 'userpassword': 'pass%s' % num_ran, }) users_list.append(user) return users_list def del_users(users_list): """Delete users with DNs from given list""" log.info('Deleting %d users' % len(users_list)) for user in users_list: user.delete() def change_conf_attr(topology_st, suffix, attr_name, attr_value): """Change configuration attribute in the given suffix. Returns previous attribute value. """ entry = DSLdapObject(topology_st.standalone, suffix) attr_value_bck = entry.get_attr_val_bytes(attr_name) log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % ( attr_name, attr_value, attr_value_bck, suffix)) if attr_value is None: entry.remove_all(attr_name) else: entry.replace(attr_name, attr_value) return attr_value_bck def paged_search(conn, suffix, controls, search_flt, searchreq_attrlist): """Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE using Simple Paged Control(should the first item in the list controls. Assert that no cookie left at the end. Return the list with results summarized from all pages. 
""" pages = 0 pctrls = [] all_results = [] req_pr_ctrl = controls[0] log.info('Running simple paged result search with - ' 'search suffix: {}; filter: {}; attr list {}; ' 'page_size = {}; controls: {}.'.format(suffix, search_flt, searchreq_attrlist, req_pr_ctrl.size, str(controls))) msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) while True: log.info('Getting page %d' % (pages,)) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) log.debug('Data: {}'.format(rdata)) all_results.extend(rdata) pages += 1 pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] if pctrls: if pctrls[0].cookie: # Copy cookie from response control to request control log.debug('Cookie: {}'.format(pctrls[0].cookie)) req_pr_ctrl.cookie = pctrls[0].cookie msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) else: break # No more pages available else: break assert not pctrls[0].cookie return all_results @pytest.mark.parametrize("page_size,users_num", [(6, 5), (5, 5), (5, 25)]) def test_search_success(topology_st, create_user, page_size, users_num): """Verify that search with a simple paged results control returns all entries it should without errors. :id: ddd15b70-64f1-4a85-a793-b24761e50354 :customerscenario: True :parametrized: yes :feature: Simple paged results :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control :expectedresults: 1. Bind should be successful 2. 
All users should be found """ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] log.info('Set user bind %s ' % create_user) conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) del_users(users_list) @pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [ (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100', ldap.UNWILLING_TO_PERFORM), (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20', ldap.UNAVAILABLE_CRITICAL_EXTENSION), (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20', ldap.SIZELIMIT_EXCEEDED), (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5', ldap.SIZELIMIT_EXCEEDED), (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20', ldap.ADMINLIMIT_EXCEEDED)]) def test_search_limits_fail(topology_st, create_user, page_size, users_num, suffix, attr_name, attr_value, expected_err): """Verify that search with a simple paged results control throws expected exceptoins when corresponding limits are exceeded. :id: e3067107-bd6d-493d-9989-3e641a9337b0 :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Set limit attribute to the value that will cause an expected exception 3. Search through added users with a simple paged control :expectedresults: 1. Bind should be successful 2. Operation should be successful 3. 
Should fail with appropriate exception """ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) attr_value_bck = change_conf_attr(topology_st, suffix, attr_name, attr_value) conf_param_dict = {attr_name: attr_value} search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] controls = [] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls.append(req_ctrl) if attr_name == 'nsslapd-idlistscanlimit': sort_ctrl = SSSRequestControl(True, ['sn']) controls.append(sort_ctrl) log.info('Initiate ldapsearch with created control instance') msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) time_val = conf_param_dict.get('nsslapd-timelimit') if time_val: time.sleep(int(time_val) + 10) pages = 0 all_results = [] pctrls = [] while True: log.info('Getting page %d' % (pages,)) if pages == 0 and (time_val or attr_name == 'nsslapd-pagesizelimit'): rtype, rdata, rmsgid, rctrls = conn.result3(msgid) else: with pytest.raises(expected_err): rtype, rdata, rmsgid, rctrls = conn.result3(msgid) all_results.extend(rdata) pages += 1 pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] if pctrls: if pctrls[0].cookie: # Copy cookie from response control to request control req_ctrl.cookie = pctrls[0].cookie msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) else: break # No more pages available else: break finally: del_users(users_list) change_conf_attr(topology_st, suffix, attr_name, attr_value_bck) def test_search_sort_success(topology_st, create_user): """Verify that search with a simple paged results control and a server side sort control returns all entries it should without errors. 
:id: 17d8b150-ed43-41e1-b80f-ee9b4ce45155 :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control and a server side sort control :expectedresults: 1. Bind should be successful 2. All users should be found and sorted """ users_num = 50 page_size = 5 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') sort_ctrl = SSSRequestControl(True, ['sn']) log.info('Initiate ldapsearch with created control instance') log.info('Collect data with sorting') controls = [req_ctrl, sort_ctrl] results_sorted = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) log.info('Substring numbers from user DNs') # r_nums = map(lambda x: int(x[0][8:13]), results_sorted) r_nums = [int(x[0][8:13]) for x in results_sorted] log.info('Assert that list is sorted') assert all(r_nums[i] <= r_nums[i + 1] for i in range(len(r_nums) - 1)) finally: del_users(users_list) def test_search_abandon(topology_st, create_user): """Verify that search with simple paged results control can be abandon :id: 0008538b-7585-4356-839f-268828066978 :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control 3. Abandon the search :expectedresults: 1. Bind should be successful 2. Search should be started successfully 3. 
It should throw an ldap.TIMEOUT exception while trying to get the rest of the search results """ users_num = 10 page_size = 2 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] log.info('Initiate a search with a paged results control') msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) log.info('Abandon the search') conn.abandon(msgid) log.info('Expect an ldap.TIMEOUT exception, while trying to get the search results') with pytest.raises(ldap.TIMEOUT): conn.result3(msgid, timeout=5) finally: del_users(users_list) def test_search_with_timelimit(topology_st, create_user): """Verify that after performing multiple simple paged searches to completion, each with a timelimit, it wouldn't fail, if we sleep for a time more than the timelimit. :id: 6cd7234b-136c-419f-bf3e-43aa73592cff :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control and timelimit set to 5 3. When the returned cookie is empty, wait 10 seconds 4. Perform steps 2 and 3 three times in a row :expectedresults: 1. Bind should be successful 2. No error should happen 3. 10 seconds should pass 4. 
No error should happen """ users_num = 100 page_size = 50 timelimit = 5 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] for ii in range(3): log.info('Iteration %d' % ii) msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls, timeout=timelimit) pages = 0 pctrls = [] while True: log.info('Getting page %d' % (pages,)) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) pages += 1 pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] if pctrls: if pctrls[0].cookie: # Copy cookie from response control to request control req_ctrl.cookie = pctrls[0].cookie msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls, timeout=timelimit) else: log.info('Done with this search - sleeping %d seconds' % ( timelimit * 2)) time.sleep(timelimit * 2) break # No more pages available else: break finally: del_users(users_list) def test_search_ip_aci(topology_st, create_user): """Verify that after performing multiple simple paged searches to completion on the suffix with DNS or IP based ACI :id: bbfddc46-a8c8-49ae-8c90-7265d05b22a9 :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Back up and remove all previous ACI from suffix 2. Add an anonymous ACI for IP check 3. Bind as test user 4. Search through added users with a simple paged control 5. Perform steps 4 three times in a row 6. Return ACI to the initial state 7. Go through all steps once again, but use IP subject dn instead of DNS :expectedresults: 1. Operation should be successful 2. 
Anonymous ACI should be successfully added 3. Bind should be successful 4. No error happens, all users should be found and sorted 5. Results should remain the same 6. ACI should be successfully returned 7. Results should be the same with ACI with IP subject dn """ users_num = 20 page_size = 5 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] log.info("test_search_dns_ip_aci: HOSTNAME: " + HOSTNAME) log.info("test_search_dns_ip_aci: IP_ADDRESS: " + IP_ADDRESS) try: log.info('Back up current suffix ACI') acis_bck = topology_st.standalone.aci.list(DEFAULT_SUFFIX, ldap.SCOPE_BASE) log.info('Add test ACI') bind_rule = 'ip = "{}" or ip = "::1" or ip = "{}"'.format(IP_ADDRESS, OLD_IP_ADDRESS) ACI_TARGET = '(targetattr != "userPassword")' ACI_ALLOW = '(version 3.0;acl "Anonymous access within domain"; allow (read,compare,search)' ACI_SUBJECT = '(userdn = "ldap:///anyone") and (%s);)' % bind_rule ACI_BODY = ensure_bytes(ACI_TARGET + ACI_ALLOW + ACI_SUBJECT) topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)]) time.sleep(.5) log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD, uri=f'ldap://{HOSTNAME}:{topology_st.standalone.port}') log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] log.info('Initiate three searches with a paged results control') for ii in range(3): log.info('%d search' % (ii + 1)) all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) log.info('If we are here, then no error has happened. 
We are good.') finally: log.info('Restore ACI') topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)]) for aci in acis_bck: topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci.getRawAci())]) time.sleep(1) del_users(users_list) def test_search_multiple_paging(topology_st, create_user): """Verify that after performing multiple simple paged searches on a single connection without a complition, it wouldn't fail. :id: 628b29a6-2d47-4116-a88d-00b87405ef7f :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Initiate the search with a simple paged control 3. Acquire the returned cookie only one time 4. Perform steps 2 and 3 three times in a row :expectedresults: 1. Bind should be successful 2. Search should be successfully initiated 3. Cookie should be successfully acquired 4. No error happens """ users_num = 20 page_size = 5 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] for ii in range(3): log.info('Iteration %d' % ii) msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] # Copy cookie from response control to request control req_ctrl.cookie = pctrls[0].cookie msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) finally: del_users(users_list) @pytest.mark.parametrize("invalid_cookie", [1000, -1]) def test_search_invalid_cookie(topology_st, create_user, invalid_cookie): 
"""Verify that using invalid cookie while performing search with the simple paged results control throws a TypeError exception :id: 107be12d-4fe4-47fe-ae86-f3e340a56f42 :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Initiate the search with a simple paged control 3. Put an invalid cookie (-1, 1000) to the control 4. Continue the search :expectedresults: 1. Bind should be successful 2. Search should be successfully initiated 3. Cookie should be added 4. It should throw a TypeError exception """ users_num = 20 page_size = 5 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) log.info('Put an invalid cookie (%d) to the control. TypeError is expected' % invalid_cookie) req_ctrl.cookie = invalid_cookie with pytest.raises(TypeError): msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) finally: del_users(users_list) def test_search_abandon_with_zero_size(topology_st, create_user): """Verify that search with simple paged results control can be abandon using page_size = 0 :id: d2fd9a10-84e1-4b69-a8a7-36ca1427c171 :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control and page_size = 0 :expectedresults: 1. Bind should be successful 2. 
No cookie should be returned at all """ users_num = 10 page_size = 0 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] assert not pctrls[0].cookie finally: del_users(users_list) def test_search_pagedsizelimit_success(topology_st, create_user): """Verify that search with a simple paged results control returns all entries it should without errors while valid value set to nsslapd-pagedsizelimit. :id: 88193f10-f6f0-42f5-ae9c-ff34b8f9ee8c :customerscenario: True :setup: Standalone instance, test user for binding, 10 users for the search base :steps: 1. Set nsslapd-pagedsizelimit: 20 2. Bind as test user 3. Search through added users with a simple paged control using page_size = 10 :expectedresults: 1. nsslapd-pagedsizelimit should be successfully set 2. Bind should be successful 3. 
All users should be found """ users_num = 10 page_size = 10 attr_name = 'nsslapd-pagedsizelimit' attr_value = '20' attr_value_bck = change_conf_attr(topology_st, DN_CONFIG, attr_name, attr_value) users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) finally: del_users(users_list) change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', attr_value_bck) @pytest.mark.parametrize('conf_attr,user_attr,expected_rs', (('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED))) def test_search_nspagedsizelimit(topology_st, create_user, conf_attr, user_attr, expected_rs): """Verify that nsPagedSizeLimit attribute overrides nsslapd-pagedsizelimit while performing search with the simple paged results control. :id: b08c6ad2-ba28-447a-9f04-5377c3661d0d :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, 10 users for the search base :steps: 1. Set nsslapd-pagedsizelimit: 5 2. Set nsPagedSizeLimit: 15 3. Bind as test user 4. Search through added users with a simple paged control using page_size = 10 5. Bind as Directory Manager 6. Restore all values 7. Set nsslapd-pagedsizelimit: 15 8. Set nsPagedSizeLimit: 5 9. Bind as test user 10. Search through added users with a simple paged control using page_size = 10 :expectedresults: 1. nsslapd-pagedsizelimit should be successfully set 2. nsPagedSizeLimit should be successfully set 3. Bind should be successful 4. No error happens, all users should be found 5. Bind should be successful 6. All values should be restored 7. nsslapd-pagedsizelimit should be successfully set 8. 
nsPagedSizeLimit should be successfully set 9. Bind should be successful 10. It should throw SIZELIMIT_EXCEEDED exception """ users_num = 10 page_size = 10 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr) user_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr) try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] if expected_rs == ldap.SIZELIMIT_EXCEEDED: log.info('Expect to fail with SIZELIMIT_EXCEEDED') with pytest.raises(expected_rs): all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) elif expected_rs == 'PASS': log.info('Expect to pass') all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) finally: del_users(users_list) change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_bck) change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr_bck) @pytest.mark.parametrize('conf_attr_values,expected_rs', ((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED), (('5000', '120', '122'), 'PASS'))) def test_search_paged_limits(topology_st, create_user, conf_attr_values, expected_rs): """Verify that nsslapd-idlistscanlimit and nsslapd-lookthroughlimit can limit the administrator search abilities. :id: e0f8b916-7276-4bd3-9e73-8696a4468811 :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, 10 users for the search base :steps: 1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000 2. Set nsslapd-idlistscanlimit: 120 3. Set nsslapd-lookthroughlimit: 122 4. Bind as test user 5. 
@pytest.mark.parametrize('conf_attr_values,expected_rs',
                         ((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
                          (('5000', '120', '122'), 'PASS')))
def test_search_paged_limits(topology_st, create_user, conf_attr_values, expected_rs):
    """Verify that nsslapd-idlistscanlimit and
    nsslapd-lookthroughlimit can limit the administrator
    search abilities.

    :id: e0f8b916-7276-4bd3-9e73-8696a4468811
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance, test user for binding,
            10 users for the search base
    :steps:
        1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000
        2. Set nsslapd-idlistscanlimit: 120
        3. Set nsslapd-lookthroughlimit: 122
        4. Bind as test user
        5. Search through added users with a simple paged control
           using page_size = 10
        6. Bind as Directory Manager
        7. Set nsslapd-idlistscanlimit: 100
        8. Set nsslapd-lookthroughlimit: 100
        9. Bind as test user
        10. Search through added users with a simple paged control
            using page_size = 10
    :expectedresults:
        1. nsslapd-sizelimit and nsslapd-pagedsizelimit should be
           successfully set
        2. nsslapd-idlistscanlimit should be successfully set
        3. nsslapd-lookthroughlimit should be successfully set
        4. Bind should be successful
        5. No error happens, all users should be found
        6. Bind should be successful
        7. nsslapd-idlistscanlimit should be successfully set
        8. nsslapd-lookthroughlimit should be successfully set
        9. Bind should be successful
        10. It should throw ADMINLIMIT_EXCEEDED exception
    """

    num_users = 101
    page_size = 10
    created_users = add_users(topology_st, num_users, DEFAULT_SUFFIX)
    search_filter = r'(uid=test*)'
    requested_attrs = ['dn', 'sn']
    ldbm_config_dn = 'cn=config,%s' % DN_LDBM
    # Configure the four limits, saving the previous values so the
    # finally clause can put everything back.
    size_backup = change_conf_attr(topology_st, DN_CONFIG,
                                   'nsslapd-sizelimit', conf_attr_values[0])
    pagedsize_backup = change_conf_attr(topology_st, DN_CONFIG,
                                        'nsslapd-pagedsizelimit', conf_attr_values[0])
    idscan_backup = change_conf_attr(topology_st, ldbm_config_dn,
                                     'nsslapd-idlistscanlimit', conf_attr_values[1])
    lookthrough_backup = change_conf_attr(topology_st, ldbm_config_dn,
                                          'nsslapd-lookthroughlimit', conf_attr_values[2])

    try:
        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD)

        paging_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')

        if expected_rs == 'PASS':
            log.info('Expect to pass')
            found = paged_search(conn, DEFAULT_SUFFIX, [paging_ctrl],
                                 search_filter, requested_attrs)
            log.info('%d results' % len(found))
            assert len(found) == len(created_users)
        elif expected_rs == ldap.ADMINLIMIT_EXCEEDED:
            log.info('Expect to fail with ADMINLIMIT_EXCEEDED')
            with pytest.raises(expected_rs):
                paged_search(conn, DEFAULT_SUFFIX, [paging_ctrl],
                             search_filter, requested_attrs)
    finally:
        del_users(created_users)
        change_conf_attr(topology_st, DN_CONFIG,
                         'nsslapd-sizelimit', size_backup)
        change_conf_attr(topology_st, DN_CONFIG,
                         'nsslapd-pagedsizelimit', pagedsize_backup)
        change_conf_attr(topology_st, ldbm_config_dn,
                         'nsslapd-lookthroughlimit', lookthrough_backup)
        change_conf_attr(topology_st, ldbm_config_dn,
                         'nsslapd-idlistscanlimit', idscan_backup)
It should throw ADMINLIMIT_EXCEEDED exception """ users_num = 101 page_size = 10 users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', conf_attr_values[0]) idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', conf_attr_values[0]) user_idlistscan_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', conf_attr_values[1]) user_lookthrough_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', conf_attr_values[2]) try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] if expected_rs == ldap.ADMINLIMIT_EXCEEDED: log.info('Expect to fail with ADMINLIMIT_EXCEEDED') with pytest.raises(expected_rs): all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) elif expected_rs == 'PASS': log.info('Expect to pass') all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) finally: del_users(users_list) change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', lookthrough_attr_bck) change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', idlistscan_attr_bck) change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', user_idlistscan_attr_bck) change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', user_lookthrough_attr_bck) def test_ger_basic(topology_st, create_user): """Verify that search with a simple paged results control and get effective rights control returns all entries it should without errors. 
:id: 7b0bdfc7-a2f2-4c1a-bcab-f1eb8b330d45 :customerscenario: True :setup: Standalone instance, test user for binding, varying number of users for the search base :steps: 1. Search through added users with a simple paged control and get effective rights control :expectedresults: 1. All users should be found, every found entry should have an 'attributeLevelRights' returned """ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] page_size = 4 try: spr_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') ger_ctrl = GetEffectiveRightsControl(True, ensure_bytes("dn: " + DN_DM)) all_results = paged_search(topology_st.standalone, DEFAULT_SUFFIX, [spr_ctrl, ger_ctrl], search_flt, searchreq_attrlist) log.info('{} results'.format(len(all_results))) assert len(all_results) == len(users_list) log.info('Check for attributeLevelRights') assert all(attrs['attributeLevelRights'][0] for dn, attrs in all_results) finally: log.info('Remove added users') del_users(users_list) def test_multi_suffix_search(topology_st, create_user, new_suffixes): """Verify that page result search returns empty cookie if there is no returned entry. :id: 9712345b-9e38-4df6-8794-05f12c457d39 :customerscenario: True :setup: Standalone instance, test user for binding, two suffixes with backends, one is inserted into another, 10 users for the search base within each suffix :steps: 1. Bind as test user 2. Search through all 20 added users with a simple paged control using page_size = 4 3. Wait some time for the logs to be updated 4. Check access log :expectedresults: 1. Bind should be successful 2. All users should be found 3. Some time should pass 4. 
The access log should contain the pr_cookie for each page request and it should be equal 0, except the last one should be equal -1 """ search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] page_size = 4 users_num = 20 log.info('Clear the access log') topology_st.standalone.deleteAccessLogs() users_list_1 = add_users(topology_st, 10, NEW_SUFFIX_1) users_list_2 = add_users(topology_st, 10, NEW_SUFFIX_2) try: req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') all_results = paged_search(topology_st.standalone, NEW_SUFFIX_1, [req_ctrl], search_flt, searchreq_attrlist) log.info('{} results'.format(len(all_results))) assert len(all_results) == users_num log.info('Restart the server to flush the logs') topology_st.standalone.restart(timeout=10) access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*') pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines]) pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list] log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0') pr_cookie_zeros = list(pr_cookie == 0 for pr_cookie in pr_cookie_list[0:-1]) assert all(pr_cookie_zeros) assert pr_cookie_list[-1] == -1 finally: log.info('Remove added users') del_users(users_list_1) del_users(users_list_2) @pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000')) def test_maxsimplepaged_per_conn_success(topology_st, create_user, conf_attr_value): """Verify that nsslapd-maxsimplepaged-per-conn acts according design :id: 192e2f25-04ee-4ff9-9340-d875dcbe8011 :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, 20 users for the search base :steps: 1. Set nsslapd-maxsimplepaged-per-conn in cn=config to the next values: no value, -1, some positive 2. Search through the added users with a simple paged control using page size = 4 :expectedresults: 1. nsslapd-maxsimplepaged-per-conn should be successfully set 2. 
If no value or value = -1 - all users should be found, default behaviour; If the value is positive, the value is the max simple paged results requests per connection. """ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] page_size = 4 if conf_attr_value: max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist) log.info('{} results'.format(len(all_results))) assert len(all_results) == len(users_list) finally: log.info('Remove added users') del_users(users_list) if conf_attr_value: change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) @pytest.mark.parametrize('conf_attr_value', ('0', '1')) def test_maxsimplepaged_per_conn_failure(topology_st, create_user, conf_attr_value): """Verify that nsslapd-maxsimplepaged-per-conn acts according design :id: eb609e63-2829-4331-8439-a35f99694efa :customerscenario: True :parametrized: yes :setup: Standalone instance, test user for binding, 20 users for the search base :steps: 1. Set nsslapd-maxsimplepaged-per-conn = 0 in cn=config 2. Search through the added users with a simple paged control using page size = 4 3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config 4. Search through the added users with a simple paged control using page size = 4 two times, but don't close the connections :expectedresults: 1. nsslapd-maxsimplepaged-per-conn should be successfully set 2. UNWILLING_TO_PERFORM should be thrown 3. Bind should be successful 4. 
UNWILLING_TO_PERFORM should be thrown """ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] page_size = 4 max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) try: log.info('Set user bind') conn = create_user.bind(TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') with pytest.raises(ldap.UNWILLING_TO_PERFORM): msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) # If nsslapd-maxsimplepaged-per-conn = 1, # it should pass this point, but failed on the next search assert conf_attr_value == '1' msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) rtype, rdata, rmsgid, rctrls = conn.result3(msgid) finally: log.info('Remove added users') del_users(users_list) change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/000077500000000000000000000000001421664411400242125ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/__init__.py000066400000000000000000000000661421664411400263250ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Password Policy """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/password_policy_test.py000066400000000000000000001644251421664411400310600ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ This test script will test password policy. """ import os import pytest import time from lib389.topologies import topology_st as topo from lib389.idm.domain import Domain from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX from lib389.pwpolicy import PwPolicyManager import ldap pytestmark = pytest.mark.tier1 def create_user(topo, uid, cn, sn, givenname, userpasseord, gid, ou): """ Will create user """ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=ou).create(properties={ 'uid': uid, 'cn': cn, 'sn': sn, 'givenname': givenname, 'mail': f'{uid}@example.com', 'userpassword': userpasseord, 'homeDirectory': f'/home/{uid}', 'uidNumber': gid, 'gidNumber': gid }) return user @pytest.fixture(scope="module") def _policy_setup(topo): """ Will do pretest setup. """ # Add self user modification and anonymous aci USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" suffix = Domain(topo.standalone, DEFAULT_SUFFIX) suffix.add('aci', USER_SELF_MOD_ACI) suffix.add('aci', ANON_ACI) for suffix, ou in [(DEFAULT_SUFFIX, 'dirsec'), (f'ou=people,{DEFAULT_SUFFIX}', 'others')]: OrganizationalUnits(topo.standalone, suffix).create(properties={ 'ou': ou }) for uid, cn, sn, givenname, userpasseord, gid, ou in [ ('dbyers', 'Danny Byers', 'Byers', 'Danny', 'dby3rs1', '10001', 'ou=dirsec'), ('orla', 'Orla Hegarty', 'Hegarty', 'Orla', '000rla1', '10002', 'ou=dirsec'), ('joe', 'Joe Rath', 'Rath', 'Joe', '00j0e1', '10003', 'ou=people'), ('jack', 'Jack Rath', 'Rath', 'Jack', '00j6ck1', '10004', 'ou=people'), ('fred', 'Fred Byers', 'Byers', 'Fred', '00fr3d1', '10005', None), ('deep', 'Deep Blue', 'Blue', 'Deep', '00de3p1', '10006', 'ou=others, ou=people'), 
('accntlusr', 'AccountControl User', 'ControlUser', 'Account', 'AcControl123', '10007', 'ou=dirsec'), ('nocntlusr', 'NoAccountControl User', 'ControlUser', 'NoAccount', 'NoControl123', '10008', 'ou=dirsec') ]: create_user(topo, uid, cn, sn, givenname, userpasseord, gid, ou) policy_props = {'passwordexp': 'off', 'passwordchange': 'off', 'passwordmustchange': 'off', 'passwordchecksyntax': 'off', 'passwordinhistory': '6', 'passwordhistory': 'off', 'passwordlockout': 'off', 'passwordlockoutduration': '3600', 'passwordmaxage': '8640000', 'passwordmaxfailure': '3', 'passwordminage': '0', 'passwordminlength': '6', 'passwordresetfailurecount': '600', 'passwordunlock': 'on', 'passwordStorageScheme': 'CLEAR', 'passwordwarning': '86400' } pwp = PwPolicyManager(topo.standalone) for dn_dn in (f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}', f'uid=joe,ou=People,{DEFAULT_SUFFIX}'): pwp.create_user_policy(dn_dn, policy_props) pwp.create_subtree_policy(f'ou=People,{DEFAULT_SUFFIX}', policy_props) def change_password(topo, user_password_new_pass_list): """ Will change password with self binding. """ for user, password, new_pass in user_password_new_pass_list: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) UserAccount(conn, real_user.dn).replace('userpassword', new_pass) def change_password_ultra_new(topo, user_password_new_pass_list): """ Will change password with self binding. """ for user, password, new_pass, ultra_new_pass in user_password_new_pass_list: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) UserAccount(conn, real_user.dn).replace('userpassword', new_pass) conn = real_user.bind(new_pass) UserAccount(conn, real_user.dn).replace('userpassword', ultra_new_pass) def change_password_with_admin(topo, user_password_new_pass_list): """ Will change password by root. 
""" for user, password in user_password_new_pass_list: UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').replace('userpassword', password) def _do_transaction_for_pwp(topo, attr1, attr2): """ Will change pwp parameters """ pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') for instance in [orl, joe, people]: instance.replace(attr1, attr2) for instance in [orl, joe, people]: assert instance.get_attr_val_utf8(attr1) == attr2 @pytest.fixture(scope="function") def _fixture_for_password_change(request, topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') for attribute in ('passwordMustChange', 'passwordmustchange'): orl.replace(attribute, 'off') assert orl.get_attr_val_utf8(attribute) == 'off' def final_task(): people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') people.replace('passwordchange', 'on') assert people.get_attr_val_utf8('passwordchange') == 'on' # Administrator Reseting to original password change_password_with_admin(topo, [ ('uid=joe,ou=people', '00j0e1'), ('uid=fred', '00fr3d1'), ('uid=jack,ou=people', '00j6ck1'), ('uid=deep,ou=others,ou=people', '00de3p1'), ('uid=orla,ou=dirsec', '000rla1'), ('uid=dbyers,ou=dirsec', 'Anuj') ]) request.addfinalizer(final_task) def test_password_change_section(topo, _policy_setup, _fixture_for_password_change): """Password Change Section. :id: 5d018c08-9388-11ea-8394-8c16451d917b :setup: Standalone :steps: 1. Confirm that user is not been affected by fine grained password (As its is not belong to any password policy) 2. Should be able to change password(As its is not belong to any password policy) 3. Try to change password for user even though pw policy is set to no. Should get error message: unwilling to Perform ! 4. Set Password change to May Change Password. 
5. Administrator Reseting to original password ! 6. Attempt to Modify password to orla2 with an invalid first pw with error message. 7. Changing current password from orla1 to orla2 8. Changing current password from orla2 to orla1. 9. Set Password change to Must Not Change After Reset 10 Change password for joe,jack,deep even though pw policy is set to no with error message. 11. Fred can change.(Fred is not belong to any pw policy) 12. Changing pw policy to may change pw 13. Set Password change to May Change Password 14. Administrator Reseting to original password 15. Try to change password with invalid credentials. Should see error message. 16. Changing current password for joe and fed. 17. Changing current password for jack and deep with error message.(passwordchange not on) 18. Changing pw policy to may change pw 19. Set Password change to May Change Password 20. Administrator Reseting to original password 21. Try to change password with invalid credentials. Should see error message. 22. Changing current password 23. Set Password change to Must Not Change After Reset 24. Searching for passwordchange: Off 25. Administrator Reseting to original password 26. Try to change password with invalid credentials. Should see error message 27. Changing current password (('passwordchange', 'off') for joe) :expected results: 1. Success(As its is not belong to any password policy) 2. Success 3. Fail(pw policy is set to no) 4. Success 5. Success 6. Fail(invalid first pw) 7. Success 8. Success 9. Success 10. Fail(pw policy is set to no) 11. Success((Fred is not belong to any pw policy)) 12. Success 13. Success 14. Success 15. Fail(invalid credentials) 16. Success((passwordchange on)) 17. Fail(passwordchange not on) 18. Success 19. Success 20. Success 21. Fail(invalid credentials) 22. Success 23. Success 24. Success 25. Success 26. Fail(invalid credentials) 27. 
Success """ # Confirm that uid=dbyers is not been affected by fine grained password dbyers = UserAccount(topo.standalone, f'uid=dbyers,ou=dirsec,{DEFAULT_SUFFIX}') conn = dbyers.bind('dby3rs1') dbyers_conn = UserAccount(conn, f'uid=dbyers,ou=dirsec,{DEFAULT_SUFFIX}') # Should be able to change password(As its is not belong to any password policy) dbyers_conn.replace('userpassword', "Anuj") # Try to change password for uid=orla even though pw policy is set to no. # Should get error message: unwilling to Perform ! orla = UserAccount(topo.standalone, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') conn = orla.bind('000rla1') orla_conn = UserAccount(conn, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') # pw policy is set to no with pytest.raises(ldap.UNWILLING_TO_PERFORM): orla_conn.replace('userpassword', "000rla2") pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') # Set Password change to May Change Password. orl.replace('passwordchange', 'on') assert orl.get_attr_val_utf8('passwordchange') == 'on' # Administrator Reseting to original password ! orla.replace('userpassword', '000rla1') # Attempt to Modify password to orla2 with an invalid first pw with error message. with pytest.raises(ldap.INVALID_CREDENTIALS): conn = orla.bind('Invalid_password') # Changing current password from orla1 to orla2 orla_conn.replace('userpassword', '000rla2') # Changing current password from orla2 to orla1. 
orla_conn = UserAccount(conn, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') orla_conn.replace('userpassword', '000rla1') # Set Password change to Must Not Change After Reset joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') joe.replace_many(('passwordmustchange', 'off'), ('passwordchange', 'off')) people.replace_many(('passwordmustchange', 'off'), ('passwordchange', 'off')) for attr in ['passwordMustChange', 'passwordchange']: assert joe.get_attr_val_utf8(attr) == 'off' for attr in ['passwordMustChange', 'passwordchange']: assert people.get_attr_val_utf8(attr) == 'off' # Change password for uid,joe,jack,deep even though pw policy is set to no with error message. for user, password, pass_to_change in [ ('joe', '00j0e1', '00j0e2'), ('jack', '00j6ck1', '00j6ck2'), ('deep,ou=others', '00de3p1', '00de3p2') ]: real_user = UserAccount(topo.standalone, f'uid={user},ou=people,{DEFAULT_SUFFIX}') conn = real_user.bind(password) real_conn = UserAccount(conn, real_user.dn) # pw policy is set to no with pytest.raises(ldap.UNWILLING_TO_PERFORM): real_conn.replace('userpassword', pass_to_change) real_user = UserAccount(topo.standalone, f'uid=fred,{DEFAULT_SUFFIX}') conn = real_user.bind('00fr3d1') # Fred can change.(Fred is not belong to any pw policy) real_conn = UserAccount(conn, real_user.dn) real_conn.replace('userpassword', '00fr3d2') # Changing pw policy to may change pw # Set Password change to May Change Password joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') joe.replace('passwordchange', 'on') assert joe.get_attr_val_utf8('passwordchange') == 'on' # Administrator Reseting to original password change_password_with_admin(topo, [ ('uid=joe,ou=people', '00j0e1'), ('uid=jack,ou=people', '00j6ck1'), ('uid=fred', '00fr3d1'), ('uid=deep,ou=others,ou=people', '00de3p1') ]) # Try to change password with invalid credentials. Should see error message. 
for user in [ 'uid=joe,ou=people', 'uid=jack,ou=people', 'uid=fred', 'uid=deep,ou=others,ou=people' ]: with pytest.raises(ldap.INVALID_CREDENTIALS): UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") # Changing current password for joe and fed. for user, password, new_pass in [ ('uid=joe,ou=people', '00j0e1', '00j0e2'), ('uid=fred', '00fr3d1', '00fr3d2') ]: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) UserAccount(conn, real_user.dn).replace('userpassword', new_pass) # Changing current password for jack and deep with error message.(passwordchange not on) for user, password, new_pass in [ ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') ]: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) with pytest.raises(ldap.UNWILLING_TO_PERFORM): UserAccount(conn, real_user.dn).replace('userpassword', new_pass) # Changing pw policy to may change pw # Set Password change to May Change Password people.replace('passwordchange', 'on') assert people.get_attr_val_utf8('passwordchange') == 'on' # Administrator Reseting to original password change_password_with_admin(topo, [ ('uid=joe,ou=people', '00j0e1'), ('uid=jack,ou=people', '00j6ck1'), ('uid=fred', '00fr3d1'), ('uid=deep,ou=others,ou=people', '00de3p1') ]) # Try to change password with invalid credentials. Should see error message. 
for user in [ 'uid=joe,ou=people', 'uid=jack,ou=people', 'uid=fred', 'uid=deep,ou=others,ou=people' ]: with pytest.raises(ldap.INVALID_CREDENTIALS): UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") # Changing current password change_password(topo, [ ('uid=joe,ou=people', '00j0e1', '00j0e2'), ('uid=fred', '00fr3d1', '00fr3d2'), ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') ]) # Set Password change to Must Not Change After Reset joe.replace('passwordchange', 'off') assert joe.get_attr_val_utf8('passwordchange') == 'off' # Administrator Reseting to original password change_password_with_admin(topo, [ ('uid=joe,ou=people', '00j0e1'), ('uid=fred', '00fr3d1'), ('uid=jack,ou=people', '00j6ck1'), ('uid=deep,ou=others,ou=people', '00de3p1') ]) # Try to change password with invalid credentials. Should see error message for user in [ 'uid=joe,ou=people', 'uid=jack,ou=people', 'uid=fred', 'uid=deep,ou=others,ou=people' ]: with pytest.raises(ldap.INVALID_CREDENTIALS): UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") # Changing current password change_password(topo, [ ('uid=fred', '00fr3d1', '00fr3d2'), ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') ]) # ('passwordchange', 'off') for joe real_user = UserAccount(topo.standalone, f'uid=joe,ou=people,{DEFAULT_SUFFIX}') conn = real_user.bind('00j0e1') with pytest.raises(ldap.UNWILLING_TO_PERFORM): UserAccount(conn, real_user.dn).replace('userpassword', '00j0e2') @pytest.fixture(scope="function") def _fixture_for_syntax_section(request, topo): change_password_with_admin(topo, [ ('uid=joe,ou=people', '00j0e1'), ('uid=fred', '00fr3d1'), ('uid=jack,ou=people', '00j6ck1'), ('uid=deep,ou=others,ou=people', '00de3p1'), ('uid=orla,ou=dirsec', '000rla1'), ('uid=dbyers,ou=dirsec', 'Anuj') ]) pwp = PwPolicyManager(topo.standalone) orl = 
pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') for instance in [orl, joe, people]: instance.replace('passwordchecksyntax', 'on') instance.replace('passwordChange', 'on') assert instance.get_attr_val_utf8('passwordchecksyntax') == 'on' def final_step(): for instance1 in [orl, joe, people]: instance1.replace('passwordminlength', '6') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=dbyers,ou=dirsec', 'dby3rs1'), ('uid=fred', '00fr3d1') ]) request.addfinalizer(final_step) def test_password_syntax_section(topo, _policy_setup, _fixture_for_syntax_section): """Password Syntax Section. :id: 7bf1cb46-9388-11ea-9019-8c16451d917b :setup: Standalone :steps: 1. Try to change password with invalid credentials. Should get error (invalid cred). 2. Try to change to a password that violates length. Should get error (constaint viol.). 3. Attempt to Modify password to db which is in error to policy 4. Changing password minimum length to 5 to check triviality 5. Try to change password to the value of uid, which is trivial. Should get error. 6. Try to change password to givenname which is trivial. Should get error 7. Try to change password to sn which is trivial. Should get error 8. Changing password minimum length back to 6 9. Changing current password from ``*1`` to ``*2`` 10. Changing current password from ``*2`` to ``*1`` 11. Changing current password to the evil password 12. Resetting to original password as cn=directory manager 13. Setting policy to NOT Check Password Syntax 14. Test that when checking syntax is off, you can use small passwords 15. Test that when checking syntax is off, trivial passwords can be used 16. Resetting to original password as cn=directory manager 17. 
Changing password minimum length from 6 to 10 18. Setting policy to Check Password Syntax again 19. Try to change to a password that violates length 20. Change to a password that meets length requirement :expected results: 1. Fail(invalid cred) 2. Fail(constaint viol.) 3. Fail(Syntax error) 4. Success 5. Fail(trivial) 6. Fail(password to givenname ) 7. Success 8. Success 9. Success 10. Success 11. Fail(evil password) 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success 19. Fail(violates length) 20. Success """ # Try to change password with invalid credentials. Should get error (invalid cred). for user in [ 'uid=joe,ou=people', 'uid=jack,ou=people', 'uid=fred', 'uid=deep,ou=others,ou=people', 'uid=dbyers,ou=dirsec', 'uid=orla,ou=dirsec' ]: with pytest.raises(ldap.INVALID_CREDENTIALS): UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") # Try to change to a password that violates length. Should get error (constaint viol.). with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rla1', 'db'), ('uid=joe,ou=people', '00j0e1', 'db'), ('uid=jack,ou=people', '00j6ck1', 'db'), ('uid=deep,ou=others,ou=people', '00de3p1', 'db') ]) # Attempt to Modify password to db which is in error to policy(Syntax error) change_password_ultra_new(topo, [ ('uid=dbyers,ou=dirsec', 'Anuj', 'db', 'dby3rs1'), ('uid=fred', '00fr3d1', 'db', '00fr3d1') ]) # Changing password minimum length to 5 to check triviality pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') for instance in [orl, joe, people]: instance.replace('passwordminlength', '5') # Try to change password to the value of uid, which is trivial. Should get error. 
with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rla1', 'orla'), ('uid=joe,ou=people', '00j0e1', 'joe'), ('uid=jack,ou=people', '00j6ck1', 'jack'), ('uid=deep,ou=others,ou=people', '00de3p1', 'deep') ]) # dbyers and fred can change change_password_ultra_new(topo, [ ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dbyers', 'dby3rs1'), ('uid=fred', '00fr3d1', 'fred', '00fr3d1') ]) # Try to change password to givenname which is trivial. Should get error with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rla1', 'orla'), ('uid=joe,ou=people', '00j0e1', 'joe'), ('uid=jack,ou=people', '00j6ck1', 'jack'), ('uid=deep,ou=others,ou=people', '00de3p1', 'deep') ]) # dbyers and fred can change change_password_ultra_new(topo, [ ('uid=dbyers,ou=dirsec', 'dby3rs1', 'danny', 'dby3rs1'), ('uid=fred', '00fr3d1', 'fred', '00fr3d1') ]) # Try to change password to sn which is trivial. Should get error with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rla1', 'Hegarty'), ('uid=joe,ou=people', '00j0e1', 'Rath'), ('uid=jack,ou=people', '00j6ck1', 'Rath'), ('uid=deep,ou=others,ou=people', '00de3p1', 'Blue') ]) # dbyers and fred can change change_password_ultra_new(topo, [ ('uid=dbyers,ou=dirsec', 'dby3rs1', 'Byers', 'dby3rs1'), ('uid=fred', '00fr3d1', 'Byers', '00fr3d1') ]) # Changing password minimum length back to 6 for instance1 in [orl, joe, people]: instance1.replace('passwordminlength', '6') # Changing current password from *1 to *2 change_password(topo, [ ('uid=orla,ou=dirsec', '000rla1', '000rLb2'), ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dby3rs2'), ('uid=fred', '00fr3d1', '00fr3d2'), ('uid=joe,ou=people', '00j0e1', '00J0e2'), ('uid=jack,ou=people', '00j6ck1', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00de3p1', '00De3p2') ]) # Changing current password from *2 to *1 change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), 
('uid=dbyers,ou=dirsec', 'dby3rs2', 'dby3rs1'), ('uid=fred', '00fr3d2', '00fr3d1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') ]) # Changing current password to the evil password with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', r'{\;\\].'), ('uid=joe,ou=people', '00J0e1', r'{\;\\].'), ('uid=jack,ou=people', '00J6ck1', r'{\;\\].'), ('uid=deep,ou=others,ou=people', '00De3p1', r'{\;\\].') ]) # dbyers and fred can change change_password(topo, [ ('uid=dbyers,ou=dirsec', 'dby3rs1', r'{\;\\].'), ('uid=fred', '00fr3d1', r'{\;\\].') ]) # Resetting to original password as cn=directory manager change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=dbyers,ou=dirsec', 'dby3rs1'), ('uid=fred', '00fr3d1') ]) # Setting policy to NOT Check Password Syntax # Searching for passwordminlength for instance in [orl, joe, people]: instance.replace('passwordchecksyntax', 'off') for instance in [orl, joe, people]: assert instance.get_attr_val_utf8('passwordchecksyntax') == 'off' assert instance.get_attr_val_utf8('passwordminlength') == '6' # Test that when checking syntax is off, you can use small passwords change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'db'), ('uid=joe,ou=people', '00J0e1', 'db'), ('uid=jack,ou=people', '00J6ck1', 'db'), ('uid=deep,ou=others,ou=people', '00De3p1', 'db'), ('uid=dbyers,ou=dirsec', 'dby3rs1', 'db'), ('uid=fred', '00fr3d1', 'db') ]) # Test that when checking syntax is off, trivial passwords can be used change_password(topo, [ ('uid=orla,ou=dirsec', 'db', 'orla'), ('uid=joe,ou=people', 'db', 'joe'), ('uid=jack,ou=people', 'db', 'jack'), ('uid=deep,ou=others,ou=people', 'db', 'deep'), ('uid=dbyers,ou=dirsec', 'db', 'dbyers'), ('uid=fred', 'db', 'fred') ]) # 
Resetting to original password as cn=directory manager change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=dbyers,ou=dirsec', 'dby3rs1'), ('uid=fred', '00fr3d1') ]) # Changing password minimum length from 6 to 10 # Setting policy to Check Password Syntax again for instance in [orl, joe, people]: instance.replace_many( ('passwordchecksyntax', 'on'), ('passwordminlength', '10')) # Try to change to a password that violates length with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'db'), ('uid=joe,ou=people', '00J0e1', 'db'), ('uid=jack,ou=people', '00J6ck1', 'db'), ('uid=deep,ou=others,ou=people', '00De3p1', 'db') ]) # dbyers and fred can change as it does not belong to any pw policy change_password(topo, [ ('uid=dbyers,ou=dirsec', 'dby3rs1', 'db'), ('uid=fred', '00fr3d1', 'db') ]) # Change to a password that meets length requirement change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'This_IS_a_very_very_long_password'), ('uid=joe,ou=people', '00J0e1', 'This_IS_a_very_very_long_password'), ('uid=jack,ou=people', '00J6ck1', 'This_IS_a_very_very_long_password'), ('uid=deep,ou=others,ou=people', '00De3p1', 'This_IS_a_very_very_long_password'), ('uid=dbyers,ou=dirsec', 'db', 'This_IS_a_very_very_long_password'), ('uid=fred', 'db', 'This_IS_a_very_very_long_password') ]) @pytest.fixture(scope="function") def _fixture_for_password_history(request, topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) for 
instance in [orl, joe, people]: instance.replace_many( ('passwordhistory', 'on'), ('passwordinhistory', '3'), ('passwordChange', 'on')) for instance in [orl, joe, people]: assert instance.get_attr_val_utf8('passwordhistory') == 'on' assert instance.get_attr_val_utf8('passwordinhistory') == '3' assert instance.get_attr_val_utf8('passwordChange') == 'on' def final_step(): for instance1 in [orl, joe, people]: instance1.replace('passwordhistory', 'off') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) request.addfinalizer(final_step) def test_password_history_section(topo, _policy_setup, _fixture_for_password_history): """Password History Section. :id: 51f459a0-a0ba-11ea-ade7-8c16451d917b :setup: Standalone :steps: 1. Changing current password for orla,joe,jack and deep 2. Checking that the passwordhistory attribute has been added ! 3. Try to change the password back which should fail 4. Change the passwords for all four test users to something new 5. Try to change passwords back to the first password 6. Change to a fourth password not in password history 7. Try to change all the passwords back to the first password 8. Change the password to one more new password as root dn 9. Now try to change the password back to the first password 10. Checking that password history does still containt the previous 3 passwords 11. Add a password test for long long password (more than 490 bytes). 12. Changing password : LONGPASSWORD goes in history 13. Setting policy to NOT keep password histories 14. Changing current password from ``*2 to ``*2`` 15. Try to change ``*2`` to ``*1``, should succeed :expected results: 1. Success 2. Success 3. Fail(ldap.CONSTRAINT_VIOLATION) 4. Success 5. Fail(ldap.CONSTRAINT_VIOLATION)) 6. Success 7. Fail(ldap.CONSTRAINT_VIOLATION)) 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. 
Success """ # Changing current password for orla,joe,jack and deep change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2'), ]) time.sleep(1) # Checking that the password history attribute has been added ! for user, password in [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ]: assert password in UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').get_attr_val_utf8("passwordhistory") # Try to change the password back which should fail with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1'), ]) # Change the passwords for all four test users to something new change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb3'), ('uid=joe,ou=people', '00J0e3'), ('uid=jack,ou=people', '00J6ck3'), ('uid=deep,ou=others,ou=people', '00De3p3') ]) # Try to change passwords back to the first password time.sleep(1) with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb3', '000rLb1'), ('uid=joe,ou=people', '00J0e3', '00J0e1'), ('uid=jack,ou=people', '00J6ck3', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p3', '00De3p1'), ]) # Change to a fourth password not in password history change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb4'), ('uid=joe,ou=people', '00J0e4'), ('uid=jack,ou=people', '00J6ck4'), ('uid=deep,ou=others,ou=people', '00De3p4') ]) time.sleep(1) # Try to change all the passwords back to the first password with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb4', '000rLb1'), ('uid=joe,ou=people', '00J0e4', '00J0e1'), 
('uid=jack,ou=people', '00J6ck4', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p4', '00De3p1') ]) # change the password to one more new password as root dn change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb5'), ('uid=joe,ou=people', '00J0e5'), ('uid=jack,ou=people', '00J6ck5'), ('uid=deep,ou=others,ou=people', '00De3p5') ]) time.sleep(1) # Now try to change the password back to the first password change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb5', '000rLb1'), ('uid=joe,ou=people', '00J0e5', '00J0e1'), ('uid=jack,ou=people', '00J6ck5', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p5', '00De3p1') ]) time.sleep(1) # checking that password history does still containt the previous 3 passwords for user, password3, password2, password1 in [ ('uid=orla,ou=dirsec', '000rLb5', '000rLb4', '000rLb3'), ('uid=joe,ou=people', '00J0e5', '00J0e4', '00J0e3'), ('uid=jack,ou=people', '00J6ck5', '00J6ck4', '00J6ck3'), ('uid=deep,ou=others,ou=people', '00De3p5', '00De3p4', '00De3p3') ]: user1 = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') pass_list = ''.join(user1.get_attr_vals_utf8("passwordhistory")) assert password1 in pass_list assert password2 in pass_list assert password3 in pass_list # Add a password test for long long password (more than 490 bytes). 
long = '01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901' \ '23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456' \ '789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012' \ '345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678' \ '901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234' \ '5678901234567890123456789LENGTH=510' change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', long), ('uid=joe,ou=people', '00J0e1', long), ('uid=jack,ou=people', '00J6ck1', long), ('uid=deep,ou=others,ou=people', '00De3p1', long) ]) time.sleep(1) # Changing password : LONGPASSWORD goes in history change_password(topo, [ ('uid=orla,ou=dirsec', long, '000rLb2'), ('uid=joe,ou=people', long, '00J0e2'), ('uid=jack,ou=people', long, '00J6ck2'), ('uid=deep,ou=others,ou=people', long, '00De3p2') ]) time.sleep(1) for user, password in [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2') ]: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) assert long in ''.join(UserAccount(conn, f'{user},{DEFAULT_SUFFIX}').get_attr_vals_utf8("passwordhistory")) # Setting policy to NOT keep password histories _do_transaction_for_pwp(topo, 'passwordhistory', 'off') time.sleep(1) # Changing current password from *2 to *2 change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb2'), ('uid=joe,ou=people', '00J0e2', '00J0e2'), ('uid=jack,ou=people', '00J6ck2', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p2') ]) # Try to change *2 to *1, should succeed change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), 
('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') ]) @pytest.fixture(scope="function") def _fixture_for_password_min_age(request, topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) for pwp1 in [orl, joe, people]: assert pwp1.get_attr_val_utf8('passwordminage') == '0' pwp1.replace_many( ('passwordminage', '10'), ('passwordChange', 'on')) def final_step(): for pwp2 in [orl, joe, people]: pwp2.replace('passwordminage', '0') request.addfinalizer(final_step) def test_password_minimum_age_section(topo, _policy_setup, _fixture_for_password_min_age): """Password History Section. :id: 470f5b2a-a0ba-11ea-ab2d-8c16451d917b :setup: Standalone :steps: 1. Searching for password minimum age, should be 0 per defaults set 2. Change current password from ``*1`` to ``*2`` 3. Wait 5 secs and try to change again. Should fail. 4. Wait more time to complete password min age 5. Now user can change password :expected results: 1. Success 2. Success 3. Fail(ldap.CONSTRAINT_VIOLATION) 4. Success 5. Success """ # Change current password from *1 to *2 change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), ('uid=joe,ou=people', '00J0e1', '00J0e2'), ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2') ]) # Wait 5 secs and try to change again. Should fail. 
count = 0 while count < 5: with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') ]) time.sleep(1) count += 1 # Wait more time to complete password min age time.sleep(6) # Now user can change password change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') ]) @pytest.fixture(scope="function") def _fixture_for_password_lock_out(request, topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) for pwp1 in [orl, joe, people]: assert pwp1.get_attr_val_utf8('passwordlockout') == 'off' pwp1.replace_many( ('passwordlockout', 'on'), ('passwordlockoutduration', '3'), ('passwordresetfailurecount', '3'), ('passwordChange', 'on')) def final_step(): for instance in [orl, joe, people]: instance.replace('passwordlockout', 'off') instance.replace('passwordunlock', 'off') assert instance.get_attr_val_utf8('passwordlockout') == 'off' assert instance.get_attr_val_utf8('passwordunlock') == 'off' request.addfinalizer(final_step) def test_account_lockout_and_lockout_duration_section(topo, _policy_setup, _fixture_for_password_lock_out): """Account Lockout and Lockout Duration Section :id: 1ff0b7a4-b560-11ea-9ece-8c16451d917b :setup: Standalone :steps: 1. Try to bind with invalid credentials 2. Try to bind with valid pw, should give lockout error 3. 
After 3 seconds Try to bind with valid pw, should work 4. Try to bind with invalid credentials 5. Attempt to bind with valid pw after timeout is up 6. Resetting with root can break lockout :expected results: 1. Fail(ldap.INVALID_CREDENTIALS) 2. Fail(ldap.CONSTRAINT_VIOLATION) 3. Success 4. Fail(ldap.INVALID_CREDENTIALS)) 5. Success 6. Success """ # Try to bind with invalid credentials for count1 in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): change_password(topo, [ ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), ('uid=joe,ou=people', 'Invalid', 'Invalid'), ('uid=jack,ou=people', 'Invalid', 'Invalid'), ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') ]) # Try to bind with valid pw, should give lockout error with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), ('uid=joe,ou=people', '00J0e1', '00J0e1'), ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') ]) # Try to bind with valid pw, should work time.sleep(3) change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), ('uid=joe,ou=people', '00J0e1', '00J0e2'), ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2') ]) # Try to bind with invalid credentials for count1 in range(2): with pytest.raises(ldap.INVALID_CREDENTIALS): change_password(topo, [ ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), ('uid=joe,ou=people', 'Invalid', 'Invalid'), ('uid=jack,ou=people', 'Invalid', 'Invalid'), ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') ]) # Attempt to bind with valid pw after timeout is up time.sleep(3) change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), ('uid=joe,ou=people', '00J0e2', '00J0e1'), ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') ]) # Resetting with root can break lockout for count1 in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): 
change_password(topo, [ ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), ('uid=joe,ou=people', 'Invalid', 'Invalid'), ('uid=jack,ou=people', 'Invalid', 'Invalid'), ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') ]) with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), ('uid=joe,ou=people', '00J0e1', '00J0e1'), ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') ]) change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), ('uid=joe,ou=people', '00J0e1', '00J0e1'), ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') ]) @pytest.fixture(scope="function") def _fixture_for_grace_limit(topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) for instance in [orl, joe, people]: instance.replace_many(('passwordMaxAge', '3'), ('passwordGraceLimit', '7'), ('passwordexp', 'on'), ('passwordwarning', '30'), ('passwordChange', 'on')) def _bind_self(topo, user_password_new_pass_list): """ Will bind password with self. 
""" for user, password in user_password_new_pass_list: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) def test_grace_limit_section(topo, _policy_setup, _fixture_for_grace_limit): """Account Lockout and Lockout Duration Section :id: 288e3756-b560-11ea-9390-8c16451d917b :setup: Standalone :steps: 1. Check users have 7 grace login attempts after their password expires 2. Wait for password expiration 3. The the 8th should fail except fred who defaults to global password policy 4. Now try resetting the password before the grace login attempts run out 5. Wait for password expiration 6. Now change the password as the 7th attempt 7. Wait for password expiration 8. First 7 good attempts 9. The the 8th should fail except fred who defaults to global password policy 10. Changing the paswordMaxAge to 0 so expiration is immediate test 11. Modify the users passwords to start the clock of zero 12. PasswordGraceLimit to 0, passwordMaxAge to 3 seconds 13. Modify the users passwords to start the clock 14. Users should be blocked 15. Removing the passwordgracelimit attribute should make it default to 0 :expected results: 1. Success 2. Success 3. Fail(ldap.INVALID_CREDENTIALS) 4. Success 5. Success 6. Success 7. Success 8. Success 9. Fail(ldap.INVALID_CREDENTIALS) 10. Success 11. Success 12. Success 13. Success 14. Success 15. 
Success """ # Check users have 7 grace login attempts after their password expires change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2'), ('uid=fred', '00fr3d2') ]) # Wait for password expiration time.sleep(3) # The the 8th should fail except fred who defaults to global password policy for _ in range(7): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2'), ('uid=fred', '00fr3d2') ]) with pytest.raises(ldap.INVALID_CREDENTIALS): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2') ]) _bind_self(topo, [ ('uid=fred', '00fr3d2') ]) # Now try resetting the password before the grace login attempts run out change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) # Wait for password expiration time.sleep(3) # first 6 good attempts for _ in range(6): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) # now change the password as the 7th attempt change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), ('uid=joe,ou=people', '00J0e1', '00J0e2'), ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2'), ('uid=fred', '00fr3d1', '00fr3d2') ]) # Wait for password expiration time.sleep(3) # first 7 good attempts for _ in range(7): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', 
'00De3p2'), ('uid=fred', '00fr3d2') ]) # The the 8th should fail except fred who defaults to global password policy with pytest.raises(ldap.INVALID_CREDENTIALS): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb2'), ('uid=joe,ou=people', '00J0e2'), ('uid=jack,ou=people', '00J6ck2'), ('uid=deep,ou=others,ou=people', '00De3p2') ]) _bind_self(topo, [ ('uid=fred', '00fr3d2') ]) # Changing the paswordMaxAge to 0 so expiration is immediate test to see # that the user still has 7 grace login attempts before locked out for att1 in ['passwordMaxAge', 'passwordwarning']: _do_transaction_for_pwp(topo, att1, '0') # Modify the users passwords to start the clock of zero change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) # first 7 good attempts for _ in range(7): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) # The the 8th should fail .... 
# except fred who defaults to global password policy with pytest.raises(ldap.INVALID_CREDENTIALS): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) _bind_self(topo, [ ('uid=fred', '00fr3d1') ]) # setting the passwordMaxAge to 3 seconds once more # and the passwordGraceLimit to 0 for att1, att2 in [('passwordMaxAge', '3'), ('passwordGraceLimit', '0')]: _do_transaction_for_pwp(topo, att1, att2) # modify the users passwords to start the clock change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) # Users should be blocked time.sleep(3) with pytest.raises(ldap.INVALID_CREDENTIALS): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) _bind_self(topo, [ ('uid=fred', '00fr3d1') ]) for att1, att2 in [('passwordGraceLimit', '10')]: _do_transaction_for_pwp(topo, att1, att2) # removing the passwordgracelimit attribute should make it default to 0 for att1, att2 in [('passwordGraceLimit', ' ')]: _do_transaction_for_pwp(topo, att1, att2) change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1') ]) time.sleep(3) with pytest.raises(ldap.INVALID_CREDENTIALS): _bind_self(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1') ]) _bind_self(topo, [ ('uid=fred', '00fr3d1') ]) @pytest.fixture(scope="function") def _fixture_for_additional_cases(topo): pwp = PwPolicyManager(topo.standalone) orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') 
joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') change_password_with_admin(topo, [ ('uid=orla,ou=dirsec', '000rLb1'), ('uid=joe,ou=people', '00J0e1'), ('uid=jack,ou=people', '00J6ck1'), ('uid=deep,ou=others,ou=people', '00De3p1'), ('uid=fred', '00fr3d1'), ('uid=dbyers,ou=dirsec', 'dby3rs1') ]) for instance in [orl, joe, people]: instance.replace_many(('passwordChange', 'on'), ('passwordwarning', '86400'), ('passwordGraceLimit', '0'), ('passwordexp', 'off'), ('passwordMaxAge', '8640000'), ('passwordchecksyntax', 'off')) def test_additional_corner_cases(topo, _policy_setup, _fixture_for_additional_cases): """Additional corner cases :id: 2f6cec66-b560-11ea-9d7c-8c16451d917b :setup: Standalone :steps: 1. Try to change password to one containing spaces 2. Setting password policy to Check password syntax 3. Try to change password to the value of mail, which is trivial. Should get error. 4. No error for fred and dbyers as they are not included in PW policy. 5. Revert changes for fred and dbyers 6. Try to change password to the value of ou, which is trivial. Should get error. 7. No error for fred and dbyers as they are not included in PW policy. 8. Revert changes for fred and dbyers :expected results: 1. Success 2. Success 3. Fail(CONSTRAINT_VIOLATION) 4. Success 5. Success 6. Fail(CONSTRAINT_VIOLATION) 7. Success 8. 
Success """ # Try to change password to one containing spaces change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'This Password has spaces.'), ('uid=joe,ou=people', '00J0e1', 'This Password has spaces.'), ('uid=jack,ou=people', '00J6ck1', 'This Password has spaces.'), ('uid=fred', '00fr3d1', 'This Password has spaces.'), ('uid=deep,ou=others,ou=people', '00De3p1', 'This Password has spaces.'), ('uid=dbyers,ou=dirsec', 'dby3rs1', 'This Password has spaces.') ]) change_password(topo, [ ('uid=orla,ou=dirsec', 'This Password has spaces.', '000rLb1'), ('uid=joe,ou=people', 'This Password has spaces.', '00j0e1'), ('uid=jack,ou=people', 'This Password has spaces.', '00j6ck1'), ('uid=fred', 'This Password has spaces.', '00fr3d1'), ('uid=deep,ou=others,ou=people', 'This Password has spaces.', '00de3p1'), ('uid=dbyers,ou=dirsec', 'This Password has spaces.', 'dby3rs1') ]) # Setting password policy to Check password syntax for attr, para in [('passwordchecksyntax', 'on'), ('passwordminlength', '5')]: _do_transaction_for_pwp(topo, attr, para) # Try to change password to the value of mail, which is trivial. Should get error. with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'orla@example.com'), ('uid=joe,ou=people', '00j0e1', 'joe@example.com'), ('uid=jack,ou=people', '00j6ck1', 'jack@example.com'), ('uid=deep,ou=others,ou=people', '00de3p1', 'deep@example.com') ]) # No error for fred and dbyers as they are not included in PW policy. change_password(topo, [ ('uid=fred', '00fr3d1', 'fred@example.com'), ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dbyers@example.com') ]) # Revert changes for fred and dbyers change_password(topo, [ ('uid=fred', 'fred@example.com', '00fr3d1'), ('uid=dbyers,ou=dirsec', 'dbyers@example.com', 'dby3rs1') ]) # Creating OUs. 
for user, new_ou in [ ('uid=orla,ou=dirsec', 'dirsec'), ('uid=joe,ou=people', 'people'), ('uid=jack,ou=people', 'people'), ('uid=deep,ou=others,ou=people', 'others'), ('uid=dbyers,ou=dirsec', 'dirsec') ]: UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').add('ou', new_ou) # Try to change password to the value of ou, which is trivial. Should get error. with pytest.raises(ldap.CONSTRAINT_VIOLATION): change_password(topo, [ ('uid=orla,ou=dirsec', '000rLb1', 'dirsec'), ('uid=joe,ou=people', '00j0e1', 'people'), ('uid=jack,ou=people', '00j6ck1', 'people'), ('uid=deep,ou=others,ou=people', '00de3p1', 'others') ]) # No error for byers as it is not included in PW policy. change_password(topo, [('uid=dbyers,ou=dirsec', 'dby3rs1', 'dirsec')]) # Revert changes for dbyers change_password_with_admin(topo, [ ('uid=fred', '00fr3d1'), ('uid=dbyers,ou=dirsec', 'dby3rs1') ]) if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/password_test.py000066400000000000000000000036611421664411400274730ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import PASSWORD, DEFAULT_SUFFIX from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.mark.bz918684 @pytest.mark.ds394 def test_password_delete_specific_password(topology_st): """Delete a specific userPassword, and make sure it is actually deleted from the entry :id: 800f432a-52ab-4661-ac66-a2bdd9b984d6 :setup: Standalone instance :steps: 1. Add a user with userPassword attribute in cleartext 2. 
Delete the added value of userPassword attribute 3. Check if the userPassword attribute is deleted 4. Delete the user :expectedresults: 1. The user with userPassword in cleartext should be added successfully 2. Operation should be successful 3. UserPassword should be deleted 4. The user should be successfully deleted """ log.info('Running test_password_delete_specific_password...') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create(properties=TEST_USER_PROPERTIES) # # Add a test user with a password # user.set('userpassword', PASSWORD) # # Delete the exact password # user.remove('userpassword', PASSWORD) # # Check the password is actually deleted # assert not user.present('userPassword') log.info('test_password_delete_specific_password: PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py000066400000000000000000000032231421664411400320600ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.topologies import topology_st from lib389.password_plugins import PBKDF2Plugin from lib389.utils import ds_is_older pytestmark = pytest.mark.tier1 @pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented") def test_pbkdf2_upgrade(topology_st): """On upgrade pbkdf2 doesn't ship. We need to be able to provide this on upgrade to make sure default hashes work. However, password plugins are special - they need really early bootstap so that setting the default has specs work. This tests that the removal of the pbkdf2 plugin causes it to be re-bootstrapped and added. :id: c2198692-7c02-433b-af5b-3be54920571a :setup: Single instance :steps: 1. 
Remove the PBKDF2 plugin 2. Restart the server 3. Restart the server :expectedresults: 1. Plugin is removed (IE pre-upgrade state) 2. The plugin is bootstrapped and added 3. No change (already bootstrapped) """ # Remove the pbkdf2 plugin config p1 = PBKDF2Plugin(topology_st.standalone) assert(p1.exists()) p1._protected = False p1.delete() # Restart topology_st.standalone.restart() # check it's been readded. p2 = PBKDF2Plugin(topology_st.standalone) assert(p2.exists()) # Now restart to make sure we still work from the non-bootstrap form topology_st.standalone.restart() p3 = PBKDF2Plugin(topology_st.standalone) assert(p3.exists()) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pw_expired_access_test.py000066400000000000000000000040251421664411400313130ustar00rootroot00000000000000import ldap import logging import pytest import os import time from lib389._constants import DEFAULT_SUFFIX, PASSWORD from lib389.idm.domain import Domain from lib389.idm.user import UserAccounts from lib389.topologies import topology_st as topo log = logging.getLogger(__name__) def test_expired_user_has_no_privledge(topo): """Specify a test case purpose or name here :id: 3df86b45-9929-414b-9bf6-06c25301d207 :setup: Standalone Instance :steps: 1. Set short password expiration time 2. Add user and wait for expiration time to run out 3. Set one aci that allows authenticated users full access 4. Bind as user (password should be expired) 5. Attempt modify :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ # Configured password epxiration topo.standalone.config.replace_many(('passwordexp', 'on'), ('passwordmaxage', '1')) # Set aci suffix = Domain(topo.standalone, DEFAULT_SUFFIX) ACI_TEXT = '(targetattr="*")(version 3.0; acl "test aci"; allow (all) (userdn="ldap:///all");)' suffix.replace('aci', ACI_TEXT) # Add user user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() user.replace('userpassword', PASSWORD) time.sleep(2) # Bind as user with expired password. Need to use raw ldap calls because # lib389 will close the connection when an error 49 is encountered. ldap_object = ldap.initialize(topo.standalone.toLDAPURL()) with pytest.raises(ldap.INVALID_CREDENTIALS): res_type, res_data, res_msgid, res_ctrls = ldap_object.simple_bind_s( user.dn, PASSWORD) # Try modify with pytest.raises(ldap.INSUFFICIENT_ACCESS): modlist = [ (ldap.MOD_REPLACE, 'description', b'Should not work!') ] ldap_object.modify_ext_s(DEFAULT_SUFFIX, modlist) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdAdmin_test.py000066400000000000000000000266411421664411400273770ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.domain import Domain from lib389._constants import SUFFIX, DN_DM, PASSWORD, DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' ADMIN_NAME = 'passwd_admin' ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) ADMIN2_NAME = 'passwd_admin2' ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX) ADMIN_PWD = 'ntaheonusheoasuhoau_9' ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX) ENTRY_NAME = 'Joe Schmo' ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') @pytest.fixture(scope="module") def password_policy(topology_st): """Set up password policy Create a Password Admin entry; Set up password policy attributes in config; Add an aci to give everyone full access; Test that the setup works """ log.info('test_pwdAdmin_init: Creating Password Administrator entries...') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) # Add Password Admin 1 admin1_user = users.create(properties={ 'uid': 'admin1', 'cn' : 'admin1', 'sn' : 'strator', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/admin1', 'userPassword': ADMIN_PWD }) # Add Password Admin 2 admin2_user = users.create(properties={ 'uid': 'admin2', 'cn' : 'admin2', 'sn' : 'strator', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/admin2', 'userPassword': ADMIN_PWD }) # Add Password Admin Group admin_group = groups.create(properties={ 'cn': 'password admin group' }) admin_group.add_member(admin1_user.dn) admin_group.add_member(admin2_user.dn) # Configure password policy log.info('test_pwdAdmin_init: 
Configuring password policy...') topology_st.standalone.config.replace_many( ('nsslapd-pwpolicy-local', 'on'), ('passwordCheckSyntax', 'on'), ('passwordMinCategories', '1'), ('passwordMinTokenLength', '2'), ('passwordExp', 'on'), ('passwordMinDigits', '1'), ('passwordMinSpecials', '1') ) # # Add an aci to allow everyone all access (just makes things easier) # log.info('Add aci to allow password admin to add/update entries...') domain = Domain(topology_st.standalone, DEFAULT_SUFFIX) ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT domain.add('aci', ACI_BODY) # # Bind as the future Password Admin # log.info('test_pwdAdmin_init: Bind as the Password Administrator (before activating)...') admin_conn = admin1_user.bind(ADMIN_PWD) # # Setup our test entry, and test password policy is working # # Connect up an admin authed users connection. 
admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) # # Start by attempting to add an entry with an invalid password # log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...') for passwd in INVALID_PWDS: with pytest.raises(ldap.CONSTRAINT_VIOLATION): admin_users.create(properties={ 'uid': 'example', 'cn' : 'example', 'sn' : 'example', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/example', 'userPassword': passwd }) return (admin_group, admin1_user, admin2_user) def test_pwdAdmin_bypass(topology_st, password_policy): """Test that password administrators/root DN can bypass password syntax/policy :id: 743bfe33-a1f7-482b-8807-efeb7aa57348 :setup: Standalone instance, Password Admin entry, Password policy configured as below: nsslapd-pwpolicy-local: on passwordCheckSyntax: on passwordMinCategories: 1 passwordMinTokenLength: 2 passwordExp: on passwordMinDigits: 1 passwordMinSpecials: 1 :steps: 1: Add users with invalid passwords :expectedresults: 1: Users should be added successfully. """ # # Now activate a password administator, bind as root dn to do the config # update, then rebind as the password admin # log.info('test_pwdAdmin: Activate the Password Administator...') # Extract our fixture data. (admin_group, admin1_user, admin2_user) = password_policy # Set the password admin topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) # # Get our test entry # admin_conn = admin1_user.bind(ADMIN_PWD) admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) # # Start adding entries with invalid passwords, delete the entry after each pass. 
# for passwd in INVALID_PWDS: u1 = admin_users.create(properties={ 'uid': 'example', 'cn' : 'example', 'sn' : 'example', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/example', 'userPassword': passwd }) u1.delete() def test_pwdAdmin_no_admin(topology_st, password_policy): """Test that password administrators/root DN can bypass password syntax/policy :id: 74347798-7cc7-4ce7-ad5c-06387ffde02c :setup: Standalone instance, Password Admin entry, Password policy configured as below: nsslapd-pwpolicy-local: on passwordCheckSyntax: on passwordMinCategories: 1 passwordMinTokenLength: 2 passwordExp: on passwordMinDigits: 1 passwordMinSpecials: 1 :steps: 1: Create a user 2: Attempt to set passwords on the user that are invalid :expectedresults: 1: Success 2: The passwords should NOT be set """ (admin_group, admin1_user, admin2_user) = password_policy # Remove password admin # Can't use pytest.raises. because this may or may not exist try: topology_st.standalone.config.remove_all('passwordAdminDN') except ldap.NO_SUCH_ATTRIBUTE: pass # # Add the entry for the next round of testing (modify password) # admin_conn = admin1_user.bind(ADMIN_PWD) admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) u2 = admin_users.create(properties={ 'uid': 'example', 'cn' : 'example', 'sn' : 'example', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/example', 'userPassword': ADMIN_PWD }) # # Make invalid password updates that should fail # for passwd in INVALID_PWDS: with pytest.raises(ldap.CONSTRAINT_VIOLATION): u2.replace('userPassword', passwd) def test_pwdAdmin_modify(topology_st, password_policy): """Test that password administrators/root DN can modify passwords rather than adding them. 
:id: 85326527-8eeb-401f-9d1b-4ef55dee45a4 :setup: Standalone instance, Password Admin entry, Password policy configured as below: nsslapd-pwpolicy-local: on passwordCheckSyntax: on passwordMinCategories: 1 passwordMinTokenLength: 2 passwordExp: on passwordMinDigits: 1 passwordMinSpecials: 1 :steps: 1: Retrieve the user 2: Replace the password with invalid content :expectedresults: 1: Success 2: The password should be set """ (admin_group, admin1_user, admin2_user) = password_policy # Update config - set the password admin topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) admin_conn = admin1_user.bind(ADMIN_PWD) admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) u3 = admin_users.get('example') # # Make the same password updates, but this time they should succeed # for passwd in INVALID_PWDS: u3.replace('userPassword', passwd) def test_pwdAdmin_group(topology_st, password_policy): """Test that password admin group can bypass policy. :id: 4d62ae34-0f25-486e-b823-afd2b431e9b0 :setup: Standalone instance, Password Admin entry, Password policy configured as below: nsslapd-pwpolicy-local: on passwordCheckSyntax: on passwordMinCategories: 1 passwordMinTokenLength: 2 passwordExp: on passwordMinDigits: 1 passwordMinSpecials: 1 :steps: 1: Add group to passwordadmin dn 2: Attempt to set invalid passwords. :expectedresults: 1: Success. 2: Password should be set. """ (admin_group, admin1_user, admin2_user) = password_policy # Update config - set the password admin group topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) # Bind as admin2, who is in the group. 
admin2_conn = admin2_user.bind(ADMIN_PWD) admin2_users = UserAccounts(admin2_conn, DEFAULT_SUFFIX) u4 = admin2_users.get('example') # Make some invalid password updates, but they should succeed for passwd in INVALID_PWDS: u4.replace('userPassword', passwd) def test_pwdAdmin_config_validation(topology_st, password_policy): """Check passwordAdminDN for valid and invalid values :id: f7049482-41e8-438b-ae18-cdd2612c783a :setup: Standalone instance, Password Admin entry, Password policy configured as below: nsslapd-pwpolicy-local: on passwordCheckSyntax: on passwordMinCategories: 1 passwordMinTokenLength: 1 passwordExp: on passwordMinDigits: 1 passwordMinSpecials: 1 :steps: 1. Add multiple attributes - one already exists so just try and add the second one 2. Set passwordAdminDN attribute to an invalid value (ZZZZZ) :expectedresults: 1. The operation should fail 2. The operation should fail """ (admin_group, admin1_user, admin2_user) = password_policy # Add multiple attributes - one already exists so just try and add the second one topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): topology_st.standalone.config.add('passwordAdminDN', admin1_user.dn) # Attempt to set invalid DN with pytest.raises(ldap.INVALID_SYNTAX): topology_st.standalone.config.set('passwordAdminDN', 'zzzzzzzzzzzz') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdModify_test.py000066400000000000000000000304551421664411400275740ustar00rootroot00000000000000# Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import re from ldap.controls import LDAPControl from lib389._constants import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalunit import OrganizationalUnits from lib389.pwpolicy import PwPolicyManager pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) OLD_PASSWD = 'password' NEW_PASSWD = 'newpassword' SHORT_PASSWD = 'wd' TESTPEOPLE_OU = "TestPeople_bug834047" USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' @pytest.fixture(scope="function") def pwd_policy_setup(topo, request): """ Setup to set passwordStorageScheme as CLEAR passwordHistory to on passwordStorageScheme to SSHA passwordHistory off """ log.info("Change the pwd storage type to clear and change the password once to refresh it(for the rest of tests") topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('passwordStorageScheme', 'CLEAR') assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) topo.standalone.config.set('passwordHistory', 'on') def fin(): topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('passwordStorageScheme', 'SSHA') topo.standalone.config.set('passwordHistory', 'off') request.addfinalizer(fin) def test_pwd_modify_with_different_operation(topo): """Performing various password modify operation, make sure that password is actually modified :id: e36d68a8-0960-48e4-932c-6c2f64abaebc :setup: Standalone instance and TLS enabled :steps: 1. Attempt for Password change for an entry that does not exists 2. Attempt for Password change for an entry that exists 3. 
Attempt for Password change to old for an entry that exists 4. Attempt for Password Change with Binddn as testuser but with wrong old password 5. Attempt for Password Change with Binddn as testuser 6. Attempt for Password Change without giving newpassword 7. Checking password change Operation using a Non-Secure connection 8. Testuser attempts to change password for testuser2(userPassword attribute is Set) 9. Directory Manager attempts to change password for testuser2(userPassword attribute is Set) 10. Create a password syntax policy. Attempt to change to password that violates that policy 11. userPassword mod with control results in ber decode error :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should not be successful 5. Operation should be successful 6. Operation should be successful 7. Operation should not be successful 8. Operation should not be successful 9. Operation should be successful 10. Operation should violates the policy 11. 
Operation should be successful """ topo.standalone.enable_tls() os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_ssca_dir() users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) TEST_USER_PROPERTIES['userpassword'] = OLD_PASSWD global user user = users.create(properties=TEST_USER_PROPERTIES) ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) with pytest.raises(ldap.NO_SUCH_OBJECT): log.info("Attempt for Password change for an entry that does not exists") assert topo.standalone.passwd_s('uid=testuser1,ou=People,dc=example,dc=com', OLD_PASSWD, NEW_PASSWD) log.info("Attempt for Password change for an entry that exists") assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) log.info("Attempt for Password change to old for an entry that exists") assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) log.info("Attempt for Password Change with Binddn as testuser but with wrong old password") topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) with pytest.raises(ldap.INVALID_CREDENTIALS): topo.standalone.passwd_s(user.dn, NEW_PASSWD, NEW_PASSWD) log.info("Attempt for Password Change with Binddn as testuser") assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) log.info("Attempt for Password Change without giving newpassword") assert topo.standalone.passwd_s(user.dn, None, OLD_PASSWD) assert user.get_attr_val_utf8('uid') == 'testuser' log.info("Change password to NEW_PASSWD i.e newpassword") assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, None) log.info("Check binding with old/new password") password = [OLD_PASSWD, NEW_PASSWD] for pass_val in password: with pytest.raises(ldap.INVALID_CREDENTIALS): topo.standalone.simple_bind_s(user.dn, pass_val) log.info("Change password back to OLD_PASSWD i.e password") topo.standalone.simple_bind_s(DN_DM, PASSWORD) assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) 
log.info("Checking password change Operation using a Non-Secure connection") conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) with pytest.raises(ldap.CONFIDENTIALITY_REQUIRED): conn.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) log.info("Testuser attempts to change password for testuser2(userPassword attribute is Set)") global user_2 users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user_2 = users.create(properties={ 'uid': 'testuser2', 'cn': 'testuser2', 'sn': 'testuser2', 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/testuser2', 'userPassword': OLD_PASSWD }) topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) with pytest.raises(ldap.INSUFFICIENT_ACCESS): assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Set)") topo.standalone.simple_bind_s(DN_DM, PASSWORD) assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) log.info("Changing userPassword attribute to Undefined for testuser2") topo.standalone.modify_s(user_2.dn, [(ldap.MOD_REPLACE, 'userPassword', None)]) log.info("Testuser attempts to change password for testuser2(userPassword attribute is Undefined)") with pytest.raises(ldap.INSUFFICIENT_ACCESS): topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) assert topo.standalone.passwd_s(user_2.dn, None, NEW_PASSWD) log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Undefined)") topo.standalone.simple_bind_s(DN_DM, PASSWORD) assert topo.standalone.passwd_s(user_2.dn, None, OLD_PASSWD) log.info("Create a password syntax policy. 
Attempt to change to password that violates that policy") topo.standalone.config.set('PasswordCheckSyntax', 'on') with pytest.raises(ldap.CONSTRAINT_VIOLATION): assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, SHORT_PASSWD) log.info("Reset password syntax policy") topo.standalone.config.set('PasswordCheckSyntax', 'off') log.info("userPassword mod with control results in ber decode error") topo.standalone.simple_bind_s(DN_DM, PASSWORD) assert topo.standalone.modify_ext_s(user.dn, [(ldap.MOD_REPLACE, 'userpassword', b'abcdefg')], serverctrls=[LDAPControl('2.16.840.1.113730.3.4.2', 1, None)]) log.info("Reseting the testuser's password") topo.standalone.passwd_s(user.dn, 'abcdefg', NEW_PASSWD) def test_pwd_modify_with_password_policy(topo, pwd_policy_setup): """Performing various password modify operation, with passwordStorageScheme as CLEAR passwordHistory to on :id: 200bf0fd-20ab-4dde-849e-54067e98b917 :setup: Standalone instance (TLS enabled) with pwd_policy_setup :steps: 1. Change the password and check that a new entry has been added to the history 2. Try changing password to one stored in history 3. Change the password several times in a row, and try binding after each change 4. Try to bind using short password :expectedresults: 1. Operation should be successful 2. Operation should be unsuccessful 3. Operation should be successful 4. Operation should be unsuccessful """ log.info("Change the password and check that a new entry has been added to the history") topo.standalone.passwd_s(user_2.dn, NEW_PASSWD, OLD_PASSWD) regex = re.search('Z(.+)', user_2.get_attr_val_utf8('passwordhistory')) assert NEW_PASSWD == regex.group(1) log.info("Try changing password to one stored in history. 
Should fail") with pytest.raises(ldap.CONSTRAINT_VIOLATION): assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) log.info("Change the password several times in a row, and try binding after each change") topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) assert topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) topo.standalone.passwd_s(user.dn, OLD_PASSWD, SHORT_PASSWD) assert topo.standalone.simple_bind_s(user.dn, SHORT_PASSWD) with pytest.raises(ldap.CONSTRAINT_VIOLATION): topo.standalone.passwd_s(user.dn, SHORT_PASSWD, OLD_PASSWD) def test_pwd_modify_with_subsuffix(topo): """Performing various password modify operation. :id: 2255b4e6-3546-4ec5-84a5-cd8b3d894ac5 :setup: Standalone instance (TLS enabled) :steps: 1. Add a new SubSuffix & password policy 2. Add two New users under the SubEntry 3. Change password of uid=test_user0,ou=TestPeople_bug834047,dc=example,dc=com to newpassword 4. Try to delete password- case when password is specified 5. Try to delete password- case when password is not specified :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. 
Operation should be successful """ log.info("Add a new SubSuffix") topo.standalone.simple_bind_s(DN_DM, PASSWORD) ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou_temp = ous.create(properties={'ou': TESTPEOPLE_OU}) ou_temp.add('aci', USER_ACI) log.info("Add the container & create password policies") policy = PwPolicyManager(topo.standalone) policy.create_subtree_policy(ou_temp.dn, properties={ 'passwordHistory': 'on', 'passwordInHistory': '6', 'passwordChange': 'on', 'passwordStorageScheme': 'CLEAR'}) log.info("Add two New users under the SubEntry") user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=TestPeople_bug834047') test_user0 = user.create(properties={ 'uid': 'test_user0', 'cn': 'test0', 'sn': 'test0', 'uidNumber': '3002', 'gidNumber': '4002', 'homeDirectory': '/home/test_user0', 'userPassword': OLD_PASSWD }) test_user1 = user.create(properties={ 'uid': 'test_user1', 'cn': 'test1', 'sn': 'test1', 'uidNumber': '3003', 'gidNumber': '4003', 'homeDirectory': '/home/test_user3', 'userPassword': OLD_PASSWD }) log.info("Changing password of {} to newpassword".format(test_user0.dn)) test_user0.rebind(OLD_PASSWD) test_user0.reset_password(NEW_PASSWD) test_user0.rebind(NEW_PASSWD) log.info("Try to delete password- case when password is specified") test_user0.remove('userPassword', NEW_PASSWD) test_user1.rebind(OLD_PASSWD) log.info("Try to delete password- case when password is not specified") test_user1.remove_all('userPassword') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py000066400000000000000000000266001421664411400316640ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * import pdb from lib389.topologies import topology_st from lib389.pwpolicy import PwPolicyManager from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD) pytestmark = pytest.mark.tier1 OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) TEST_USER_NAME = 'simplepaged_test' TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE) TEST_USER_PWD = 'simplepaged_test' PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def test_user(topology_st, request): """User for binding operation""" topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') log.info('Adding test user {}') users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': TEST_USER_NAME, 'userpassword': TEST_USER_PWD}) try: user = users.create(properties=user_props) except: pass # debug only USER_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_people = ous.get('people') ou_people.add('aci', USER_ACI) def fin(): log.info('Deleting user {}'.format(user.dn)) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) request.addfinalizer(fin) return user @pytest.fixture(scope="module") def password_policy(topology_st, test_user): """Set up password policy for subtree and user""" pwp = 
PwPolicyManager(topology_st.standalone) policy_props = {} log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) pwp.create_subtree_policy(OU_PEOPLE, policy_props) log.info('Create password policy for user {}'.format(TEST_USER_DN)) pwp.create_user_policy(TEST_USER_DN, policy_props) @pytest.mark.bz1845094 @pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") def test_pwdReset_by_user_DM(topology_st, test_user): """Test new password policy attribute "pwdReset" :id: 232bc7dc-8cb6-11eb-9791-98fa9ba19b65 :customerscenario: True :setup: Standalone instance, Add a new user with a password :steps: 1. Enable passwordMustChange 2. Bind as the user and change the password 3. Check that the pwdReset attribute is set to TRUE 4. Bind as the Directory manager and attempt to change the pwdReset to FALSE 5. Check that pwdReset is NOT SET to FALSE :expected results: 1. Success 2. Success 3. Successful bind as DS user, pwdReset as DS user fails w UNWILLING_TO_PERFORM 4. Success 5. 
Success """ # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) log.info('Set password policy passwordMustChange on') topology_st.standalone.config.replace('passwordMustChange', 'on') our_user.replace('userpassword', PASSWORD) time.sleep(5) # Check that pwdReset is TRUE assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' log.info('Binding as the Directory manager and attempt to change the pwdReset to FALSE') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) with pytest.raises(ldap.UNWILLING_TO_PERFORM): topology_st.standalone.config.replace('pwdReset', 'FALSE') log.info('Check that pwdReset is NOT SET to FALSE') assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' log.info('Resetting password for {}'.format(TEST_USER_PWD)) our_user.reset_password(TEST_USER_PWD) @pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") def test_pwd_reset(topology_st, test_user): """Test new password policy attribute "pwdReset" :id: 03db357b-4800-411e-a36e-28a534293004 :customerscenario: True :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Reset user's password 3. Check that the pwdReset attribute is set to TRUE 4. Bind as the user and change its password 5. Check that pwdReset is now set to FALSE 6. Reset password policy configuration :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ # Set password policy config topology_st.standalone.config.replace('passwordMustChange', 'on') time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) time.sleep(.5) # Check that pwdReset is TRUE assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Bind as user and change its own password our_user.rebind(PASSWORD) our_user.replace('userpassword', PASSWORD) time.sleep(.5) # Check that pwdReset is FALSE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'FALSE' # Reset password policy config topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) @pytest.mark.parametrize('subtree_pwchange,user_pwchange,exception', [('on', 'off', ldap.UNWILLING_TO_PERFORM), ('off', 'off', ldap.UNWILLING_TO_PERFORM), ('off', 'on', False), ('on', 'on', False)]) def test_change_pwd(topology_st, test_user, password_policy, subtree_pwchange, user_pwchange, exception): """Verify that 'passwordChange' attr works as expected User should have a priority over a subtree. :id: 2c884432-2ba1-4662-8e5d-2cd49f77e5fa :parametrized: yes :setup: Standalone instance, a test user, password policy entries for a user and a subtree :steps: 1. Set passwordChange on the user and the subtree to various combinations 2. Bind as test user 3. Try to change password 4. Clean up - change the password to default while bound as DM :expectedresults: 1. passwordChange should be successfully set 2. Bind should be successful 3. Subtree/User passwordChange - result, accordingly: off/on, on/on - success; on/off, off/off - UNWILLING_TO_PERFORM 4. 
Operation should be successful """ users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user = users.get(TEST_USER_NAME) log.info('Set passwordChange to "{}" - {}'.format(subtree_pwchange, OU_PEOPLE)) pwp = PwPolicyManager(topology_st.standalone) subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) subtree_policy.set('passwordChange', subtree_pwchange) time.sleep(1) log.info('Set passwordChange to "{}" - {}'.format(user_pwchange, TEST_USER_DN)) pwp2 = PwPolicyManager(topology_st.standalone) user_policy = pwp2.get_pwpolicy_entry(TEST_USER_DN) user_policy.set('passwordChange', user_pwchange) user_policy.set('passwordExp', 'on') time.sleep(1) try: log.info('Bind as user and modify userPassword') user.rebind(TEST_USER_PWD) if exception: with pytest.raises(exception): user.reset_password('new_pass') else: user.reset_password('new_pass') except ldap.LDAPError as e: log.error('Failed to change userpassword for {}: error {}'.format( TEST_USER_DN, e.args[0]['info'])) raise e finally: log.info('Bind as DM') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.reset_password(TEST_USER_PWD) def test_pwd_min_age(topology_st, test_user, password_policy): """If we set passwordMinAge to some value, for example to 10, then it should not allow the user to change the password within 10 seconds after his previous change. :id: 85b98516-8c82-45bd-b9ec-90bd1245e09c :setup: Standalone instance, a test user, password policy entries for a user and a subtree :steps: 1. Set passwordMinAge to 10 on the user pwpolicy entry 2. Set passwordMinAge to 10 on the subtree pwpolicy entry 3. Set passwordMinAge to 10 on the cn=config entry 4. Bind as test user 5. Try to change the password two times in a row 6. Wait 12 seconds 7. Try to change the password 8. Clean up - change the password to default while bound as DM :expectedresults: 1. passwordMinAge should be successfully set on the user pwpolicy entry 2. passwordMinAge should be successfully set on the subtree pwpolicy entry 3. 
passwordMinAge should be successfully set on the cn=config entry 4. Bind should be successful 5. The password should be successfully changed 6. 12 seconds have passed 7. Constraint Violation error should be raised 8. Operation should be successful """ num_seconds = '10' users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user = users.get(TEST_USER_NAME) log.info('Set passwordminage to "{}" - {}'.format(num_seconds, OU_PEOPLE)) pwp = PwPolicyManager(topology_st.standalone) subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) subtree_policy.set('passwordminage', num_seconds) log.info('Set passwordminage to "{}" - {}'.format(num_seconds, TEST_USER_DN)) user_policy = pwp.get_pwpolicy_entry(TEST_USER_DN) user_policy.set('passwordminage', num_seconds) log.info('Set passwordminage to "{}" - {}'.format(num_seconds, DN_CONFIG)) topology_st.standalone.config.set('passwordminage', num_seconds) time.sleep(1) log.info('Bind as user and modify userPassword') user.rebind(TEST_USER_PWD) user.reset_password('new_pass') time.sleep(1) log.info('Bind as user and modify userPassword straight away after previous change') user.rebind('new_pass') with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.reset_password('new_new_pass') log.info('Wait {} second'.format(int(num_seconds) + 2)) time.sleep(int(num_seconds) + 2) try: log.info('Bind as user and modify userPassword') user.rebind('new_pass') user.reset_password(TEST_USER_PWD) except ldap.LDAPError as e: log.error('Failed to change userpassword for {}: error {}'.format( TEST_USER_DN, e.args[0]['info'])) raise e finally: log.info('Bind as DM') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.reset_password(TEST_USER_PWD) pwp.delete_local_policy(TEST_USER_DN) pwp.delete_local_policy(OU_PEOPLE) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_controls_sequence_test.py000066400000000000000000000106511421664411400334130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest import os import ldap import time import ast from ldap.controls.ppolicy import PasswordPolicyControl from ldap.controls.pwdpolicy import PasswordExpiredControl from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389._constants import (DN_DM, PASSWORD, DEFAULT_SUFFIX) pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=test entry,ou=people,dc=example,dc=com' USER_PW = b'password123' @pytest.fixture def init_user(topo, request): users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user_data = {'uid': 'test entry', 'cn': 'test entry', 'sn': 'test entry', 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/test_entry', 'userPassword': USER_PW} test_user = users.create(properties=user_data) def fin(): log.info('Delete test user') if test_user.exists(): test_user.delete() request.addfinalizer(fin) def bind_and_get_control(topo): log.info('Bind as the user, and return any controls') res_type = res_data = res_msgid = res_ctrls = None result_id = '' try: result_id = topo.standalone.simple_bind(USER_DN, USER_PW, serverctrls=[PasswordPolicyControl()]) res_type, res_data, res_msgid, res_ctrls = topo.standalone.result3(result_id) except ldap.LDAPError as e: log.info('Got expected error: {}'.format(str(e))) res_ctrls = ast.literal_eval(str(e)) pass topo.standalone.simple_bind(DN_DM, PASSWORD) return res_ctrls def 
change_passwd(topo): log.info('Reset user password as the user, then re-bind as Directory Manager') users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('test entry') user.rebind(USER_PW) user.reset_password(USER_PW) topo.standalone.simple_bind(DN_DM, PASSWORD) @pytest.mark.bz1724914 @pytest.mark.ds3585 def test_controltype_expired_grace_limit(topo, init_user): """Test for expiration control when password is expired with available and exhausted grace login :id: 0392a73c-6467-49f9-bdb6-3648f6971896 :setup: Standalone instance, a user for testing :steps: 1. Configure password policy, reset password and allow it to expire 2. Bind and check sequence of controlType 3. Bind (one grace login remaining) and check sequence of controlType 4. Bind (grace login exhausted) and check sequence of controlType :expectedresults: 1. Config update and password reset are successful 2. ControlType sequence is in correct order 3. ControlType sequence is in correct order 4. ControlType sequence is in correct order """ log.info('Configure password policy with grace limit set to 2') topo.standalone.config.set('passwordExp', 'on') topo.standalone.config.set('passwordMaxAge', '5') topo.standalone.config.set('passwordGraceLimit', '2') log.info('Change password and wait for it to expire') change_passwd(topo) time.sleep(6) log.info('Bind and use up one grace login (only one left)') controls = bind_and_get_control(topo) assert (controls[0].controlType == "1.3.6.1.4.1.42.2.27.8.5.1") assert (controls[1].controlType == "2.16.840.1.113730.3.4.4") log.info('Bind again and check the sequence') controls = bind_and_get_control(topo) assert (controls[0].controlType == "1.3.6.1.4.1.42.2.27.8.5.1") assert (controls[1].controlType == "2.16.840.1.113730.3.4.4") log.info('Bind with expired grace login and check the sequence') # No grace login available, bind should fail, controls will be returned in error message controls = bind_and_get_control(topo) assert (controls['ctrls'][0][0] == 
"1.3.6.1.4.1.42.2.27.8.5.1") assert (controls['ctrls'][1][0] == "2.16.840.1.113730.3.4.4") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py000066400000000000000000000256511421664411400315310ustar00rootroot00000000000000import logging import pytest import os import ldap import time from ldap.controls.ppolicy import PasswordPolicyControl from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389._constants import (DN_DM, PASSWORD, DEFAULT_SUFFIX) from lib389.idm.organizationalunit import OrganizationalUnits pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=test entry,ou=people,dc=example,dc=com' USER_PW = b'password123' USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' @pytest.fixture def init_user(topo, request): """Initialize a user - Delete and re-add test user """ try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('test entry') user.delete() except ldap.NO_SUCH_OBJECT: pass except ldap.LDAPError as e: log.error("Failed to delete user, error: {}".format(e.message['desc'])) assert False user_data = {'uid': 'test entry', 'cn': 'test entry', 'sn': 'test entry', 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/test_entry', 'userPassword': USER_PW} users.create(properties=user_data) def change_passwd(topo): """Reset users password as the user, then re-bind as Directory Manager """ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('test entry') user.rebind(USER_PW) 
user.reset_password(USER_PW) topo.standalone.simple_bind_s(DN_DM, PASSWORD) def bind_and_get_control(topo, err=0): """Bind as the user, and return any controls """ res_type = res_data = res_msgid = res_ctrls = None result_id = '' try: result_id = topo.standalone.simple_bind(USER_DN, USER_PW, serverctrls=[PasswordPolicyControl()]) res_type, res_data, res_msgid, res_ctrls = topo.standalone.result3(result_id) if err: log.fatal('Expected an error, but bind succeeded') assert False except ldap.LDAPError as e: if err: log.debug('Got expected error: {}'.format(str(e))) pass else: log.fatal('Did not expect an error: {}'.format(str(e))) assert False if DEBUGGING and res_ctrls and len(res_ctrls) > 0: for ctl in res_ctrls: if ctl.timeBeforeExpiration: log.debug('control time before expiration: {}'.format(ctl.timeBeforeExpiration)) if ctl.graceAuthNsRemaining: log.debug('control grace login remaining: {}'.format(ctl.graceAuthNsRemaining)) if ctl.error is not None and ctl.error >= 0: log.debug('control error: {}'.format(ctl.error)) topo.standalone.simple_bind_s(DN_DM, PASSWORD) return res_ctrls def test_pwd_must_change(topo, init_user): """Test for expiration control when password must be changed because an admin reset the password :id: a3d99be5-0b69-410d-b72f-04eda8821a56 :setup: Standalone instance, a user for testing :steps: 1. Configure password policy and reset password as admin 2. Bind, and check for expired control withthe proper error code "2" :expectedresults: 1. Config update succeeds, adn the password is reset 2. 
The EXPIRED control is returned, and we the expected error code "2" """ log.info('Configure password policy with paswordMustChange set to "on"') topo.standalone.config.set('passwordExp', 'on') topo.standalone.config.set('passwordMaxAge', '200') topo.standalone.config.set('passwordGraceLimit', '0') topo.standalone.config.set('passwordWarning', '199') topo.standalone.config.set('passwordMustChange', 'on') ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) log.info('Reset userpassword as Directory Manager') users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.get('test entry') user.reset_password(USER_PW) log.info('Bind should return ctrl with error code 2 (changeAfterReset)') time.sleep(2) ctrls = bind_and_get_control(topo) if ctrls and len(ctrls) > 0: if ctrls[0].error is None: log.fatal("Response ctrl error code not set") assert False elif ctrls[0].error != 2: log.fatal("Got unexpected error code: {}".format(ctrls[0].error)) assert False else: log.fatal("We did not get a response ctrl") assert False def test_pwd_expired_grace_limit(topo, init_user): """Test for expiration control when password is expired, but there are remaining grace logins :id: a3d99be5-0b69-410d-b72f-04eda8821a51 :setup: Standalone instance, a user for testing :steps: 1. Configure password policy and reset password,adn allow it to expire 2. Bind, and check for expired control, and grace limit 3. Bind again, consuming the last grace login, control should be returned 4. Bind again, it should fail, and no control returned :expectedresults: 1. Config update and password reset are successful 2. The EXPIRED control is returned, and we get the expected number of grace logins in the control 3. The response control has the expected value for grace logins 4. 
The bind fails with error 49, and no contorl is returned """ log.info('Configure password policy with grace limit set tot 2') topo.standalone.config.set('passwordExp', 'on') topo.standalone.config.set('passwordMaxAge', '5') topo.standalone.config.set('passwordGraceLimit', '2') log.info('Change password and wait for it to expire') change_passwd(topo) time.sleep(6) log.info('Bind and use up one grace login (only one left)') ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get EXPIRED control in resposne') assert False else: if int(ctrls[0].graceAuthNsRemaining) != 1: log.fatal('Got unexpected value for grace logins: {}'.format(ctrls[0].graceAuthNsRemaining)) assert False log.info('Use up last grace login, should get control') ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get control in response') assert False log.info('No grace login available, bind should fail, and no control should be returned') ctrls = bind_and_get_control(topo, err=49) if ctrls and len(ctrls) > 0: log.fatal('Incorrectly got control in response') assert False def test_pwd_expiring_with_warning(topo, init_user): """Test expiring control response before and after warning is sent :id: 3594431f-e681-4a04-8edb-33ad2d9dad5b :setup: Standalone instance, a user for testing :steps: 1. Configure password policy, and reset password 2. Check for EXPIRING control, and the "time to expire" 3. Bind again, as a warning has now been sent, and check the "time to expire" :expectedresults: 1. Configuration update and password reset are successful 2. Get the EXPIRING control, and the expected "time to expire" values 3. 
Get the EXPIRING control, and the expected "time to expire" values """ log.info('Configure password policy') topo.standalone.config.set('passwordExp', 'on') topo.standalone.config.set('passwordMaxAge', '50') topo.standalone.config.set('passwordWarning', '50') log.info('Change password and get controls') change_passwd(topo) ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get EXPIRING control in response') assert False if int(ctrls[0].timeBeforeExpiration) < 50: log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) assert False log.info('Warning has been sent, try the bind again, and recheck the expiring time') time.sleep(5) ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get EXPIRING control in resposne') assert False if int(ctrls[0].timeBeforeExpiration) > 50: log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) assert False def test_pwd_expiring_with_no_warning(topo, init_user): """Test expiring control response when no warning is sent :id: a3d99be5-0b69-410d-b72f-04eda8821a54 :setup: Standalone instance, a user for testing :steps: 1. Configure password policy, and reset password 2. Bind, and check that no controls are returned 3. Set passwordSendExpiringTime to "on", bind, and check that the EXPIRING control is returned :expectedresults: 1. Configuration update and passwordreset are successful 2. No control is returned from bind 3. 
A control is returned after setting "passwordSendExpiringTime" """ log.info('Configure password policy') topo.standalone.config.set('passwordExp', 'on') topo.standalone.config.set('passwordMaxAge', '50') topo.standalone.config.set('passwordWarning', '5') log.info('When the warning is less than the max age, we never send expiring control response') change_passwd(topo) ctrls = bind_and_get_control(topo) if len(ctrls) > 0: log.fatal('Incorrectly got a response control: {}'.format(ctrls)) assert False log.info('Turn on sending expiring control regardless of warning') topo.standalone.config.set('passwordSendExpiringTime', 'on') ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get EXPIRED control in response') assert False if int(ctrls[0].timeBeforeExpiration) < 49: log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) assert False log.info('Check expiring time again') time.sleep(6) ctrls = bind_and_get_control(topo) if ctrls is None or len(ctrls) == 0: log.fatal('Did not get EXPIRED control in resposne') assert False if int(ctrls[0].timeBeforeExpiration) > 51: log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) assert False log.info('Turn off sending expiring control (restore the default setting)') topo.standalone.config.set('passwordSendExpiringTime', 'off') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py000066400000000000000000000167411421664411400326500ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389.utils import * from lib389._constants import * from lib389.pwpolicy import PwPolicyManager from lib389.topologies import topology_st from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX ATTR_INHERIT_GLOBAL = 'nsslapd-pwpolicy-inherit-global' ATTR_CHECK_SYNTAX = 'passwordCheckSyntax' BN = 'uid=buser,' + OU_PEOPLE TEMP_USER = 'cn=test{}' TEMP_USER_DN = '%s,%s' % (TEMP_USER, OU_PEOPLE) @pytest.fixture(scope="module") def create_user(topology_st, request): """User for binding operation""" log.info('Adding user {}'.format(BN)) users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': 'buser', 'cn': 'buser', 'userpassword': PASSWORD}) user = users.create(properties=user_props) log.info('Adding an aci for the bind user') BN_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_people = ous.get('people') ou_people.add('aci', BN_ACI) def fin(): log.info('Deleting user {}'.format(BN)) user.delete() ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_people = ous.get('people') ou_people.remove('aci', BN_ACI) request.addfinalizer(fin) @pytest.fixture(scope="module") def password_policy(topology_st, create_user): """Set global password policy. Then, set fine-grained subtree level password policy to ou=People with no password syntax. 
Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default """ log.info('Enable fine-grained policy') pwp = PwPolicyManager(topology_st.standalone) policy_props = { 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' } pwp.create_subtree_policy(OU_PEOPLE, policy_props) check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'off') check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'off') def check_attr_val(inst, attr, expected): """Check that entry has the value""" val = inst.config.get_attr_val_utf8(attr) assert val == expected, 'Default value of %s is not %s, but %s' % ( attr, expected, val) log.info('Default value of %s is %s' % (attr, expected)) @pytest.mark.parametrize('inherit_value,checksyntax_value', [('off', 'off'), ('on', 'off'), ('off', 'on')]) def test_entry_has_no_restrictions(topology_st, password_policy, create_user, inherit_value, checksyntax_value): """Make sure an entry added to ou=people has no password syntax restrictions :id: 2f07ff40-76ca-45a9-a556-331c94084945 :parametrized: yes :setup: Standalone instance, test user, password policy entries for a subtree :steps: 1. Bind as test user 2. Set 'nsslapd-pwpolicy-inherit-global' and 'passwordCheckSyntax' accordingly: 'off' and 'off'; 'on' and 'off'; 'off' and 'on' 3. Try to add user with a short password 4. Cleanup - remove temp user bound as DM :expectedresults: 1. Bind should be successful 2. Attributes should be successfully set 3. No exceptions should occur 4. 
Operation should be successful """ log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, inherit_value)) log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, checksyntax_value)) topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, inherit_value) topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, checksyntax_value) # Wait a second for cn=config to apply time.sleep(1) check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, inherit_value) check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, checksyntax_value) log.info('Bind as test user') topology_st.standalone.simple_bind_s(BN, PASSWORD) log.info('Make sure an entry added to ou=people has ' 'no password syntax restrictions.') users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'cn': 'test0', 'userpassword': 'short'}) user = users.create(properties=user_props) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Remove test user user.delete() def test_entry_has_restrictions(topology_st, password_policy, create_user): """Set 'nsslapd-pwpolicy-inherit-global: on' and 'passwordCheckSyntax: on'. Make sure that syntax rules work, if set them at both: cn=config and ou=people policy container. :id: 4bb0f474-17c1-40f7-aab4-4ddc17d019e8 :setup: Standalone instance, test user, password policy entries for a subtree :steps: 1. Bind as test user 2. Switch 'nsslapd-pwpolicy-inherit-global: on' 3. Switch 'passwordCheckSyntax: on' 4. Set 'passwordMinLength: 9' to: cn=config and ou=people policy container 5. Try to add user with a short password (<9) 6. Try to add user with a long password (>9) 7. Cleanup - remove temp users bound as DM :expectedresults: 1. Bind should be successful 2. nsslapd-pwpolicy-inherit-global should be successfully set 3. passwordCheckSyntax should be successfully set 4. passwordMinLength should be successfully set 5. User should be rejected 6. User should be rejected 7. 
Operation should be successful """ log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, 'on')) log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, 'on')) topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, 'on') topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, 'on') pwp = PwPolicyManager(topology_st.standalone) policy = pwp.get_pwpolicy_entry(OU_PEOPLE) policy.set('passwordMinLength', '9') # Wait a second for cn=config to apply time.sleep(1) check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'on') check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'on') log.info('Bind as test user') topology_st.standalone.simple_bind_s(BN, PASSWORD) users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() log.info('Try to add user with a short password (<9)') with pytest.raises(ldap.CONSTRAINT_VIOLATION): user_props.update({'cn': 'test0', 'userpassword': 'short'}) user = users.create(properties=user_props) log.info('Try to add user with a long password (>9)') user_props.update({'cn': 'test1', 'userpassword': 'Reallylong1'}) user = users.create(properties=user_props) log.info('Bind as DM user') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Remove test user 1 user.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py000066400000000000000000000377571421664411400312260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM from lib389.idm.domain import Domain from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits pytestmark = pytest.mark.tier1 USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX USER_RDN = 'user' USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def password_policy(topology_st): """Set global password policy""" log.info('Enable global password policy. Check for syntax.') topology_st.standalone.config.set('passwordCheckSyntax', 'on') topology_st.standalone.config.set('nsslapd-pwpolicy-local', 'off') topology_st.standalone.config.set('passwordMinCategories', '1') # Add self user modification and anonymous aci USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" suffix = Domain(topology_st.standalone, DEFAULT_SUFFIX) suffix.add('aci', USER_SELF_MOD_ACI) suffix.add('aci', ANON_ACI) @pytest.fixture(scope="module") def create_user(topology_st): """Create the test user.""" users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) users.create(properties={ 'uid': USER_RDN, 'cn': USER_RDN, 'sn': USER_RDN, 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/user', 'description': 'd_e_s_c', 'loginShell': USER_RDN, 'userPassword': PASSWORD }) def setPolicy(inst, attr, value): """Bind as Root DN, set policy, and then bind as user""" inst.simple_bind_s(DN_DM, PASSWORD) # Set the policy value value = str(value) inst.config.set(attr, value) 
policy = inst.config.get_attr_val_utf8(attr) assert policy == value def resetPasswd(inst): """Reset the user password for the next test""" # First, bind as the ROOT DN so we can set the password inst.simple_bind_s(DN_DM, PASSWORD) # Now set the password users = UserAccounts(inst, DEFAULT_SUFFIX) user = users.get(USER_RDN) user.reset_password(PASSWORD) def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg): """Attempt to change the users password inst: DirSrv Object password: password msg - error message if failure """ setPolicy(inst, policy_attr, value) inst.simple_bind_s(USER_DN, PASSWORD) users = UserAccounts(inst, DEFAULT_SUFFIX) user = users.get(USER_RDN) try: user.reset_password(pw_bad) log.fatal('Invalid password was unexpectedly accepted (%s)' % (policy_attr)) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Invalid password correctly rejected by %s: %s' % (policy_attr, msg)) pass except ldap.LDAPError as e: log.fatal("Failed to change password: " + str(e)) assert False # Change password that is allowed user.reset_password(pw_good) # Reset for the next test resetPasswd(inst) setPolicy(inst, policy_attr, reset_value) def test_basic(topology_st, create_user, password_policy): """Ensure that on a password change, the policy syntax is enforced correctly. :id: e8de7029-7fa6-4e96-9eb6-4a121f4c8fb3 :customerscenario: True :setup: Standalone instance, a test user, global password policy with: passwordCheckSyntax - on; nsslapd-pwpolicy-local - off; passwordMinCategories - 1 :steps: 1. Set passwordMinLength to 10 in cn=config 2. Set userPassword to 'passwd' in cn=config 3. Set userPassword to 'password123' in cn=config 4. Set passwordMinLength to 2 in cn=config 5. Set passwordMinDigits to 2 in cn=config 6. Set userPassword to 'passwd' in cn=config 7. Set userPassword to 'password123' in cn=config 8. Set passwordMinDigits to 0 in cn=config 9. Set passwordMinAlphas to 2 in cn=config 10. Set userPassword to 'p123456789' in cn=config 11. 
Set userPassword to 'password123' in cn=config 12. Set passwordMinAlphas to 0 in cn=config 13. Set passwordMaxRepeats to 2 in cn=config 14. Set userPassword to 'password' in cn=config 15. Set userPassword to 'password123' in cn=config 16. Set passwordMaxRepeats to 0 in cn=config 17. Set passwordMinSpecials to 2 in cn=config 18. Set userPassword to 'passwd' in cn=config 19. Set userPassword to 'password_#$' in cn=config 20. Set passwordMinSpecials to 0 in cn=config 21. Set passwordMinLowers to 2 in cn=config 22. Set userPassword to 'PASSWORD123' in cn=config 23. Set userPassword to 'password123' in cn=config 24. Set passwordMinLowers to 0 in cn=config 25. Set passwordMinUppers to 2 in cn=config 26. Set userPassword to 'password' in cn=config 27. Set userPassword to 'PASSWORD' in cn=config 28. Set passwordMinUppers to 0 in cn=config 29. Test passwordDictCheck 30. Test passwordPalindrome 31. Test passwordMaxSequence for forward number sequence 32. Test passwordMaxSequence for backward number sequence 33. Test passwordMaxSequence for forward alpha sequence 34. Test passwordMaxSequence for backward alpha sequence 35. Test passwordMaxClassChars for digits 36. Test passwordMaxClassChars for specials 37. Test passwordMaxClassChars for lowers 38. Test passwordMaxClassChars for uppers 39. Test passwordBadWords using 'redhat' and 'fedora' 40. Test passwordUserAttrs using description attribute :expectedresults: 1. passwordMinLength should be successfully set 2. Password should be rejected because length too short 3. Password should be accepted 4. passwordMinLength should be successfully set 5. passwordMinDigits should be successfully set 6. Password should be rejected because it does not contain minimum number of digits 7. Password should be accepted 8. passwordMinDigits should be successfully set 9. passwordMinAlphas should be successfully set 10. Password should be rejected because it does not contain minimum number of alphas 11. Password should be accepted 12. 
passwordMinAlphas should be successfully set 13. passwordMaxRepeats should be successfully set 14. Password should be rejected because too many repeating characters 15. Password should be accepted 16. passwordMaxRepeats should be successfully set 17. passwordMinSpecials should be successfully set 18. Password should be rejected because it does not contain minimum number of special characters 19. Password should be accepted 20. passwordMinSpecials should be successfully set 21. passwordMinLowers should be successfully set 22. Password should be rejected because it does not contain minimum number of lowercase characters 23. Password should be accepted 24. passwordMinLowers should be successfully set 25. passwordMinUppers should be successfully set 26. Password should be rejected because it does not contain minimum number of lowercase characters 27. Password should be accepted 28. passwordMinUppers should be successfully set 29. The passwordDictCheck test succeeds 30. The passwordPalindrome test succeeds 31. Test passwordMaxSequence for forward number sequence succeeds 32. Test passwordMaxSequence for backward number sequence succeeds 33. Test passwordMaxSequence for forward alpha sequence succeeds 34. Test passwordMaxSequence for backward alpha sequence succeeds 35. Test passwordMaxClassChars for digits succeeds 36. Test passwordMaxClassChars for specials succeeds 37. Test passwordMaxClassChars for lowers succeeds 38. Test passwordMaxClassChars for uppers succeeds 39. The passwordBadWords test succeeds 40. 
The passwordUserAttrs test succeeds """ # # Test each syntax category # ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) # Min Length tryPassword(topology_st.standalone, 'passwordMinLength', 10, 2, 'passwd', 'password123', 'length too short') # Min Digit tryPassword(topology_st.standalone, 'passwordMinDigits', 2, 0, 'passwd', 'password123', 'does not contain minimum number of digits') # Min Alphas tryPassword(topology_st.standalone, 'passwordMinAlphas', 2, 0, 'p123456789', 'password123', 'does not contain minimum number of alphas') # Max Repeats tryPassword(topology_st.standalone, 'passwordMaxRepeats', 2, 0, 'passsword', 'password123', 'too many repeating characters') # Min Specials tryPassword(topology_st.standalone, 'passwordMinSpecials', 2, 0, 'passwd', 'password_#$', 'does not contain minimum number of special characters') # Min Lowers tryPassword(topology_st.standalone, 'passwordMinLowers', 2, 0, 'PASSWORD123', 'password123', 'does not contain minimum number of lowercase characters') # Min Uppers tryPassword(topology_st.standalone, 'passwordMinUppers', 2, 0, 'password', 'PASSWORD', 'does not contain minimum number of lowercase characters') # Min 8-bits - "ldap" package only accepts ascii strings at the moment if ds_is_newer('1.4.0.13'): # Dictionary check tryPassword(topology_st.standalone, 'passwordDictCheck', 'on', 'on', 'PASSWORD', '13_#Kad472h', 'Password found in dictionary') # Palindromes tryPassword(topology_st.standalone, 'passwordPalindrome', 'on', 'on', 'Za12_#_21aZ', '13_#Kad472h', 'Password is palindrome') # Sequences tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_1234', '13_#Kad472h', 'Max monotonic sequence is not allowed') tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_4321', '13_#Kad472h', 'Max monotonic sequence is not allowed') tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_abcd', '13_#Kad472h', 'Max monotonic 
sequence is not allowed') tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_dcba', '13_#Kad472h', 'Max monotonic sequence is not allowed') # Sequence Sets tryPassword(topology_st.standalone, 'passwordMaxSeqSets', 2, 0, 'Za1_123--123', '13_#Kad472h', 'Max monotonic sequence is not allowed') # Max characters in a character class tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_9376', '13_#Kad472h', 'Too may consecutive characters from the same class') tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_#$&!', '13_#Kad472h', 'Too may consecutive characters from the same class') tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_ahtf', '13_#Kad472h', 'Too may consecutive characters from the same class') tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_HTSE', '13_#Kad472h', 'Too may consecutive characters from the same class') # Bad words tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat', 'none', 'Za1_redhat', '13_#Kad472h', 'Too may consecutive characters from the same class') # User Attributes tryPassword(topology_st.standalone, 'passwordUserAttributes', 'description', 0, 'Za1_d_e_s_c', '13_#Kad472h', 'Password found in user entry') @pytest.mark.bz1816857 @pytest.mark.ds50875 @pytest.mark.skipif(ds_is_older("1.4.1.18"), reason="Not implemented") def test_config_set_few_user_attributes(topology_st, create_user, password_policy): """Test that we can successfully set multiple values to passwordUserAttributes :id: 188e0aee-6e29-4857-910c-27d5606f8c08 :setup: Standalone instance :steps: 1. Set passwordUserAttributes to "description loginShell" 2. Verify passwordUserAttributes has the values 3. Verify passwordUserAttributes enforced the policy :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. 
Operation should be successful """ standalone = topology_st.standalone standalone.simple_bind_s(DN_DM, PASSWORD) standalone.log.info('Set passwordUserAttributes to "description loginShell"') standalone.config.set('passwordUserAttributes', 'description loginshell') standalone.restart() standalone.log.info("Verify passwordUserAttributes has the values") user_attrs = standalone.config.get_attr_val_utf8('passwordUserAttributes') assert "description" in user_attrs assert "loginshell" in user_attrs standalone.log.info("Reset passwordUserAttributes") standalone.config.remove_all('passwordUserAttributes') standalone.log.info("Verify passwordUserAttributes enforced the policy") attributes = ['description, loginShell', 'description,loginShell', 'description loginShell'] values = ['Za1_d_e_s_c', f'Za1_{USER_RDN}', f'Za1_d_e_s_c{USER_RDN}'] for attr in attributes: for value in values: tryPassword(standalone, 'passwordUserAttributes', attr, 0, value, '13_#Kad472h', 'Password found in user entry') @pytest.mark.bz1816857 @pytest.mark.ds50875 @pytest.mark.skipif(ds_is_older("1.4.1.18"), reason="Not implemented") def test_config_set_few_bad_words(topology_st, create_user, password_policy): """Test that we can successfully set multiple values to passwordBadWords :id: 2977094c-921c-4b2f-af91-4c7a45ded48b :setup: Standalone instance :steps: 1. Set passwordBadWords to "fedora redhat" 2. Verify passwordBadWords has the values 3. Verify passwordBadWords enforced the policy :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. 
Operation should be successful """ standalone = topology_st.standalone standalone.simple_bind_s(DN_DM, PASSWORD) standalone.log.info('Set passwordBadWords to "fedora redhat"') standalone.config.set('passwordBadWords', 'fedora redhat') standalone.restart() standalone.log.info("Verify passwordBadWords has the values") user_attrs = standalone.config.get_attr_val_utf8('passwordBadWords') assert "fedora" in user_attrs assert "redhat" in user_attrs standalone.log.info("Reset passwordBadWords") standalone.config.remove_all('passwordBadWords') standalone.log.info("Verify passwordBadWords enforced the policy") attributes = ['redhat, fedora', 'redhat,fedora', 'redhat fedora'] values = ['Za1_redhat_fedora', 'Za1_fedora', 'Za1_redhat'] for attr in attributes: for value in values: tryPassword(standalone, 'passwordBadWords', attr, 'none', value, '13_#Kad472h', 'Too may consecutive characters from the same class') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_temporary_password.py000066400000000000000000001263421421664411400325720ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * import pdb from lib389.topologies import topology_st from lib389.pwpolicy import PwPolicyManager from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD) pytestmark = pytest.mark.tier1 OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) TEST_USER_NAME = 'simplepaged_test' TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE) TEST_USER_PWD = 'simplepaged_test' PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def test_user(topology_st, request): """User for binding operation""" topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') log.info('Adding test user {}') users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': TEST_USER_NAME, 'userpassword': TEST_USER_PWD}) try: user = users.create(properties=user_props) except: pass # debug only USER_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou_people = ous.get('people') ou_people.add('aci', USER_ACI) def fin(): log.info('Deleting user {}'.format(user.dn)) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) request.addfinalizer(fin) return user def test_global_tpr_maxuse_1(topology_st, test_user, request): """Test global TPR policy : passwordTPRMaxUse Test that after passwordTPRMaxUse failures to bind 
additional bind with valid password are failing with CONSTRAINT_VIOLATION :id: d1b38436-806c-4671-8ccf-c8fdad21f034 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRMaxUse=5 3. Set passwordMaxFailure to a higher value to not disturb the test 4. Bind with a wrong password passwordTPRMaxUse times and check INVALID_CREDENTIALS 5. Check that passwordTPRRetryCount got to the limit (5) 6. Bind with a wrong password (CONSTRAINT_VIOLATION) and check passwordTPRRetryCount overpass the limit by 1 (6) 7. Bind with a valid password 5 times and check CONSTRAINT_VIOLATION and check passwordTPRRetryCount overpass the limit by 1 (6) 8. Reset password policy configuration :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success """ try_tpr_failure = 5 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) time.sleep(.5) # look up to passwordTPRMaxUse with failing # bind to check that the limits of TPR are enforced for i in range(try_tpr_failure): # Bind as user with a wrong password with pytest.raises(ldap.INVALID_CREDENTIALS): our_user.rebind('wrong password') time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % 
(i+1, i+1)) # Now the #failures reached passwordTPRMaxUse # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION with pytest.raises(ldap.CONSTRAINT_VIOLATION): our_user.rebind("wrong password") time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + 1) log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (try_tpr_failure + i)) # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION # and passwordTPRRetryCount remains unchanged # account is now similar to locked for i in range(10): # Bind as user with valid password with pytest.raises(ldap.CONSTRAINT_VIOLATION): our_user.rebind(PASSWORD) time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE # pwdTPRUseCount keeps increasing assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + i + 2) log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (try_tpr_failure + i + 2)) def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # 
Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_maxuse_2(topology_st, test_user, request): """Test global TPR policy : passwordTPRMaxUse Test that after less than passwordTPRMaxUse failures to bind additional bind with valid password are successfull :id: bd18bf8e-f3c3-4612-9009-500cf558317e :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRMaxUse=5 3. Set passwordMaxFailure to a higher value to not disturb the test 4. Bind with a wrong password less than passwordTPRMaxUse times and check INVALID_CREDENTIALS 7. Bind successfully with a valid password 10 times and check passwordTPRRetryCount returns to 0 8. Reset password policy configuration :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success """ try_tpr_failure = 5 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) time.sleep(.5) # Do less than passwordTPRMaxUse failing bind try_tpr_failure = try_tpr_failure - 2 for i in range(try_tpr_failure): # Bind as user with a wrong password with pytest.raises(ldap.INVALID_CREDENTIALS): our_user.rebind('wrong password') time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) log.info("%dth failing bind 
(INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) # Now the #failures has not reached passwordTPRMaxUse # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) our_user.rebind(PASSWORD) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Now check that all next attempts with correct password are successfull # and passwordTPRRetryCount reset to 0 for i in range(10): # Bind as user with valid password our_user.rebind(PASSWORD) time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is FALSE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'FALSE' #pdb.set_trace() assert not our_user.present('pwdTPRUseCount') def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_maxuse_3(topology_st, test_user, request): """Test global TPR policy : passwordTPRMaxUse Test that after less than passwordTPRMaxUse failures to bind A bind with valid password is successfull but passwordMustChange does not allow to do a search. Changing the password allows to do a search :id: 7fd0301a-781e-4db8-a4bd-7b44e0f04bb6 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRMaxUse=5 3. Set passwordMaxFailure to a higher value to not disturb the test 4. 
Bind with a wrong password less then passwordTPRMaxUse times and check INVALID_CREDENTIALS 5. Bind with the valid password and check SRCH fail (ldap.UNWILLING_TO_PERFORM) because of passwordMustChange 6. check passwordTPRRetryCount reset to 0 7. Bindd with valid password and reset the password 8. Check we can bind again and SRCH succeeds 9. Reset password policy configuration :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success """ try_tpr_failure = 5 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Do less than passwordTPRMaxUse failing bind try_tpr_failure = try_tpr_failure - 2 for i in range(try_tpr_failure): # Bind as user with a wrong password with pytest.raises(ldap.INVALID_CREDENTIALS): our_user.rebind('wrong password') time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) # Now the #failures has not reached passwordTPRMaxUse # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 
'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) # Bind as user with valid password our_user.rebind(PASSWORD) time.sleep(.5) # We can not do anything else that reset password users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) with pytest.raises(ldap.UNWILLING_TO_PERFORM): user = users.get(TEST_USER_NAME) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is FALSE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + 1) # Now reset the password and check we can do fully use the account our_user.rebind(PASSWORD) our_user.reset_password(TEST_USER_PWD) # give time to update the pwp attributes in the entry time.sleep(.5) our_user.rebind(TEST_USER_PWD) time.sleep(.5) user = users.get(TEST_USER_NAME) def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_maxuse_4(topology_st, test_user, request): """Test global TPR policy : passwordTPRMaxUse Test that a TPR attribute passwordTPRMaxUse can be updated by DM but not the by user itself :id: ee698277-9c4e-4f58-8f57-158a6d966fe6 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRMaxUse=5 3. Set passwordMaxFailure to a higher value to not disturb the test 4. Create a user without specific rights to update passwordTPRMaxUse 5. Reset user password 6. Do 3 failing (bad password) user authentication -> INVALID_CREDENTIALS 7. Check that pwdTPRUseCount==3 8. 
Bind as user and reset its password 9. Check that user can not update pwdTPRUseCount => INSUFFICIENT_ACCESS 10. Check that DM can update pwdTPRUseCount :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. INVALID_CREDENTIALS 7. Success 8. Success 9. INSUFFICIENT_ACCESS 10. Success """ try_tpr_failure = 5 USER_NO_ACI_NAME = 'user_no_aci' USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) USER_NO_ACI_PWD = 'user_no_aci' # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) time.sleep(.5) # create user account (without aci granting write rights) users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) try: user = users.create(properties=user_props) except: pass # debug only # Reset user's password user.replace('userpassword', PASSWORD) time.sleep(.5) # Do less than passwordTPRMaxUse failing bind try_tpr_failure = try_tpr_failure - 2 for i in range(try_tpr_failure): # Bind as user with a wrong password with pytest.raises(ldap.INVALID_CREDENTIALS): user.rebind('wrong password') time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) # Now the #failures has not reached passwordTPRMaxUse # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert 
user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) # Bind as user with valid password, reset the password # and do simple search user.rebind(PASSWORD) user.reset_password(USER_NO_ACI_PWD) time.sleep(.5) user.rebind(USER_NO_ACI_PWD) assert user.get_attr_val_utf8('uid') time.sleep(.5) # Fail to update pwdTPRUseCount being USER_NO_ACI with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('pwdTPRUseCount', '100') assert user.get_attr_val_utf8('pwdTPRUseCount') != '100' # Succeeds to update pwdTPRUseCount being DM topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.replace('pwdTPRUseCount', '100') assert user.get_attr_val_utf8('pwdTPRUseCount') == '100' def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password user.delete() request.addfinalizer(fin) def test_local_tpr_maxuse_5(topology_st, test_user, request): """Test TPR local policy overpass global one: passwordTPRMaxUse Test that after passwordTPRMaxUse failures to bind additional bind with valid password are failing with CONSTRAINT_VIOLATION :id: c3919707-d804-445a-8754-8385b1072c42 :customerscenario: False :setup: Standalone instance :steps: 1. Global password policy Enable passwordMustChange 2. Global password policy Set passwordTPRMaxUse=5 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test 4. Local password policy Enable passwordMustChange 5. Local password policy Set passwordTPRMaxUse=10 (higher than global) 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS 7. Check that passwordTPRUseCount got to the limit (5) 8. 
Bind with a wrong password (CONSTRAINT_VIOLATION) and check passwordTPRUseCount overpass the limit by 1 (11) 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION and check passwordTPRUseCount increases 10. Reset password policy configuration and remove local password from user :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success """ global_tpr_maxuse = 5 # Set global password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20)) topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse)) time.sleep(.5) local_tpr_maxuse = global_tpr_maxuse + 5 # Reset user's password with a local password policy # that has passwordTPRMaxUse higher than global #our_user = UserAccount(topology_st.standalone, TEST_USER_DN) subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), '-D', '%s' % DN_DM, '-w', '%s' % PASSWORD, 'slapd-standalone1', 'localpwp', 'adduser', test_user.dn]) subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), '-D', '%s' % DN_DM, '-w', '%s' % PASSWORD, 'slapd-standalone1', 'localpwp', 'set', '--pwptprmaxuse', str(local_tpr_maxuse), '--pwdmustchange', 'on', test_user.dn]) test_user.replace('userpassword', PASSWORD) time.sleep(.5) # look up to passwordTPRMaxUse with failing # bind to check that the limits of TPR are enforced for i in range(local_tpr_maxuse): # Bind as user with a wrong password with pytest.raises(ldap.INVALID_CREDENTIALS): test_user.rebind('wrong password') time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' 
assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) # Now the #failures reached passwordTPRMaxUse # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Check that pwdTPRReset is TRUE assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse) log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse)) # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION with pytest.raises(ldap.CONSTRAINT_VIOLATION): test_user.rebind("wrong password") time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Check that pwdTPRReset is TRUE assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1) log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i)) # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION # and passwordTPRRetryCount remains unchanged # account is now similar to locked for i in range(10): # Bind as user with valid password with pytest.raises(ldap.CONSTRAINT_VIOLATION): test_user.rebind(PASSWORD) time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Check that pwdTPRReset is TRUE # pwdTPRUseCount keeps increasing assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2) log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2)) def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Remove local 
password policy from that entry subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), '-D', '%s' % DN_DM, '-w', '%s' % PASSWORD, 'slapd-standalone1', 'localpwp', 'remove', test_user.dn]) # Reset user's password test_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_delayValidFrom_1(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayValidFrom Test that a TPR password is not valid before reset time + passwordTPRDelayValidFrom :id: 8420a348-e765-43ec-82c7-7f75cb4bf913 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRDelayValidFrom=10s 3. Create a account user 5. Reset the password 6. Check that Validity is not reached yet pwdTPRValidFrom >= now + passwordTPRDelayValidFrom - 2 (safety) 7. Bind with valid password, Fails because of CONSTRAINT_VIOLATION :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ ValidFrom = 10 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) assert 
(gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) # Bind as user with valid password # But too early compare to ValidFrom with pytest.raises(ldap.CONSTRAINT_VIOLATION): our_user.rebind(PASSWORD) def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_delayValidFrom_2(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayValidFrom Test that a TPR password is valid after reset time + passwordTPRDelayValidFrom :id: 8fa9f6f7-9be2-47c0-bf92-d9fe78ddbc34 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRDelayValidFrom=6s 3. Create a account user 5. Reset the password 6. Wait for passwordTPRDelayValidFrom=6s 7. Bind with valid password, reset password to allow further searches 8. Check bound user can search attribute ('uid') :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. 
Success """ ValidFrom = 6 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) # wait for pwdTPRValidFrom time.sleep(ValidFrom + 1) # Bind as user with valid password, reset the password # and do simple search our_user.rebind(PASSWORD) our_user.reset_password(TEST_USER_PWD) our_user.rebind(TEST_USER_PWD) assert our_user.get_attr_val_utf8('uid') def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_delayValidFrom_3(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayValidFrom Test that a TPR attribute passwordTPRDelayValidFrom can be updated by DM but not the by user itself :id: c599aea2-bbad-4158-b32e-307e5c6fca2d :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. 
Set passwordTPRDelayValidFrom=6s 3. Create a account user 5. Reset the password 6. Check pwdReset/pwdTPRReset/pwdTPRValidFrom 7. wait for 6s to let the new TPR password being valid 8. Bind with valid password, reset password to allow further searches 9. Check bound user can search attribute ('uid') 10. Bound as user, check user has not the rights to modify pwdTPRValidFrom 11. Bound as DM, check user has the right to modify pwdTPRValidFrom :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. ldap.INSUFFICIENT_ACCESS 11. Success """ ValidFrom = 6 USER_NO_ACI_NAME = 'user_no_aci' USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) USER_NO_ACI_PWD = 'user_no_aci' # Set password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) time.sleep(.5) # create user account (without aci granting write rights) users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) try: user = users.create(properties=user_props) except: pass # debug only # Reset user's password #our_user = UserAccount(topology_st.standalone, USER_NO_ACI_DN) user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) assert (gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) # wait for 
pwdTPRValidFrom time.sleep(ValidFrom + 1) # Bind as user with valid password, reset the password # and do simple search user.rebind(PASSWORD) user.reset_password(USER_NO_ACI_PWD) user.rebind(USER_NO_ACI_PWD) assert user.get_attr_val_utf8('uid') # Fail to update pwdTPRValidFrom being USER_NO_ACI with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('pwdTPRValidFrom', '1234567890Z') assert user.get_attr_val_utf8('pwdTPRValidFrom') != '1234567890Z' # Succeeds to update pwdTPRValidFrom being DM topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.replace('pwdTPRValidFrom', '1234567890Z') assert user.get_attr_val_utf8('pwdTPRValidFrom') == '1234567890Z' def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # delete the no aci entry user.delete() request.addfinalizer(fin) def test_global_tpr_delayExpireAt_1(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayExpireAt Test that a TPR password is not valid after reset time + passwordTPRDelayExpireAt :id: b98def32-4e30-49fd-893b-8f959ba72b98 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRDelayExpireAt=6s 3. Create a account user 5. Reset the password 6. Wait for passwordTPRDelayExpireAt=6s + 2s (safety) 7. Bind with valid password should fail with ldap.CONSTRAINT_VIOLATION :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ ExpireAt = 6 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRMaxUse', str(-1)) topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) # wait for pwdTPRExpireAt time.sleep(ExpireAt + 2) # Bind as user with valid password but too late # for pwdTPRExpireAt # and do simple search with pytest.raises(ldap.CONSTRAINT_VIOLATION): our_user.rebind(PASSWORD) def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def test_global_tpr_delayExpireAt_2(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayExpireAt Test that a TPR password is valid before reset time + passwordTPRDelayExpireAt :id: 9df320de-ebf6-4ed0-a619-51b1a05a560c :customerscenario: False :setup: Standalone instance :steps: 1. 
Enable passwordMustChange 2. Set passwordTPRDelayExpireAt=6s 3. Create a account user 5. Reset the password 6. Wait for 1s 7. Bind with valid password should succeeds :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ ExpireAt = 6 # Set password policy config, passwordMaxFailure being higher than # passwordTPRMaxUse so that TPR is enforced first topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRMaxUse', str(-1)) topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) time.sleep(.5) # Reset user's password our_user = UserAccount(topology_st.standalone, TEST_USER_DN) our_user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) # wait for 1s time.sleep(1) # Bind as user with valid password, reset the password # and do simple search our_user.rebind(PASSWORD) our_user.reset_password(TEST_USER_PWD) time.sleep(.5) our_user.rebind(TEST_USER_PWD) assert our_user.get_attr_val_utf8('uid') def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # Reset user's password our_user.replace('userpassword', TEST_USER_PWD) request.addfinalizer(fin) def 
test_global_tpr_delayExpireAt_3(topology_st, test_user, request): """Test global TPR policy : passwordTPRDelayExpireAt Test that a TPR attribute passwordTPRDelayExpireAt can be updated by DM but not the by user itself :id: 22bb5dd8-d8f6-4484-988e-6de0ef704391 :customerscenario: False :setup: Standalone instance :steps: 1. Enable passwordMustChange 2. Set passwordTPRDelayExpireAt=6s 3. Create a account user 5. Reset the password 6. Check pwdReset/pwdTPRReset/pwdTPRValidFrom 7. wait for 1s so that TPR has not expired 8. Bind with valid password, reset password to allow further searches 9. Check bound user can search attribute ('uid') 10. Bound as user, check user has not the rights to modify pwdTPRExpireAt 11. Bound as DM, check user has the right to modify pwdTPRExpireAt :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. ldap.INSUFFICIENT_ACCESS 11. Success """ ExpireAt = 6 USER_NO_ACI_NAME = 'user_no_aci' USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) USER_NO_ACI_PWD = 'user_no_aci' # Set password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'on') topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) time.sleep(.5) # create user account (without aci granting write rights) users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) try: user = users.create(properties=user_props) except: pass # debug only # Reset user's password user.replace('userpassword', PASSWORD) # give time to update the pwp attributes in the entry time.sleep(.5) # Check that pwdReset is TRUE 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert user.get_attr_val_utf8('pwdReset') == 'TRUE' # Check that pwdTPRReset is TRUE assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' now = time.mktime(time.gmtime()) log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) assert (gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) # wait for 1s time.sleep(1) # Bind as user with valid password, reset the password # and do simple search user.rebind(PASSWORD) user.reset_password(USER_NO_ACI_PWD) time.sleep(.5) user.rebind(USER_NO_ACI_PWD) assert user.get_attr_val_utf8('uid') time.sleep(.5) # Fail to update pwdTPRExpireAt being USER_NO_ACI with pytest.raises(ldap.INSUFFICIENT_ACCESS): user.replace('pwdTPRExpireAt', '1234567890Z') assert user.get_attr_val_utf8('pwdTPRExpireAt') != '1234567890Z' # Succeeds to update pwdTPRExpireAt being DM topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.replace('pwdTPRExpireAt', '1234567890Z') assert user.get_attr_val_utf8('pwdTPRExpireAt') == '1234567890Z' def fin(): topology_st.standalone.restart() # Reset password policy config topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.config.replace('passwordMustChange', 'off') # delete the no aci entry user.delete() request.addfinalizer(fin) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py000066400000000000000000000052501421664411400307770ustar00rootroot00000000000000import logging import pytest import os import time import ldap from lib389._constants import * from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", 
default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=Test_user1,ou=People,dc=example,dc=com' USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' TOKEN = 'test_user1' user_properties = { 'uid': 'Test_user1', 'cn': 'test_user1', 'sn': 'test_user1', 'uidNumber': '1001', 'gidNumber': '2001', 'userpassword': PASSWORD, 'description': 'userdesc', 'homeDirectory': '/home/{}'.format('test_user')} def pwd_setup(topo): ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) topo.standalone.config.replace_many(('passwordCheckSyntax', 'on'), ('passwordMinLength', '4'), ('passwordMinCategories', '1')) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) return users.create(properties=user_properties) def test_token_lengths(topo): """Test that password token length is enforced for various lengths including the same length as the attribute being checked by the policy. :id: dae9d916-2a03-4707-b454-9e901d295b13 :setup: Standalone instance :steps: 1. Test token length rejects password of the same length as rdn value :expectedresults: 1. 
Passwords are rejected """ user = pwd_setup(topo) for length in ['4', '6', '10']: topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('passwordMinTokenLength', length) topo.standalone.simple_bind_s(USER_DN, PASSWORD) time.sleep(1) try: passwd = TOKEN[:int(length)] log.info("Testing password len {} token ({})".format(length, passwd)) user.replace('userpassword', passwd) log.fatal('Password incorrectly allowed!') assert False except ldap.CONSTRAINT_VIOLATION as e: log.info('Password correctly rejected: ' + str(e)) except ldap.LDAPError as e: log.fatal('Unexpected failure ' + str(e)) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py000066400000000000000000000512731421664411400313320ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import subprocess from ldap.controls.ppolicy import PasswordPolicyControl from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM) from dateutil.parser import parse as dt_parse from lib389.config import Config import datetime pytestmark = pytest.mark.tier1 CONFIG_ATTR = 'passwordSendExpiringTime' USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX) USER_RDN = 'tuser' USER_PASSWD = 'secret123' USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture def global_policy(topology_st, request): """Sets the required global password policy attributes under cn=config entry """ attrs = {'passwordExp': '', 'passwordMaxAge': '', 'passwordWarning': '', CONFIG_ATTR: ''} log.info('Get the default values') entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, '(objectClass=*)', attrs.keys()) for key in attrs.keys(): attrs[key] = entry.getValue(key) log.info('Set the new values') topology_st.standalone.config.replace_many(('passwordExp', 'on'), ('passwordMaxAge', '172800'), ('passwordWarning', '86400'), (CONFIG_ATTR, 'on')) def fin(): """Resets the defaults""" log.info('Reset the defaults') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) for key in attrs.keys(): topology_st.standalone.config.replace(key, attrs[key]) request.addfinalizer(fin) # A short sleep is required after the modifying password policy or cn=config time.sleep(0.5) @pytest.fixture def global_policy_default(topology_st, request): """Sets the required global password policy attributes for testing the default behavior of password expiry warning time """ attrs = {'passwordExp': '', 
'passwordMaxAge': '', 'passwordWarning': '', CONFIG_ATTR: ''} log.info('Get the default values') entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, '(objectClass=*)', attrs.keys()) for key in attrs.keys(): attrs[key] = entry.getValue(key) log.info('Set the new values') topology_st.standalone.config.replace_many( ('passwordExp', 'on'), ('passwordMaxAge', '8640000'), ('passwordWarning', '86400'), (CONFIG_ATTR, 'off')) def fin(): """Resets the defaults""" log.info('Reset the defaults') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) for key in attrs.keys(): topology_st.standalone.config.replace(key, attrs[key]) request.addfinalizer(fin) # A short sleep is required after modifying password policy or cn=config time.sleep(0.5) @pytest.fixture def add_user(topology_st, request): """Adds a user for binding""" log.info('Add the user') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create(properties={ 'uid': USER_RDN, 'cn': USER_RDN, 'sn': USER_RDN, 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/user', 'description': 'd_e_s_c', 'userPassword': USER_PASSWD }) def fin(): """Removes the user entry""" log.info('Remove the user entry') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.delete() request.addfinalizer(fin) @pytest.fixture def local_policy(topology_st, add_user): """Sets fine grained policy for user entry""" log.info("Setting fine grained policy for user ({})".format(USER_DN)) subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), 'slapd-standalone1', 'localpwp', 'adduser', USER_DN]) # A short sleep is required after modifying password policy time.sleep(0.5) def get_password_warning(topology_st): """Gets the password expiry warning time for the user""" res_type = res_data = res_msgid = res_ctrls = None result_id = '' log.info('Bind with the user and request the password expiry warning time') result_id = topology_st.standalone.simple_bind(USER_DN, USER_PASSWD, 
serverctrls=[PasswordPolicyControl()]) res_type, res_data, res_msgid, res_ctrls = \ topology_st.standalone.result3(result_id) # Return the control return res_ctrls def set_conf_attr(topology_st, attr, val): """Sets the value of a given attribute under cn=config""" log.info("Setting {} to {}".format(attr, val)) topology_st.standalone.config.set(attr, val) # A short sleep is required after modifying cn=config time.sleep(0.5) def get_conf_attr(topology_st, attr): """Gets the value of a given attribute under cn=config entry """ return topology_st.standalone.config.get_attr_val_utf8(attr) @pytest.mark.parametrize("value", (' ', 'junk123', 'on', 'off')) def test_different_values(topology_st, value): """Try to set passwordSendExpiringTime attribute to various values both valid and invalid :id: 3e6d79fb-b4c8-4860-897e-5b207815a75d :parametrized: yes :setup: Standalone instance :steps: 1. Try to set passwordSendExpiringTime to 'on' and 'off' under cn=config entry 2. Try to set passwordSendExpiringTime to ' ' and 'junk123' under cn=config entry 3. Run the search command to check the value of passwordSendExpiringTime attribute :expectedresults: 1. Valid values should be accepted and saved 2. Should be rejected with an OPERATIONS_ERROR 3. 
The attribute should be changed for valid values and unchanged for invalid """ log.info('Get the default value') defval = get_conf_attr(topology_st, CONFIG_ATTR) if value not in ('on', 'off'): log.info('An invalid value is being tested') with pytest.raises(ldap.OPERATIONS_ERROR): set_conf_attr(topology_st, CONFIG_ATTR, value) log.info('Now check the value is unchanged') assert get_conf_attr(topology_st, CONFIG_ATTR) == defval log.info("Invalid value {} was rejected correctly".format(value)) else: log.info('A valid value is being tested') set_conf_attr(topology_st, CONFIG_ATTR, value) log.info('Now check that the value has been changed') assert str(get_conf_attr(topology_st, CONFIG_ATTR)) == value log.info("{} is now set to {}".format(CONFIG_ATTR, value)) log.info('Set passwordSendExpiringTime back to the default value') set_conf_attr(topology_st, CONFIG_ATTR, defval) def test_expiry_time(topology_st, global_policy, add_user): """Test whether the password expiry warning time for a user is returned appropriately :id: 7adfd395-9b25-4cc0-9b71-14710dc1a28c :setup: Standalone instance, a user entry, Global password policy configured as below: passwordExp: on passwordMaxAge: 172800 passwordWarning: 86400 passwordSendExpiringTime: on :steps: 1. Bind as the normal user 2. Request password policy control for the user 3. Bind as DM :expectedresults: 1. Bind should be successful 2. The password expiry warning time for the user should be returned 3. 
Bind should be successful """ res_ctrls = None ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) log.info('Get the password expiry warning time') log.info("Binding with ({}) and requesting the password expiry warning time" .format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check whether the time is returned') assert res_ctrls log.info("user's password will expire in {:d} seconds" .format(res_ctrls[0].timeBeforeExpiration)) log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) @pytest.mark.parametrize("attr,val", [(CONFIG_ATTR, 'off'), ('passwordWarning', '3600')]) def test_password_warning(topology_st, global_policy, add_user, attr, val): """Test password expiry warning time by setting passwordSendExpiringTime to off and setting passwordWarning to a short value :id: 39f54b3c-8c80-43ca-856a-174d81c56ce8 :parametrized: yes :setup: Standalone instance, a test user, Global password policy configured as below: passwordExp: on passwordMaxAge: 172800 passwordWarning: 86400 passwordSendExpiringTime: on :steps: 1. Set passwordSendExpiringTime attribute to off or to on and passwordWarning to a small value (3600) 2. Bind as the normal user 3. Request the password expiry warning time 4. Bind as DM :expectedresults: 1. passwordSendExpiringTime and passwordWarning are set successfully 2. Bind should be successful 3. Password expiry warning time should be returned for the small value and should not be returned when passwordSendExpiringTime is off 4. 
Bind should be successful """ log.info('Set configuration parameter') set_conf_attr(topology_st, attr, val) log.info("Binding with ({}) and requesting password expiry warning time" .format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check the state of the control') if not res_ctrls: log.info("Password Expiry warning time is not returned as {} is set to {}" .format(attr, val)) else: log.info("({}) password will expire in {:d} seconds" .format(USER_DN, res_ctrls[0].timeBeforeExpiration)) log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) def test_with_different_password_states(topology_st, global_policy, add_user): """Test the control with different password states :id: d297fb1a-661f-4d52-bb43-2a2a340b8b0e :setup: Standalone instance, a user entry, Global password policy configured as below: passwordExp: on passwordMaxAge: 172800 passwordWarning: 86400 passwordSendExpiringTime: on :steps: 1. Expire user's password by changing passwordExpirationTime timestamp 2. Try to bind to the server with the user entry 3. Revert back user's passwordExpirationTime 4. Try to bind with the user entry and request the control 5. Bind as DM :expectedresults: 1. Operation should be successful 2. Operation should fail because of Invalid Credentials 3. passwordExpirationTime is successfully changed 4. Bind should be successful and the password expiry warning time should be returned 5. 
Bind should be successful """ res_ctrls = None log.info("Expire user's password by changing passwordExpirationTime timestamp") users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.get(USER_RDN) old_ts = user.get_attr_val_utf8('passwordExpirationTime') log.info("Old passwordExpirationTime: {}".format(old_ts)) new_ts = (dt_parse(old_ts) - datetime.timedelta(31)).strftime('%Y%m%d%H%M%SZ') log.info("New passwordExpirationTime: {}".format(new_ts)) user.replace('passwordExpirationTime', new_ts) log.info("Attempting to bind with user {} and retrive the password expiry warning time".format(USER_DN)) with pytest.raises(ldap.INVALID_CREDENTIALS) as ex: res_ctrls = get_password_warning(topology_st) log.info("Bind Failed, error: {}".format(str(ex))) log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info("Reverting back user's passwordExpirationTime") user.replace('passwordExpirationTime', old_ts) log.info("Rebinding with {} and retrieving the password expiry warning time".format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check that the control is returned') assert res_ctrls log.info("user's password will expire in {:d} seconds" .format(res_ctrls[0].timeBeforeExpiration)) log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) def test_default_behavior(topology_st, global_policy_default, add_user): """Test the default behavior of password expiry warning time :id: c47fa824-ee08-4b78-885f-bca4c42bb655 :setup: Standalone instance, a user entry, Global password policy configured as below: passwordExp: on passwordMaxAge: 8640000 passwordWarning: 86400 passwordSendExpiringTime: off :steps: 1. Bind as the normal user 2. Request the control for the user 3. Bind as DM :expectedresults: 1. Bind should be successful 2. No control should be returned 3. 
Bind should be successful """ res_ctrls = None log.info("Binding with {} and requesting the password expiry warning time" .format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check that no control is returned') assert not res_ctrls log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) def test_when_maxage_and_warning_are_the_same(topology_st, global_policy_default, add_user): """Test the warning expiry when passwordMaxAge and passwordWarning are set to the same value. :id: e57a1b1c-96fc-11e7-a91b-28d244694824 :setup: Standalone instance, a user entry, Global password policy configured as below: passwordExp: on passwordMaxAge: 86400 passwordWarning: 86400 passwordSendExpiringTime: off :steps: 1. Bind as the normal user 2. Change user's password to reset its password expiration time 3. Request the control for the user 4. Bind as DM :expectedresults: 1. Bind should be successful 2. Password should be changed and password's expiration time reset 3. Password expiry warning time should be returned by the server since passwordMaxAge and passwordWarning are set to the same value 4. 
Bind should be successful """ log.info('Set the new values') topology_st.standalone.config.set('passwordMaxAge', '86400') res_ctrls = None log.info("First change user's password to reset its password expiration time") users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.get(USER_RDN) user.rebind(USER_PASSWD) user.reset_password(USER_PASSWD) time.sleep(2) log.info("Binding with {} and requesting the password expiry warning time" .format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check that control is returned even' 'if passwordSendExpiringTime is set to off') assert res_ctrls log.info("user's password will expire in {:d} seconds".format(res_ctrls[0].timeBeforeExpiration)) log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) def test_with_local_policy(topology_st, global_policy, local_policy): """Test the attribute with fine grained policy set for the user :id: ab7d9f86-8cfe-48c3-8baa-739e599f006a :setup: Standalone instance, a user entry, Global password policy configured as below: passwordExp: on passwordMaxAge: 172800 passwordWarning: 86400 passwordSendExpiringTime: on Fine grained password policy for the user using: dsconf INST localpwp :steps: 1. Bind as the normal user 2. Request the control for the user 3. Bind as DM :expectedresults: 1. Bind should be successful 2. Password expiry warning time should not be returned for the user 3. 
Bind should be successful """ res_ctrls = None log.info("Attempting to get password expiry warning time for user {}".format(USER_DN)) res_ctrls = get_password_warning(topology_st) log.info('Check that the control is not returned') assert not res_ctrls log.info("Password expiry warning time is not returned") log.info("Rebinding as DM") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) @pytest.mark.bz1589144 @pytest.mark.ds50091 def test_search_shadowWarning_when_passwordWarning_is_lower(topology_st, global_policy): """Test if value shadowWarning is present with global password policy when passwordWarning is set with lower value. :id: c1e82de6-1aa3-42c3-844a-9720172158a3 :setup: Standalone Instance :steps: 1. Bind as Directory Manager 2. Set global password policy 3. Add test user to instance. 4. Modify passwordWarning to have smaller value than 86400 5. Bind as the new user 6. Search for shadowWarning attribute 7. Rebind as Directory Manager :expectedresults: 1. Binding should be successful 2. Setting password policy should be successful 3. Adding test user should be successful 4. Modifying passwordWarning should be successful 5. Binding should be successful 6. Attribute shadowWarning should be found 7. 
Binding should be successful """ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) log.info("Bind as %s" % DN_DM) assert topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info("Creating test user") testuser = users.create_test_user(1004) testuser.add('objectclass', 'shadowAccount') testuser.set('userPassword', USER_PASSWD) log.info("Setting passwordWarning to smaller value than 86400") assert topology_st.standalone.config.set('passwordWarning', '86399') log.info("Bind as test user") assert topology_st.standalone.simple_bind_s(testuser.dn, USER_PASSWD) log.info("Check if attribute shadowWarning is present") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert testuser.present('shadowWarning') @pytest.mark.bug624080 def test_password_expire_works(topology_st): """Regression test for bug624080. If passwordMaxAge is set to a value and a new user is added, if the passwordMaxAge is changed to a shorter expiration time and the new users password is then changed ..... the passwordExpirationTime for the new user should be changed too. There was a bug in DS 6.2 where the expirationtime remained unchanged. :id: 1ead6052-4636-11ea-b5af-8c16451d917b :setup: Standalone :steps: 1. Set the Global password policy and a passwordMaxAge to 5 days 2. Add the new user 3. Check the users password expiration time now 4. Decrease global passwordMaxAge to 2 days 5. Modify the users password 6. Modify the user one more time to make sur etime has been reset 7. turn off the password policy :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ config = Config(topology_st.standalone) config.replace_many(('passwordMaxAge', '432000'), ('passwordExp', 'on')) user = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() user.set('userPassword', 'anuj') time.sleep(0.5) expire_time = user.get_attr_val_utf8('passwordExpirationTime') config.replace('passwordMaxAge', '172800') user.set('userPassword', 'borah') time.sleep(0.5) expire_time2 = user.get_attr_val_utf8('passwordExpirationTime') config.replace('passwordMaxAge', '604800') user.set('userPassword', 'anujagaiin') time.sleep(0.5) expire_time3 = user.get_attr_val_utf8('passwordExpirationTime') assert expire_time != expire_time2 != expire_time3 config.replace('passwordExp', 'off') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwd_algo_test.py000066400000000000000000000146751421664411400274340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.paths import Paths default_paths = Paths() pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv('DEBUGGING', False) USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _test_bind(user, password): result = True try: userconn = user.bind(password) userconn.unbind_s() except ldap.INVALID_CREDENTIALS: result = False return result def _test_algo(inst, algo_name): inst.config.set('passwordStorageScheme', algo_name) users = UserAccounts(inst, DEFAULT_SUFFIX) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) user = users.create(properties=user_props) # Make sure when we read the userPassword field, it is the correct ALGO pw_field = user.get_attr_val_utf8('userPassword') if algo_name != 'CLEAR' and algo_name != 'DEFAULT': assert (algo_name[:5].lower() in pw_field.lower()) # Now make sure a bind works assert (_test_bind(user, 'Secret123')) # Bind with a wrong shorter password, should fail assert (not _test_bind(user, 'Wrong')) # Bind with a wrong longer password, should fail assert (not _test_bind(user, 'This is even more wrong')) # Bind with a wrong exact length password. 
assert (not _test_bind(user, 'Alsowrong')) # Bind with a subset password, should fail assert (not _test_bind(user, 'Secret')) if not algo_name.startswith('CRYPT'): # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear assert (not _test_bind(user, 'Secret12')) # Bind with a superset password, should fail assert (not _test_bind(user, 'Secret123456')) # Delete the user user.delete() def _test_bind_for_pbkdf2_algo(inst, password): result = True userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) try: userconn.simple_bind_s(USER_DN, password) userconn.unbind_s() except ldap.INVALID_CREDENTIALS: result = False return result def _test_algo_for_pbkdf2(inst, algo_name): inst.config.set('passwordStorageScheme', algo_name) if DEBUGGING: print('Testing %s' % algo_name) # Create the user with a password users = UserAccounts(inst, DEFAULT_SUFFIX) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) user = users.create(properties=user_props) # Make sure when we read the userPassword field, it is the correct ALGO pw_field = user.get_attr_val_utf8_l('userPassword') if DEBUGGING: print(pw_field) if algo_name != 'CLEAR': lalgo_name = algo_name.lower() assert (pw_field.startswith('{' + lalgo_name + '}')) # Now make sure a bind works assert (_test_bind_for_pbkdf2_algo(inst, 'Secret123')) # Bind with a wrong shorter password, should fail assert (not _test_bind_for_pbkdf2_algo(inst, 'Wrong')) # Bind with a wrong longer password, should fail assert (not _test_bind_for_pbkdf2_algo(inst, 'This is even more wrong')) # Bind with a password that has the algo in the name assert (not _test_bind_for_pbkdf2_algo(inst, '{%s}SomeValues....' % algo_name)) # Bind with a wrong exact length password. 
assert (not _test_bind_for_pbkdf2_algo(inst, 'Alsowrong')) # Bind with a subset password, should fail assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret')) if algo_name != 'CRYPT': # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret12')) # Bind with a superset password, should fail assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret123456')) # Delete the user inst.delete_s(USER_DN) ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT', 'GOST_YESCRYPT', ) if default_paths.rust_enabled and ds_is_newer('1.4.3.0'): ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT', 'PBKDF2-SHA1', 'PBKDF2-SHA256', 'PBKDF2-SHA512', 'GOST_YESCRYPT', ) @pytest.mark.parametrize("algo", ALGO_SET) def test_pwd_algo_test(topology_st, algo): """Assert that all of our password algorithms correctly PASS and FAIL varying password conditions. :id: fbb308a8-8374-4abd-b786-1f88e56f7650 :parametrized: yes """ if algo == 'DEFAULT': if ds_is_older('1.4.0'): pytest.skip("Not implemented") _test_algo(topology_st.standalone, algo) log.info('Test %s PASSED' % algo) @pytest.mark.ds397 def test_pbkdf2_algo(topology_st): """Changing password storage scheme to PBKDF2_SHA256 and trying to bind with different password combination :id: 112e265b-f468-4758-b8fa-ed8742de0182 :setup: Standalone instance :steps: 1. Change password storage scheme to PBKDF2_SHA256 2. Add a test user entry 3. Bind with correct password 4. Bind with incorrect password combination(brute-force) :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Bind should be successful 4. 
Should not allow to bind with incorrect password """ if DEBUGGING: # Add debugging steps(if any)... log.info("ATTACH NOW") time.sleep(30) # Merge this to the password suite in the future for algo in ('PBKDF2_SHA256',): for i in range(0, 10): _test_algo_for_pbkdf2(topology_st.standalone, algo) log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py000066400000000000000000000033351421664411400315470ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import ldap import pytest from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389._constants import (DEFAULT_SUFFIX, PASSWORD) pytestmark = pytest.mark.tier1 def test_password_crypt_asterisk_is_rejected(topology_st): """It was reported that {CRYPT}* was allowing all passwords to be valid in the bind process. This checks that we should be rejecting these as they should represent locked accounts. Similar, {CRYPT}! :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3 :setup: Single instance :steps: 1. Set a password hash in with CRYPT and the content * 2. Test a bind 3. Set a password hash in with CRYPT and the content ! 4. Test a bind :expectedresults: 1. Successfully set the values 2. The bind fails 3. Successfully set the values 4. The bind fails """ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create_test_user() user.set('userPassword', "{CRYPT}*") # Attempt to bind with incorrect password. 
with pytest.raises(ldap.INVALID_CREDENTIALS): badconn = user.bind('badpassword') user.set('userPassword', "{CRYPT}!") # Attempt to bind with incorrect password. with pytest.raises(ldap.INVALID_CREDENTIALS): badconn = user.bind('badpassword') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py000066400000000000000000000051051421664411400315370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES import ldap pytestmark = pytest.mark.tier1 # The irony of these names is not lost on me. GOOD_PASSWORD = 'password' BAD_PASSWORD = 'aontseunao' logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_lockout_bypass(topology_st): """Check basic password lockout functionality :id: 2482a992-1719-495c-b75b-78fe5c48c873 :setup: Standalone instance :steps: 1. Set passwordMaxFailure to 1 2. Set passwordLockDuration to 7 3. Set passwordLockout to 'on' 4. Create a user 5. Set a userPassword attribute 6. Bind as the user with a bad credentials 7. Bind as the user with a bad credentials 8. Bind as the user with a good credentials :expectedresults: 1. passwordMaxFailure should be successfully set 2. passwordLockDuration should be successfully set 3. passwordLockout should be successfully set 4. User should be created 5. userPassword should be successfully set 6. Should throw an invalid credentials error 7. Should throw a constraint violation error 8. 
Should throw a constraint violation error """ inst = topology_st.standalone # Configure the lock policy inst.config.set('passwordMaxFailure', '1') inst.config.set('passwordLockoutDuration', '99999') inst.config.set('passwordLockout', 'on') # Create the account users = UserAccounts(inst, DEFAULT_SUFFIX) testuser = users.create(properties=TEST_USER_PROPERTIES) testuser.set('userPassword', GOOD_PASSWORD) conn = testuser.bind(GOOD_PASSWORD) assert conn != None conn.unbind_s() # Bind with bad creds twice # This is the failure. with pytest.raises(ldap.INVALID_CREDENTIALS): conn = testuser.bind(BAD_PASSWORD) # Now we should not be able to ATTEMPT the bind. It doesn't matter that # we disclose that we have hit the rate limit here, what matters is that # it exists. with pytest.raises(ldap.CONSTRAINT_VIOLATION): conn = testuser.bind(BAD_PASSWORD) # now bind with good creds # Should be error 19 still. with pytest.raises(ldap.CONSTRAINT_VIOLATION): conn = testuser.bind(GOOD_PASSWORD) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwd_log_test.py000066400000000000000000000056061421664411400272650ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.mark.ds365 def test_hide_unhashed_pwd(topology_st): """Change userPassword, enable hiding of un-hashed password and check the audit logs. :id: c4a5d08d-f525-459b-82b9-3f68dae6fc71 :setup: Standalone instance :steps: 1. Add a test user entry 2. Set a new password for user and nsslapd-auditlog-logging-enabled to 'on' 3. 
Disable nsslapd-auditlog-logging-hide-unhashed-pw 4. Check the audit logs 5. Set a new password for user and nsslapd-auditlog-logging-hide-unhashed-pw to 'on' 6. Check the audit logs :expectedresults: 1. User addition should be successful 2. New password should be set and audit logs should be enabled 3. Operation should be successful 4. Audit logs should show password without hash 5. Operation should be successful 6. Audit logs should hide password which is un-hashed """ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) user = users.create(properties=user_props) # Enable the audit log topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled','on') # Allow the unhashed password to be written to audit log topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'off') topology_st.standalone.config.set('nsslapd-unhashed-pw-switch', 'on') # Set new password, and check the audit log user.reset_password('mypassword') # Check audit log time.sleep(1) if not topology_st.standalone.searchAuditLog('unhashed#user#password: mypassword'): log.fatal('failed to find unhashed password in auditlog') assert False # Hide unhashed password in audit log topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'on') # Modify password, and check the audit log user.reset_password('hidepassword') # Check audit log time.sleep(1) if topology_st.standalone.searchAuditLog('unhashed#user#password: hidepassword'): log.fatal('Found unhashed password in auditlog') assert False log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py000066400000000000000000000205141421664411400316160ustar00rootroot00000000000000# --- 
BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import ldap import pytest from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM) pytestmark = pytest.mark.tier1 CONFIG_ATTR = 'passwordSendExpiringTime' USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX) USER_RDN = 'tuser' USER_PASSWD = 'secret123' USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' @pytest.fixture def add_user(topology_st, request): """Adds a user for binding""" log.info('Add the user') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create(properties={ 'uid': USER_RDN, 'cn': USER_RDN, 'sn': USER_RDN, 'uidNumber': '3000', 'gidNumber': '4000', 'homeDirectory': '/home/user', 'description': 'd_e_s_c', 'userPassword': USER_PASSWD }) def fin(): """Removes the user entry""" log.info('Remove the user entry') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) user.delete() request.addfinalizer(fin) @pytest.fixture def global_policy(topology_st, request): """Sets the required global password policy attributes under cn=config entry """ attrs = {'passwordExp': '', 'passwordMaxAge': '', 'passwordWarning': '', CONFIG_ATTR: ''} log.info('Get the default values') entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, '(objectClass=*)', attrs.keys()) for key in attrs.keys(): attrs[key] = entry.getValue(key) log.info('Set the new values') topology_st.standalone.config.replace_many(('passwordExp', 'on'), ('passwordMaxAge', '172800'), ('passwordWarning', '86400'), (CONFIG_ATTR, 'on')) def fin(): """Resets the defaults""" log.info('Reset the defaults') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) for key in attrs.keys(): 
topology_st.standalone.config.replace(key, attrs[key]) request.addfinalizer(fin) # A short sleep is required after the modifying password policy or cn=config time.sleep(0.5) def test_password_hash_on_upgrade(topology_st, global_policy, add_user): """If a legacy password hash is present, assert that on a correct bind the hash is "upgraded" to the latest-and-greatest hash format on the server. Assert also that password FAILURE does not alter the password. Assert that the password expiration date, history, etc is not modified as password hash upgrade on bind should be invisible to the user. :id: 42cf99e6-454d-46f5-8f1c-8bb699864a07 :setup: Single instance :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically 2. Get initial passwordExpirationtime 3. Test a faulty bind 4. Assert the PW is SSHA256 5. Test a correct bind 6. Assert the PW is PBKDF2 7. Assert the passwordExpirationtime hasnt changed after upgrade on bind :expectedresults: 1. Successfully set the values 2. Successfully get the passwordExpirationtime 3. The bind fails 4. The PW is SSHA256 5. The bind succeeds 6. The PW is PBKDF2udo 7. pwd expiration time hasnt been modifed """ # Make sure the server is set to pkbdf topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.get(USER_RDN) # Static version of "password" in SSHA256. user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") ts1 = user.get_attr_val_utf8('passwordExpirationTime') # Attempt to bind with incorrect password. 
with pytest.raises(ldap.INVALID_CREDENTIALS): badconn = user.bind('badpassword') # Check the pw is SSHA256 up = user.get_attr_val_utf8('userPassword') assert up.startswith('{SSHA256}') # Bind with correct, trigger update on bind time.sleep(1) conn = user.bind(PASSWORD) # Check the pw is now PBKDF2! up = user.get_attr_val_utf8('userPassword') assert up.startswith('{PBKDF2_SHA256}') # Verify passwordExpirationtime has not been reset ater hash upgrade ts2 = user.get_attr_val_utf8('passwordExpirationTime') assert ts1 == ts2 def test_password_hash_on_upgrade_clearcrypt(topology_st): """In some deploymentes, some passwords MAY be in clear or crypt which have specific possible application integrations allowing the read value to be processed by other entities. We avoid upgrading these two, to prevent breaking these integrations. :id: 27712492-a4bf-4ea9-977b-b4850ddfb628 :setup: Single instance :steps: 1. Set a password hash in CLEAR, and hash to pbkdf2 statically 2. Test a correct bind 3. Assert the PW is CLEAR 4. Set the password to CRYPT 5. Test a correct bind 6. Assert the PW is CLEAR :expectedresults: 1. Successfully set the values 2. The bind succeeds 3. The PW is CLEAR 4. The set succeeds 4. The bind succeeds 5. 
The PW is CRYPT """ # Make sure the server is set to pkbdf topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create_test_user(1001) topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR') user.set('userPassword', "password") topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') conn = user.bind(PASSWORD) up = user.get_attr_val_utf8('userPassword') assert up.startswith('password') user.set('userPassword', "{crypt}I0S3Ry62CSoFg") conn = user.bind(PASSWORD) up = user.get_attr_val_utf8('userPassword') assert up.startswith('{crypt}') def test_password_hash_on_upgrade_disable(topology_st): """If a legacy password hash is present, assert that on a correct bind the hash is "upgraded" to the latest-and-greatest hash format on the server. But some people may not like this, so test that we can disable the feature too! :id: ed315145-a3d1-4f17-b04c-73d3638e7ade :setup: Single instance :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically 2. Test a faulty bind 3. Assert the PW is SSHA256 4. Test a correct bind 5. Assert the PW is SSHA256 :expectedresults: 1. Successfully set the values 2. The bind fails 3. The PW is SSHA256 4. The bind succeeds 5. The PW is SSHA256 """ # Make sure the server is set to pkbdf topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create_test_user(1002) # Static version of "password" in SSHA256. user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") # Attempt to bind with incorrect password. 
with pytest.raises(ldap.INVALID_CREDENTIALS): badconn = user.bind('badpassword') # Check the pw is SSHA256 up = user.get_attr_val_utf8('userPassword') assert up.startswith('{SSHA256}') # Bind with correct. conn = user.bind(PASSWORD) # Check the pw is NOT upgraded! up = user.get_attr_val_utf8('userPassword') assert up.startswith('{SSHA256}') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwp_gracel_test.py000066400000000000000000000105551421664411400277540ustar00rootroot00000000000000""" # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ import os import pytest from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX from lib389.config import Config import ldap import time pytestmark = pytest.mark.tier1 def test_password_gracelimit_section(topo): """Password grace limit section. :id: d6f4a7fa-473b-11ea-8766-8c16451d917c :setup: Standalone :steps: 1. Resets the default password policy 2. Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 3. Check users have 7 grace login attempts after their password expires 4. Reset the user passwords to start the clock 5. The the 8th should fail 6. Now try resetting the password before the grace login attempts run out 7. Bind 6 times, and on the 7th change the password 8. Setting passwordMaxAge: 1 and passwordGraceLimit: 7 9. Modify the users passwords to start the clock of zero 10. First 7 good attempts, 8th should fail 11. Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 12. Modify the users passwords to start the clock 13. Users should be blocked automatically after 3 second :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. 
Success 12. Success 13. Success """ config = Config(topo.standalone) # Resets the default password policy config.replace_many( ('passwordmincategories', '1'), ('passwordStorageScheme', 'CLEAR')) user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() # Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 config.replace_many( ('passwordMaxAge', '3'), ('passwordGraceLimit', '7'), ('passwordexp', 'on'), ('passwordwarning', '30')) # Reset the user passwords to start the clock # Check users have 7 grace login attempts after their password expires user.replace('userpassword', '00fr3d1') for _ in range(3): time.sleep(1) user_account = UserAccount(topo.standalone, user.dn) for _ in range(7): conn = user_account.bind('00fr3d1') # The the 8th should fail with pytest.raises(ldap.INVALID_CREDENTIALS): conn = user_account.bind('00fr3d1') # Now try resetting the password before the grace login attempts run out user.replace('userpassword', '00fr3d2') for _ in range(3): time.sleep(1) user_account = UserAccount(topo.standalone, user.dn) # Bind 6 times, and on the 7th change the password for _ in range(6): conn = user_account.bind('00fr3d2') user.replace('userpassword', '00fr3d1') for _ in range(3): time.sleep(1) for _ in range(7): conn = user_account.bind('00fr3d1') with pytest.raises(ldap.INVALID_CREDENTIALS): conn = user_account.bind('00fr3d1') # Setting passwordMaxAge: 1 and passwordGraceLimit: 7 config.replace_many( ('passwordMaxAge', '1'), ('passwordwarning', '1')) # Modify the users passwords to start the clock of zero user.replace('userpassword', '00fr3d2') time.sleep(1) # First 7 good attempts, 8th should fail user_account = UserAccount(topo.standalone, user.dn) for _ in range(7): conn = user_account.bind('00fr3d2') with pytest.raises(ldap.INVALID_CREDENTIALS): conn = user_account.bind('00fr3d2') # Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 config.replace_many( ('passwordMaxAge', '3'), 
('passwordGraceLimit', '0')) # Modify the users passwords to start the clock # Users should be blocked automatically after 3 second user.replace('userpassword', '00fr3d1') for _ in range(3): time.sleep(1) with pytest.raises(ldap.INVALID_CREDENTIALS): conn = user_account.bind('00fr3d1') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE)389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwp_history_test.py000066400000000000000000000275341421664411400302250ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import time import logging from lib389.tasks import * from lib389.utils import ds_is_newer from lib389.topologies import topology_st from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.directorymanager import DirectoryManager from lib389.idm.organizationalunit import OrganizationalUnits from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) USER_PWD = 'password' @pytest.fixture(scope="function") def user(topology_st, request): """Add and remove a test user""" dm = DirectoryManager(topology_st.standalone) # Add aci so users can change their own password USER_ACI = '(targetattr="userpassword || passwordHistory")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) # Create a user users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user = users.create_test_user() user.set('userpassword', USER_PWD) def fin(): dm.rebind() user.delete() ou.remove('aci', USER_ACI) request.addfinalizer(fin) return user def test_history_is_not_overwritten(topology_st, 
user): """Test that passwordHistory user attribute is not overwritten :id: 1b311532-dd55-4072-88a9-1f960cb371bd :setup: Standalone instance, a test user :steps: 1. Configure password history policy as bellow: passwordHistory: on passwordInHistory: 3 2. Change the password 3 times 3. Try to change the password 2 more times to see if it rewrites passwordHistory even on a failure attempt 4. Try to change the password to the initial value (it should be still in history) :expectedresults: 1. Password history policy should be configured successfully 2. Success 3. Password changes should be correctly rejected with Constrant Violation error 4. Password change should be correctly rejected with Constrant Violation error """ topology_st.standalone.config.replace_many(('passwordHistory', 'on'), ('passwordInHistory', '3')) log.info('Configured password policy.') time.sleep(1) # Bind as the test user user.rebind(USER_PWD) time.sleep(.5) # Change the password 3 times user.set('userpassword', 'password1') user.rebind('password1') time.sleep(.5) user.set('userpassword', 'password2') user.rebind('password2') time.sleep(.5) user.set('userpassword', 'password3') user.rebind('password3') time.sleep(.5) # Try to change the password 2 more times to see # if it rewrites passwordHistory even on a failure attempt with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.set('userpassword', 'password2') time.sleep(.5) with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.set('userpassword', 'password1') time.sleep(.5) # Try to change the password to the initial value (it should be still in history) with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.set('userpassword', USER_PWD) def test_basic(topology_st, user): """Test basic password policy history feature functionality :id: 83d74f7d-3036-4944-8839-1b40bbf265ff :setup: Standalone instance, a test user :steps: 1. Configure password history policy as bellow: passwordHistory: on passwordInHistory: 3 passwordChange: on passwordStorageScheme: CLEAR 2. 
Attempt to change password to the same password 3. Change password four times 4. Check that we only have 3 passwords stored in history 5. Attempt to change the password to previous passwords 6. Reset password by Directory Manager (admin reset) 7. Try and change the password to the previous password before the reset 8. Test passwordInHistory set to "0" rejects only the current password 9. Test passwordInHistory set to "2" rejects previous passwords :expectedresults: 1. Password history policy should be configured successfully 2. Password change should be correctly rejected with Constrant Violation error 3. Password should be successfully changed 4. Only 3 passwords should be stored in history 5. Password changes should be correctly rejected with Constrant Violation error 6. Password should be successfully reset 7. Password change should be correctly rejected with Constrant Violation error 8. Success 9. Success """ # # Configure password history policy and add a test user # try: topology_st.standalone.config.replace_many(('passwordHistory', 'on'), ('passwordInHistory', '3'), ('passwordChange', 'on'), ('passwordStorageScheme', 'CLEAR'), ('nsslapd-auditlog-logging-enabled', 'on')) log.info('Configured password policy.') except ldap.LDAPError as e: log.fatal('Failed to configure password policy: ' + str(e)) assert False time.sleep(1) # Bind as the test user user.rebind(USER_PWD) # # Test that password history is enforced. 
# # Attempt to change password to the same password try: user.set('userpassword', 'password') log.info('Incorrectly able to to set password to existing password.') assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False # # Keep changing password until we fill the password history (3) # user.set('userpassword', 'password1') user.rebind('password1') time.sleep(.5) user.set('userpassword', 'password2') user.rebind('password2') time.sleep(.5) user.set('userpassword', 'password3') user.rebind('password3') time.sleep(.5) user.set('userpassword', 'password4') user.rebind('password4') time.sleep(.5) # # Check that we only have 3 passwords stored in history # pwds = user.get_attr_vals('passwordHistory') if len(pwds) != 3: log.fatal('Incorrect number of passwords stored in history: %d' % len(pwds)) log.error('password history: ' + str(pwds)) assert False else: log.info('Correct number of passwords found in history.') # # Attempt to change the password to previous passwords # try: user.set('userpassword', 'password1') log.fatal('Incorrectly able to to set password to previous password1.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False try: user.set('userpassword', 'password2') log.fatal('Incorrectly able to to set password to previous password2.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False try: user.set('userpassword', 'password3') log.fatal('Incorrectly able to to set password to previous 
password3.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False # # Reset password by Directory Manager(admin reset) # dm = DirectoryManager(topology_st.standalone) dm.rebind() time.sleep(.5) user.set('userpassword', 'password-reset') time.sleep(1) # Try and change the password to the previous password before the reset try: user.rebind('password-reset') user.set('userpassword', 'password4') log.fatal('Incorrectly able to to set password to previous password4.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False if ds_is_newer("1.4.1.2"): # # Test passwordInHistory to 0 # dm = DirectoryManager(topology_st.standalone) dm.rebind() try: topology_st.standalone.config.replace('passwordInHistory', '0') log.info('Configured passwordInHistory to 0.') except ldap.LDAPError as e: log.fatal('Failed to configure password policy (passwordInHistory to 0): ' + str(e)) assert False time.sleep(1) # Verify the older passwords in the entry (passwordhistory) are ignored user.rebind('password-reset') user.set('userpassword', 'password4') time.sleep(.5) try: user.set('userpassword', 'password4') log.fatal('Incorrectly able to to set password to current password4.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False # Need to make one successful update so history list is reset user.set('userpassword', 'password5') # # Set the history 
count back to a positive value and make sure things still work # as expected # dm = DirectoryManager(topology_st.standalone) dm.rebind() try: topology_st.standalone.config.replace('passwordInHistory', '2') log.info('Configured passwordInHistory to 2.') except ldap.LDAPError as e: log.fatal('Failed to configure password policy (passwordInHistory to 2): ' + str(e)) assert False time.sleep(1) try: user.rebind('password5') user.set('userpassword', 'password5') log.fatal('Incorrectly able to to set password to current password5.') log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False except ldap.CONSTRAINT_VIOLATION: log.info('Password change correctly rejected') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) assert False # Test that old password that was in history is not being checked try: user.set('userpassword', 'password1') except ldap.LDAPError as e: log.fatal('Failed to attempt to change password: ' + str(e)) log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) assert False # Done log.info('Test suite PASSED.') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/pwp_test.py000066400000000000000000000510221421664411400264310ustar00rootroot00000000000000""" # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ import os import pytest from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX from lib389.config import Config from lib389.idm.group import Group from lib389.utils import ds_is_older, is_fips import ldap import time pytestmark = pytest.mark.tier1 if ds_is_older('1.4'): DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512' else: if is_fips(): DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512' else: DEFAULT_PASSWORD_STORAGE_SCHEME = 'PBKDF2_SHA256' def _create_user(topo, uid, cn, uidNumber, userpassword): """ Will Create user """ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create(properties={ 'uid': uid, 'sn': cn.split(' ')[-1], 'cn': cn, 'givenname': cn.split(' ')[0], 'uidNumber': uidNumber, 'gidNumber': uidNumber, 'mail': f'{uid}@example.com', 'userpassword': userpassword, 'homeDirectory': f'/home/{uid}' }) return user def _change_password_with_own(topo, user_dn, password, new_password): """ Change user password with user self """ conn = UserAccount(topo.standalone, user_dn).bind(password) real_user = UserAccount(conn, user_dn) real_user.replace('userpassword', new_password) def _change_password_with_root(topo, user_dn, new_password): """ Root will change user password """ UserAccount(topo.standalone, user_dn).replace('userpassword', new_password) @pytest.fixture(scope="function") def _fix_password(topo, request): user = _create_user(topo, 'dbyers', 'Danny Byers', '1001', 'dbyers1') user.replace('userpassword', 'dbyers1') def fin(): user.delete() request.addfinalizer(fin) def test_passwordchange_to_no(topo, _fix_password): """Change password fo a user even password even though pw policy is set to no :id: 16c64ef0-5a20-11ea-a902-8c16451d917b :setup: Standalone :steps: 1. Adding an user with uid=dbyers 2. Set Password change to Must Not Change After Reset 3. Setting Password policy to May Not Change Password 4. 
Try to change password fo a user even password even though pw policy is set to no 5. Set Password change to May Change Password 6. Try to change password fo a user even password 7. Try to change password with invalid credentials. Should see error message. :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ # Adding an user with uid=dbyers user = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}' config = Config(topo.standalone) # Set Password change to Must Not Change After Reset config.replace_many( ('passwordmustchange', 'off'), ('passwordchange', 'off')) # Try to change password fo a user even password even though pw policy is set to no with pytest.raises(ldap.UNWILLING_TO_PERFORM): _change_password_with_own(topo, user, 'dbyers1', 'AB') # Set Password change to May Change Password config.replace('passwordchange', 'on') _change_password_with_own(topo, user, 'dbyers1', 'dbyers1') # Try to change password with invalid credentials. Should see error message. with pytest.raises(ldap.INVALID_CREDENTIALS): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'AB', 'dbyers1') def test_password_check_syntax(topo, _fix_password): """Password check syntax :id: 1e6fcc9e-5a20-11ea-9659-8c16451d917b :setup: Standalone :steps: 1. Sets Password check syntax to on 2. Try to change to a password that violates length. Should get error 3. Attempt to Modify password to db which is in error to policy 4. change min pw length to 5 5. Attempt to Modify password to dby3rs which is in error to policy 6. Attempt to Modify password to danny which is in error to policy 7. Attempt to Modify password to byers which is in error to policy 8. Change min pw length to 6 9. Try to change the password 10. Trying to set to a password containing value of sn 11. Sets policy to not check pw syntax 12. Test that when checking syntax is off, you can use small passwords 13. Test that when checking syntax is off, trivial passwords can be used 14. 
Changing password minimum length from 6 to 10 15. Setting policy to Check Password Syntax again 16. Try to change to a password that violates length 17. Reset Password :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Fail 17. Success """ config = Config(topo.standalone) # Sets Password check syntax to on config.replace('passwordchecksyntax', 'on') # Try to change to a password that violates length. Should get error with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') # Attempt to Modify password to db which is in error to policy with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'db') # change min pw length to 5 config.replace('passwordminlength', '5') # Attempt to Modify password to dby3rs which is in error to policy # Attempt to Modify password to danny which is in error to policy # Attempt to Modify password to byers which is in error to policy for password in ['dbyers', 'Danny', 'byers']: with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', password) # Change min pw length to 6 config.replace('passwordminlength', '6') # Try to change the password # Trying to set to a password containing value of sn for password in ['dby3rs1', 'dbyers2', '67Danny89', 'YAByers8']: with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', password) # Sets policy to not check pw syntax # Test that when checking syntax is off, you can use small passwords # Test that when checking syntax is off, trivial passwords can be used config.replace('passwordchecksyntax', 'off') for password, new_pass in [('dbyers1', 'db'), ('db', 
'dbyers'), ('dbyers', 'dbyers1')]: _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', password, new_pass) # Changing password minimum length from 6 to 10 # Setting policy to Check Password Syntax again config.replace_many( ('passwordminlength', '10'), ('passwordchecksyntax', 'on')) # Try to change to a password that violates length with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'db') UserAccount(topo.standalone, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}').replace('userpassword', 'dbyers1') def test_too_big_password(topo, _fix_password): """Test for long long password :id: 299a3fb4-5a20-11ea-bba8-8c16451d917b :setup: Standalone :steps: 1. Setting policy to keep password histories 2. Changing number of password in history to 3 3. Modify password from dby3rs1 to dby3rs2 4. Checking that the passwordhistory attribute has been added 5. Add a password test for long long password 6. Changing number of password in history to 6 and passwordhistory off :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ config = Config(topo.standalone) # Setting policy to keep password histories config.replace_many( ('passwordchecksyntax', 'off'), ('passwordhistory', 'on')) assert config.get_attr_val_utf8('passwordinhistory') == '6' # Changing number of password in history to 3 config.replace('passwordinhistory', '3') # Modify password from dby3rs1 to dby3rs2 _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') # Checking that the passwordhistory attribute has been added assert UserAccount(topo.standalone, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}').get_attr_val_utf8('passwordhistory') # Add a password test for long long password long_pass = 50*'0123456789'+'LENGTH=510' _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', long_pass) with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', long_pass, long_pass) _change_password_with_root(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1') # Changing number of password in history to 6 and passwordhistory off config.replace_many(('passwordhistory', 'off'), ('passwordinhistory', '6')) def test_pwminage(topo, _fix_password): """Test pwminage :id: 2df7bf32-5a20-11ea-ad23-8c16451d917b :setup: Standalone :steps: 1. Get pwminage; should be 0 currently 2. Sets policy to pwminage 3 3. Change current password 4. Try to change password again 5. Try now after 3 secs is up, should work. :expected results: 1. Success 2. Success 3. Success 4. Fail 5. 
Success """ config = Config(topo.standalone) # Get pwminage; should be 0 currently assert config.get_attr_val_utf8('passwordminage') == '0' # Sets policy to pwminage 3 config.replace('passwordminage', '3') # Change current password _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') # Try to change password again with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') for _ in range(3): time.sleep(1) # Try now after 3 secs is up, should work. _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') config.replace('passwordminage', '0') def test_invalid_credentials(topo, _fix_password): """Test bind again with valid password: We should be locked :id: 3233ca78-5a20-11ea-8d35-8c16451d917b :setup: Standalone :steps: 1. Search if passwordlockout is off 2. Turns on passwordlockout 3. sets lockout duration to 3 seconds 4. Changing pw failure count reset duration to 3 sec and passwordminlength to 10 5. Try to bind with invalid credentials 6. Change password to password lockout forever 7. Try to bind with invalid credentials 8. Now bind again with valid password: We should be locked 9. Delete dby3rs before exiting 10. Reset server :expected results: 1. Success 2. Success 3. Success 4. Success 5. Fail 6. Success 7. Success 8. Success 9. Success 10. 
Success """ config = Config(topo.standalone) # Search if passwordlockout is off assert config.get_attr_val_utf8('passwordlockout') == 'off' # Turns on passwordlockout # sets lockout duration to 3 seconds # Changing pw failure count reset duration to 3 sec and passwordminlength to 10 config.replace_many( ('passwordlockout', 'on'), ('passwordlockoutduration', '3'), ('passwordresetfailurecount', '3'), ('passwordminlength', '10')) # Try to bind with invalid credentials for _ in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') for _ in range(3): time.sleep(1) _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') # Change password to password lockout forever config.replace('passwordunlock', 'off') # Try to bind with invalid credentials for _ in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') for _ in range(3): time.sleep(1) # Now bind again with valid password: We should be locked with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') # Delete dby3rs before exiting _change_password_with_root(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1') time.sleep(1) _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') # Reset server config.replace_many( ('passwordinhistory', '6'), ('passwordlockout', 'off'), ('passwordlockoutduration', '3600'), ('passwordminlength', '6'), ('passwordresetfailurecount', '600'), 
('passwordunlock', 'on')) def test_expiration_date(topo, _fix_password): """Test check the expiration date is still in the future :id: 3691739a-5a20-11ea-8712-8c16451d917b :setup: Standalone :steps: 1. Password expiration 2. Add a user with a password expiration date 3. Modify their password 4. Check the expiration date is still in the future 5. Modify the password expiration date 6. Check the expiration date is still in the future 7. Change policy so that user can change passwords 8. Deleting user 9. Adding user 10. Set password history ON 11. Modify password Once 12. Try to change the password with same one :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Fail """ # Add a user with a password expiration date user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() user.replace_many( ('userpassword', 'bind4now'), ('passwordExpirationTime', '20380119031404Z')) # Modify their password user.replace('userPassword', 'secreter') # Check the expiration date is still in the future assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031404Z' # Modify the password expiration date user.replace('passwordExpirationTime', '20380119031405Z') # Check the expiration date is still in the future assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031405Z' config = Config(topo.standalone) # Change policy so that user can change passwords config.replace('passwordchange', 'on') # Deleting user UserAccount(topo.standalone, f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}').delete() # Adding user user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() # Set password history ON config.replace('passwordhistory', 'on') # Modify password Once user.replace('userPassword', 'secreter') time.sleep(1) assert DEFAULT_PASSWORD_STORAGE_SCHEME in user.get_attr_val_utf8('userPassword') # Try to change the password with same one for _ in 
range(3): with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, user.dn, 'secreter', 'secreter') user.delete() def test_passwordlockout(topo, _fix_password): """Test adding admin user diradmin to Directory Administrator group :id: 3ffcffda-5a20-11ea-a3af-8c16451d917b :setup: Standalone :steps: 1. Account Lockout must be cleared on successful password change 2. Adding admin user diradmin 3. Adding admin user diradmin to Directory Administrator group 4. Turn on passwordlockout 5. Sets lockout duration to 30 seconds 6. Sets failure count reset duration to 30 sec 7. Sets max password bind failure count to 3 8. Reset password retry count (to 0) 9. Try to bind with invalid credentials(3 times) 10. Try to bind with valid pw, should give lockout error 11. Reset password using admin login 12. Try to login as the user to check the unlocking of account. Will also change the password back to original 13. Change to account lockout forever until reset 14. Reset password retry count (to 0) 15. Try to bind with invalid credentials(3 times) 16. Try to bind with valid pw, should give lockout error 17. Reset password using admin login 18. Try to login as the user to check the unlocking of account. Will also change the password back to original :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Fail 10. Success 11. Success 12. Success 13. Success 14. Success 15. Fail 16. Success 17. Success 18. 
Success """ config = Config(topo.standalone) # Adding admin user diradmin user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() user.replace('userpassword', 'dby3rs2') admin = _create_user(topo, 'diradmin', 'Anuj Borah', '1002', 'diradmin') # Adding admin user diradmin to Directory Administrator group Group(topo.standalone, f'cn=user_passwd_reset,ou=permissions,{DEFAULT_SUFFIX}').add('member', admin.dn) # Turn on passwordlockout # Sets lockout duration to 30 seconds # Sets failure count reset duration to 30 sec # Sets max password bind failure count to 3 # Reset password retry count (to 0) config.replace_many( ('passwordlockout', 'on'), ('passwordlockoutduration', '30'), ('passwordresetfailurecount', '30'), ('passwordmaxfailure', '3'), ('passwordhistory', 'off')) user.replace('passwordretrycount', '0') # Try to bind with invalid credentials(3 times) for _ in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') # Try to bind with valid pw, should give lockout error with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') # Reset password using admin login conn = admin.bind('diradmin') UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2') time.sleep(1) # Try to login as the user to check the unlocking of account. 
Will also change # the password back to original _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter') # Change to account lockout forever until reset # Reset password retry count (to 0) config.replace('passwordunlock', 'off') user.replace('passwordretrycount', '0') # Try to bind with invalid credentials(3 times) for _ in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') # Try to bind with valid pw, should give lockout error with pytest.raises(ldap.CONSTRAINT_VIOLATION): _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') # Reset password using admin login UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2') time.sleep(1) # Try to login as the user to check the unlocking of account. Will also change the # password back to original _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter') if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/regression_of_bugs_test.py000066400000000000000000000644531421664411400315230ustar00rootroot00000000000000""" # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- """ import os import pytest from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX, DN_DM from lib389.config import Config from lib389.idm.domain import Domain from lib389.idm.group import UniqueGroups, UniqueGroup from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit from lib389.pwpolicy import PwPolicyManager import time import ldap pytestmark = pytest.mark.tier1 def _create_user(topo, uid, ou): user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=ou).create(properties={ 'uid': uid, 'cn': uid, 'sn': uid, 'mail': f'{uid}@example.com', 'homeDirectory': f'/home/{uid}', 'uidNumber': '1000', 'gidNumber': '1000' }) return user def change_pwp_parameter(topo, pwp, operation, to_do): """ Will change password policy parameter """ pwp1 = PwPolicyManager(topo.standalone) user = pwp1.get_pwpolicy_entry(f'{pwp},{DEFAULT_SUFFIX}') user.replace(operation, to_do) def _create_pwp(topo, instance): """ Will create pwp """ policy_props = {} pwp = PwPolicyManager(topo.standalone) pwadm_locpol = pwp.create_subtree_policy(instance, policy_props) for attribute, value in [ ('passwordexp', 'off'), ('passwordchange', 'off'), ('passwordmustchange', 'off'), ('passwordchecksyntax', 'off'), ('passwordinhistory', '6'), ('passwordhistory', 'off'), ('passwordlockout', 'off'), ('passwordlockoutduration', '3600'), ('passwordmaxage', '8640000'), ('passwordmaxfailure', '3'), ('passwordminage', '0'), ('passwordminlength', '6'), ('passwordresetfailurecount', '600'), ('passwordunlock', 'on'), ('passwordStorageScheme', 'CLEAR'), ('passwordwarning', '86400'), ('passwordTPRMaxUse', '-1'), ('passwordTPRDelayExpireAt', '-1'), ('passwordTPRDelayValidFrom', '-1') ]: pwadm_locpol.add(attribute, value) return pwadm_locpol def change_password_of_user(topo, user_password_new_pass_list, pass_to_change): """ Will change password with self binding. 
""" for user, password, new_pass in user_password_new_pass_list: real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') conn = real_user.bind(password) UserAccount(conn, pass_to_change).replace('userpassword', new_pass) @pytest.fixture(scope="function") def _add_user(request, topo): for uid, ou_ou in [('pwadm_user_1', None), ('pwadm_user_2', 'ou=People')]: _create_user(topo, uid, ou_ou) for uid, ou_ou in [('pwadm_admin_2', 'ou=People'), ('pwadm_admin_3', 'ou=People'), ('pwadm_admin_4', 'ou=People')]: user = _create_user(topo, uid, ou_ou) user.replace('userpassword', 'Secret123') def fin(): for user1 in UserAccounts(topo.standalone, DEFAULT_SUFFIX).list(): user1.delete() for user1 in UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).list(): user1.delete() request.addfinalizer(fin) @pytest.mark.bz1044164 def test_local_password_policy(topo, _add_user): """Regression test for bz1044164 part 1. :id: d6f4a7fa-473b-11ea-8766-8c16451d917b :setup: Standalone :steps: 1. Add a User as Password Admin 2. Create a password admin user entry 3. Add an aci to allow this user all rights 4. Configure password admin 5. Create local password policy and enable passwordmustchange 6. Add another generic user but do not include the password (userpassword) 7. Use admin user to perform a password update on generic user 8. We don't need this ACI anymore. Delete it :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. 
Success """ # Add a User as Password Admin # Create a password admin user entry user = _create_user(topo, 'pwadm_admin_1', None) user.replace('userpassword', 'Secret123') domian = Domain(topo.standalone, DEFAULT_SUFFIX) # Add an aci to allow this user all rights domian.set("aci", f'(targetattr ="userpassword")' f'(version 3.0;acl "Allow password admin to write user ' f'passwords";allow (write)(userdn = "ldap:///{user.dn}");)') # Configure password admin # Create local password policy and enable passwordmustchange Config(topo.standalone).replace_many( ('passwordAdminDN', user.dn), ('passwordMustChange', 'off'), ('nsslapd-pwpolicy-local', 'on')) # Add another generic user but do not include the password (userpassword) # Use admin user to perform a password update on generic user real_user = UserAccount(topo.standalone, f'uid=pwadm_admin_1,{DEFAULT_SUFFIX}') conn = real_user.bind('Secret123') UserAccount(conn, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}').replace('userpassword', 'hello') # We don't need this ACI anymore. Delete it domian.remove("aci", f'(targetattr ="userpassword")' f'(version 3.0;acl "Allow password admin to write user ' f'passwords";allow (write)(userdn = "ldap:///{user.dn}");)') @pytest.mark.bz1118006 def test_passwordexpirationtime_attribute(topo, _add_user): """Regression test for bz1118006. :id: 867472d2-473c-11ea-b583-8c16451d917b :setup: Standalone :steps: 1. Check that the passwordExpirationTime attribute is set to the epoch date :expected results: 1. 
Success """ Config(topo.standalone).replace('passwordMustChange', 'on') epoch_date = "19700101000000Z" time.sleep(1) user = UserAccount(topo.standalone, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}') user.replace('userpassword', 'Secret123') time.sleep(1) # Check that the passwordExpirationTime attribute is set to the epoch date assert user.get_attr_val_utf8('passwordExpirationTime') == epoch_date Config(topo.standalone).replace('passwordMustChange', 'off') time.sleep(1) @pytest.mark.bz1118007 @pytest.mark.bz1044164 def test_admin_group_to_modify_password(topo, _add_user): """Regression test for bz1044164 part 2. :id: 12e09446-52da-11ea-aa11-8c16451d917b :setup: Standalone :steps: 1. Create unique members of admin group 2. Create admin group with unique members 3. Edit ACIs for admin group 4. Add group as password admin 5. Test password admin group to modify password of another admin user 6. Use admin user to perform a password update on Directory Manager user 7. Test password admin group for local password policy 8. Add top level container 9. Add user 10. Create local policy configuration entry 11. Adding admin group for local policy 12. Change user's password by admin user. Break the local policy rule 13. Test password admin group for global password policy 14. Add top level container 15. Change user's password by admin user. Break the global policy rule 16. Add new user in password admin group 17. Modify ordinary user's password 18. Modify user DN using modrdn of a user in password admin group 19. Test assigning invalid value to password admin attribute 20. Try to add more than one Password Admin attribute to config file 21. Use admin group setup from previous testcases, but delete ACI from that 22. Try to change user's password by admin user 23. Restore ACI 24. Edit ACIs for admin group 25. Delete a user from password admin group 26. Change users password by ex-admin user 27. Remove group from password admin configuration 28. Change admins 29. 
Change user's password by ex-admin user 30. Change admin user's password by ex-admin user :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Fail(ldap.INSUFFICIENT_ACCESS) 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success 19. Fail 20. Fail 21. Success 22. Success 23. Success 24. Success 25. Success 26. Success 27. Success 28. Success 29. Fail 30. Fail """ # create unique members of admin group admin_grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX).create(properties={ 'cn': 'pwadm_group_adm', 'description': 'pwadm_group_adm', 'uniqueMember': [f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}', f'uid=pwadm_admin_3,ou=People,{DEFAULT_SUFFIX}'] }) # Edit ACIs for admin group Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}").set('aci', f'(targetattr ="userpassword")' f'(version 3.0;acl "Allow passwords admin to write user ' f'passwords";allow (write)(groupdn = "ldap:///{admin_grp.dn}");)') # Add group as password admin Config(topo.standalone).replace('passwordAdminDN', admin_grp.dn) # Test password admin group to modify password of another admin user change_password_of_user(topo, [ ('uid=pwadm_admin_2,ou=People', 'Secret123', 'hello')], f'uid=pwadm_admin_3,ou=people,{DEFAULT_SUFFIX}') # Use admin user to perform a password update on Directory Manager user with pytest.raises(ldap.INSUFFICIENT_ACCESS): change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hello')], f'{DN_DM},{DEFAULT_SUFFIX}') # Add top level container ou = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'pwadm_locpol'}) # Change user's password by admin user. 
Break the global policy rule # Add new user in password admin group user = _create_user(topo, 'pwadm_locpol_user', 'ou=pwadm_locpol') user.replace('userpassword', 'Secret123') # Create local policy configuration entry _create_pwp(topo, ou.dn) # Set parameter for pwp for para_meter, op_op in [ ('passwordLockout', 'on'), ('passwordMaxFailure', '4'), ('passwordLockoutDuration', '10'), ('passwordResetFailureCount', '100'), ('passwordMinLength', '8'), ('passwordAdminDN', f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}')]: change_pwp_parameter(topo, 'ou=pwadm_locpol', para_meter, op_op) # Set ACI OrganizationalUnit(topo.standalone, ou.dn).set('aci', f'(targetattr ="userpassword")' f'(version 3.0;acl "Allow passwords admin to write user ' f'passwords";allow (write)' f'(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') # Change password with new admin change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], user.dn) # Set global parameter Config(topo.standalone).replace_many( ('passwordTrackUpdateTime', 'on'), ('passwordGraceLimit', '4'), ('passwordHistory', 'on'), ('passwordInHistory', '4')) # Test password admin group for global password policy change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') # Adding admin group for local policy grp = UniqueGroup(topo.standalone, f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}') grp.add('uniqueMember', f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}') # Modify ordinary user's password change_password_of_user(topo, [('uid=pwadm_admin_4,ou=People', 'Secret123', 'Secret')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') # Modify user DN using modrdn of a user in password admin group UserAccount(topo.standalone, f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}').rename('uid=pwadm_admin_4_new') # Remove admin grp.remove('uniqueMember', f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}') # Add Admin 
grp.add('uniqueMember', f'uid=pwadm_admin_4_new,ou=People,{DEFAULT_SUFFIX}') # Test the group pwp again with pytest.raises(ldap.INVALID_CREDENTIALS): change_password_of_user(topo, [(f'uid=pwadm_admin_4,ou=People', 'Secret123', 'Secret1')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') change_password_of_user(topo, [(f'uid=pwadm_admin_4_new,ou=People', 'Secret123', 'Secret1')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') with pytest.raises(ldap.INVALID_SYNTAX): Config(topo.standalone).replace('passwordAdminDN', "Invalid") # Test assigning invalid value to password admin attribute # Try to add more than one Password Admin attribute to config file with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): Config(topo.standalone).replace('passwordAdminDN', [f'uid=pwadm_admin_2,ou=people,{DEFAULT_SUFFIX}', f'uid=pwadm_admin_3,ou=people,{DEFAULT_SUFFIX}']) # Use admin group setup from previous, but delete ACI from that people = Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}") people.remove('aci', f'(targetattr ="userpassword")(version 3.0;acl ' f'"Allow passwords admin to write user ' f'passwords";allow (write)' f'(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') # Try to change user's password by admin user with pytest.raises(ldap.INSUFFICIENT_ACCESS): change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') # Restore ACI people.set('aci', f'(targetattr ="userpassword")(version 3.0;acl ' f'"Allow passwords admin to write user ' f'passwords";allow (write)(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') # Edit ACIs for admin group people.add('aci', f'(targetattr ="userpassword")(version 3.0;acl ' f'"Allow passwords admin to add user ' f'passwords";allow (add)(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') UserAccount(topo.standalone, f'uid=pwadm_user_2,ou=people,{DEFAULT_SUFFIX}').replace('userpassword', 'Secret') real_user = 
UserAccount(topo.standalone, f'uid=pwadm_user_2,ou=people,{DEFAULT_SUFFIX}') conn = real_user.bind('Secret') # Test new aci with pytest.raises(ldap.INSUFFICIENT_ACCESS): UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=People').create(properties={ 'uid': 'ok', 'cn': 'ok', 'sn': 'ok', 'uidNumber': '1000', 'gidNumber': 'ok', 'homeDirectory': '/home/ok'}) UserAccounts(topo.standalone, DEFAULT_SUFFIX).list() real_user = UserAccount(topo.standalone, f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}') conn = real_user.bind('Secret123') # Test new aci which has new rights for uid, cn, password in [ ('pwadm_user_3', 'pwadm_user_1', 'U2VjcmV0MTIzCg=='), ('pwadm_user_4', 'pwadm_user_2', 'U2VjcmV0MTIzCg==')]: UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=People').create(properties={ 'uid': uid, 'cn': cn, 'sn': cn, 'uidNumber': '1000', 'gidNumber': '1001', 'homeDirectory': f'/home/{uid}', 'userpassword': password}) # Remove ACI Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}").remove('aci', f'(targetattr ="userpassword")' f'(version 3.0;acl ' f'"Allow passwords admin to add user ' f'passwords";allow ' f'(add)(groupdn = ' f'"ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') # Delete a user from password admin group grp = UniqueGroup(topo.standalone, f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}') grp.remove('uniqueMember', f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}') # Change users password by ex-admin user with pytest.raises(ldap.INSUFFICIENT_ACCESS): change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Secret')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') # Set aci for only user people = Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}") people.remove('aci', f'(targetattr ="userpassword")(version 3.0;acl ' f'"Allow passwords admin to write user ' f'passwords";allow (write)(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') people.set('aci', f'(targetattr ="userpassword")(version 3.0;acl "Allow passwords admin ' 
f'to write user passwords";allow (write)(groupdn = "ldap:///uid=pwadm_admin_1,{DEFAULT_SUFFIX}");)') # Remove group from password admin configuration Config(topo.standalone).replace('passwordAdminDN', f"uid=pwadm_admin_1,{DEFAULT_SUFFIX}") # Change user's password by ex-admin user with pytest.raises(ldap.INSUFFICIENT_ACCESS): change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hellso')], f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') with pytest.raises(ldap.INSUFFICIENT_ACCESS): change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hellso')], f'uid=pwadm_admin_1,{DEFAULT_SUFFIX}') @pytest.mark.bz834060 def test_password_max_failure_should_lockout_password(topo): """Regression test for bz834060. :id: f2064efa-52d9-11ea-8037-8c16451d917b :setup: Standalone :steps: 1. passwordMaxFailure should lockout password one sooner 2. Setting passwordLockout to \"on\" 3. Set maximum number of login tries to 3 4. Turn off passwordLegacyPolicy 5. Turn off local password policy, so that global is applied :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success """ config = Config(topo.standalone) config.replace_many( ('passwordLockout', 'on'), ('passwordMaxFailure', '3'), ('passwordLegacyPolicy', 'off'), ('nsslapd-pwpolicy-local', 'off')) user = _create_user(topo, 'tuser', 'ou=people') user.replace('userpassword', 'password') for _ in range(2): with pytest.raises(ldap.INVALID_CREDENTIALS): user.bind('Invalid') with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.bind("Invalid") config.replace('nsslapd-pwpolicy-local', 'on') @pytest.mark.bz834063 def test_pwd_update_time_attribute(topo): """Regression test for bz834063 :id: ec2b1d4e-52d9-11ea-b13e-8c16451d917b :setup: Standalone :steps: 1. Add the attribute passwordTrackUpdateTime to cn=config 2. Add a test entry while passwordTrackUpdateTime is on 3. Check if new attribute pwdUpdateTime added automatically after changing the pwd 4. Modify User pwd 5. 
check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on 6. Set passwordTrackUpdateTime to OFF and modify test entry's pwd 7. Check passwordUpdateTime should not be changed 8. Record last pwdUpdateTime before changing the password 9. Modify Pwd 10. Set passwordTrackUpdateTime to ON and modify test entry's pwd, check passwordUpdateTime should be changed 11. Try setting Invalid value for passwordTrackUpdateTime 12. Try setting Invalid value for pwdupdatetime :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Fail 12. Fail """ config = Config(topo.standalone) # Add the attribute passwordTrackUpdateTime to cn=config config.replace('passwordTrackUpdateTime', 'on') # Add a test entry while passwordTrackUpdateTime is on user = _create_user(topo, 'test_bz834063', None) user.set('userpassword', 'Unknown') # Modify User pwd user.replace('userpassword', 'Unknown1') # Check if new attribute pwdUpdateTime added automatically after changing the pwd assert user.get_attr_val_utf8('pwdUpdateTime') # Set passwordTrackUpdateTime to OFF and modify test entry's pwd config.replace('passwordTrackUpdateTime', 'off') # Record last pwdUpdateTime before changing the password update_time = user.get_attr_val_utf8('pwdUpdateTime') time.sleep(1) user.replace('userpassword', 'Unknown') # Check passwordUpdateTime should not be changed update_time_again = user.get_attr_val_utf8('pwdUpdateTime') assert update_time == update_time_again # Set passwordTrackUpdateTime to ON and modify test entry's pwd, # check passwordUpdateTime should be changed time.sleep(1) config.replace('passwordTrackUpdateTime', 'on') user.replace('userpassword', 'Unknown') time.sleep(1) update_time_1 = user.get_attr_val_utf8('pwdUpdateTime') assert update_time_again != update_time_1 with pytest.raises(ldap.OPERATIONS_ERROR): config.replace('passwordTrackUpdateTime', "invalid") with 
pytest.raises(ldap.UNWILLING_TO_PERFORM): config.replace('pwdupdatetime', 'Invalid') def test_password_track_update_time(topo): """passwordTrackUpdateTime stops working with subtree password policies :id: e5d3e4c6-52d9-11ea-a65e-8c16451d917b :setup: Standalone :steps: 1. Add users 2. Create local policy configuration entry for subsuffix 3. Enable passwordTrackUpdateTime to local policy configuration entry 4. Check that attribute passwordUpdate was added to entries 5. check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on 6. Set passwordTrackUpdateTime to OFF and modify test entry's pwd, check passwordUpdateTime should not be changed 7. Record last pwdUpdateTime before changing the password 8. Modify Pwd 9. Check current pwdUpdateTime 10. Set passwordTrackUpdateTime to ON and modify test entry's pwd, check passwordUpdateTime should be changed :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. 
Success """ # Add users user1 = _create_user(topo, 'trac478_user1', None) user2 = _create_user(topo, 'trac478_user2', None) # Create local policy configuration entry for subsuffix pwp_for_sufix = _create_pwp(topo, DEFAULT_SUFFIX) pwp_for_user2 = _create_pwp(topo, user2.dn) # Enable passwordTrackUpdateTime to local policy configuration entry for instance in [pwp_for_user2, pwp_for_sufix]: instance.replace('passwordTrackUpdateTime', 'on') # Check that attribute passwordUpdate was added to entries # check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on for user in [user1, user2]: user.replace('userpassword', 'pwd') time.sleep(1) assert user.get_attr_val_utf8('pwdUpdateTime') # Set passwordTrackUpdateTime to OFF and modify test entry's pwd, # check passwordUpdateTime should not be changed pwp_for_sufix.replace('passwordTrackUpdateTime', 'off') # Record last pwdUpdateTime before changing the password last_login_time_user1 = user1.get_attr_val_utf8('pwdUpdateTime') last_login_time_user2 = user2.get_attr_val_utf8('pwdUpdateTime') time.sleep(1) # Modify Pwd user1.replace('userpassword', 'pwd1') # Check current pwdUpdateTime last_login_time_user1_last = user1.get_attr_val_utf8('pwdUpdateTime') assert last_login_time_user1 == last_login_time_user1_last # Set passwordTrackUpdateTime to ON and modify test entry's pwd, # check passwordUpdateTime should be changed pwp_for_user2.replace('passwordTrackUpdateTime', 'off') time.sleep(1) user2.replace('userpassword', 'pwd1') last_login_time_user2_last = user2.get_attr_val_utf8('pwdUpdateTime') assert last_login_time_user1 == last_login_time_user1_last assert last_login_time_user2 == last_login_time_user2_last pwp_for_sufix.replace('passwordTrackUpdateTime', 'on') user1.replace('userpassword', 'pwd1') time.sleep(1) last_login_time_user1_last = user1.get_attr_val_utf8('pwdUpdateTime') assert last_login_time_user1 != last_login_time_user1_last pwp_for_user2.replace('passwordTrackUpdateTime', 
'on') time.sleep(1) user2.replace('userpassword', 'pwd1') time.sleep(1) last_login_time_user2_last = user2.get_attr_val_utf8('pwdUpdateTime') assert last_login_time_user2 != last_login_time_user2_last @pytest.mark.bz834063 def test_signal_11(topo): """ns-slapd instance crashed with signal 11 SIGSEGV :id: d757b9ae-52d9-11ea-802f-8c16451d917b :setup: Standalone :steps: 1. Adding new user 2. Modifying user passwod of uid=bz973583 :expected results: 1. Success 2. Success """ user = _create_user(topo, 'bz973583', None) user.set('userpassword', 'Secret123') user.remove('userpassword', 'Secret123') user.set('userpassword', 'new') assert topo.standalone.status() if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/password/regression_test.py000066400000000000000000000330551421664411400300110ustar00rootroot00000000000000# Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import time import glob from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB, DEFAULT_BENAME from lib389 import Entry from lib389.topologies import topology_m1 as topo_supplier from lib389.idm.user import UserAccounts from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog from lib389.topologies import topology_st as topo from lib389.idm.organizationalunit import OrganizationalUnits pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) user_data = {'cn': 'CNpwtest1', 'sn': 'SNpwtest1', 'uid': 'UIDpwtest1', 'mail': 'MAILpwtest1@redhat.com', 'givenname': 'GNpwtest1'} TEST_PASSWORDS = list(user_data.values()) # Add substring/token values of "CNpwtest1" TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1', 'ZCNpwtest1', 'CNpwtest1Z', 'ZCNpwtest1Z', 'ZZCNpwtest1', 'CNpwtest1ZZ', 'ZZCNpwtest1ZZ', 'ZZZCNpwtest1', 'CNpwtest1ZZZ', 'ZZZCNpwtest1ZZZ', 'ZZZZZZCNpwtest1ZZZZZZZZ'] TEST_PASSWORDS2 = ( 'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123') def _check_unhashed_userpw(inst, user_dn, is_present=False): """Check if unhashed#user#password attribute is present or not in the changelog""" unhashed_pwd_attribute = 'unhashed#user#password' if ds_supports_new_changelog(): dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog') else: changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB) for changelog_dbfile in glob.glob(f'{changelog_dbdir}*/*.db*'): log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile)) dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile) for entry 
in dbscanOut.split(b'dbid: '): if ensure_bytes('operation: modify') in entry and ensure_bytes(user_dn) in entry and ensure_bytes('userPassword') in entry: if is_present: assert ensure_bytes(unhashed_pwd_attribute) in entry else: assert ensure_bytes(unhashed_pwd_attribute) not in entry @pytest.fixture(scope="module") def passw_policy(topo, request): """Configure password policy with PasswordCheckSyntax attribute set to on""" log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to on') topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('PasswordExp', 'on') topo.standalone.config.set('PasswordCheckSyntax', 'off') topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') subtree = 'ou=people,{}'.format(DEFAULT_SUFFIX) log.info('Configure subtree password policy for {}'.format(subtree)) topo.standalone.subtreePwdPolicy(subtree, {'passwordchange': b'on', 'passwordCheckSyntax': b'on', 'passwordLockout': b'on', 'passwordResetFailureCount': b'3', 'passwordLockoutDuration': b'3', 'passwordMaxFailure': b'2'}) time.sleep(1) def fin(): log.info('Reset pwpolicy configuration settings') topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('PasswordExp', 'off') topo.standalone.config.set('PasswordCheckSyntax', 'off') topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') request.addfinalizer(fin) @pytest.fixture(scope="module") def create_user(topo, request): """Add test users using UserAccounts""" log.info('Adding user-uid={},ou=people,{}'.format(user_data['uid'], DEFAULT_SUFFIX)) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user_properties = { 'uidNumber': '1001', 'gidNumber': '2001', 'cn': 'pwtest1', 'userpassword': PASSWORD, 'homeDirectory': '/home/pwtest1'} user_properties.update(user_data) tuser = users.create(properties=user_properties) def fin(): log.info('Deleting user-{}'.format(tuser.dn)) tuser.delete() request.addfinalizer(fin) return tuser def test_pwp_local_unlock(topo, 
passw_policy, create_user): """Test subtree policies use the same global default for passwordUnlock :id: 741a8417-5f65-4012-b9ed-87987ce3ca1b :setup: Standalone instance :steps: 1. Test user can bind 2. Bind with bad passwords to lockout account, and verify account is locked 3. Wait for lockout interval, and bind with valid password :expectedresults: 1. Bind successful 2. Entry is locked 3. Entry can bind with correct password """ # Add aci so users can change their own password USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou = ous.get('people') ou.add('aci', USER_ACI) log.info("Verify user can bind...") create_user.bind(PASSWORD) log.info('Test passwordUnlock default - user should be able to reset password after lockout') for i in range(0, 2): try: create_user.bind("bad-password") except ldap.INVALID_CREDENTIALS: # expected pass except ldap.LDAPError as e: log.fatal("Got unexpected failure: " + str(e)) raise e log.info('Verify account is locked') with pytest.raises(ldap.CONSTRAINT_VIOLATION): create_user.bind(PASSWORD) log.info('Wait for lockout duration...') time.sleep(4) log.info('Check if user can now bind with correct password') create_user.bind(PASSWORD) @pytest.mark.bz1465600 @pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) def test_trivial_passw_check(topo, passw_policy, create_user, user_pasw): """PasswordCheckSyntax attribute fails to validate cn, sn, uid, givenname, ou and mail attributes :id: bf9fe1ef-56cb-46a3-a6f8-5530398a06dc :parametrized: yes :setup: Standalone instance. :steps: 1. Configure local password policy with PasswordCheckSyntax set to on. 2. Add users with cn, sn, uid, givenname, mail and userPassword attributes. 3. Configure subtree password policy for ou=people subtree. 4. Reset userPassword with trivial values like cn, sn, uid, givenname, ou and mail attributes. :expectedresults: 1. 
Enabling PasswordCheckSyntax should PASS. 2. Add users should PASS. 3. Configure subtree password policy should PASS. 4. Resetting userPassword to cn, sn, uid and mail should be rejected. """ create_user.rebind(PASSWORD) log.info('Replace userPassword attribute with {}'.format(user_pasw)) with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo: create_user.reset_password(user_pasw) log.fatal('Failed: Userpassword with {} is accepted'.format(user_pasw)) assert 'password based off of user entry' in str(excinfo.value) # reset password topo.standalone.simple_bind_s(DN_DM, PASSWORD) create_user.set('userPassword', PASSWORD) @pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) def test_global_vs_local(topo, passw_policy, create_user, user_pasw): """Passwords rejected if its similar to uid, cn, sn, givenname, ou and mail attributes :id: dfd6cf5d-8bcd-4895-a691-a43ad9ec1be8 :parametrized: yes :setup: Standalone instance :steps: 1. Configure global password policy with PasswordCheckSyntax set to off 2. Add users with cn, sn, uid, mail, givenname and userPassword attributes 3. Replace userPassword similar to cn, sn, uid, givenname, ou and mail attributes :expectedresults: 1. Disabling the local policy should PASS. 2. Add users should PASS. 3. Resetting userPasswords similar to cn, sn, uid, givenname, ou and mail attributes should PASS. 
""" log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to off') topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') create_user.rebind(PASSWORD) log.info('Replace userPassword attribute with {}'.format(user_pasw)) create_user.reset_password(user_pasw) # reset password create_user.set('userPassword', PASSWORD) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.ds49789 def test_unhashed_pw_switch(topo_supplier): """Check that nsslapd-unhashed-pw-switch works corrently :id: e5aba180-d174-424d-92b0-14fe7bb0b92a :setup: Supplier Instance :steps: 1. A Supplier is created, enable retrocl (not used here) 2. Create a set of users 3. update userpassword of user1 and check that unhashed#user#password is not logged (default) 4. udpate userpassword of user2 and check that unhashed#user#password is not logged ('nolog') 5. udpate userpassword of user3 and check that unhashed#user#password is logged ('on') :expectedresults: 1. Success 2. Success 3. Success (unhashed#user#password is not logged in the replication changelog) 4. Success (unhashed#user#password is not logged in the replication changelog) 5. Success (unhashed#user#password is logged in the replication changelog) """ MAX_USERS = 10 PEOPLE_DN = ("ou=people," + DEFAULT_SUFFIX) inst = topo_supplier.ms["supplier1"] inst.modify_s("cn=Retro Changelog Plugin,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b'2m'), (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s"), (ldap.MOD_REPLACE, 'nsslapd-logAccess', b'on')]) inst.config.loglevel(vals=[256 + 4], service='access') inst.restart() # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. 
# enable dynamic plugins, memberof and retro cl plugin # log.info('Enable plugins...') try: inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: ldap.error('Failed to enable dynamic plugins! ' + e.message['desc']) assert False #topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) #topology_st.standalone.modify_s("cn=changelog,cn=ldbm database,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', str(100000))]) inst.restart() log.info('create users and group...') for idx in range(1, MAX_USERS): try: USER_DN = ("uid=member%d,%s" % (idx, PEOPLE_DN)) inst.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) assert False # Check default is that unhashed#user#password is not logged on 1.4.1.6+ user = "uid=member1,%s" % (PEOPLE_DN) inst.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD.encode())]) inst.stop() if ds_is_newer('1.4.1.6'): _check_unhashed_userpw(inst, user, is_present=False) else: _check_unhashed_userpw(inst, user, is_present=True) # Check with nolog that unhashed#user#password is not logged inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-unhashed-pw-switch', b'nolog')]) inst.restart() user = "uid=member2,%s" % (PEOPLE_DN) inst.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD.encode())]) inst.stop() _check_unhashed_userpw(inst, user, is_present=False) # Check with value 'on' that unhashed#user#password is logged inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-unhashed-pw-switch', b'on')]) inst.restart() user = "uid=member3,%s" % (PEOPLE_DN) inst.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD.encode())]) inst.stop() _check_unhashed_userpw(inst, user, is_present=True) if DEBUGGING: # Add debugging steps(if any)... 
pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/000077500000000000000000000000001421664411400240315ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/__init__.py000066400000000000000000000000771421664411400261460ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Plugins """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/acceptance_test.py000066400000000000000000002000151421664411400275260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Dec 09, 2014 @author: mreynolds ''' import logging import threading from ldap.syncrepl import SyncreplConsumer from ldap.ldapobject import ReconnectLDAPObject import subprocess import pytest from lib389.utils import * from lib389.plugins import * from lib389._constants import * from lib389.dseldif import DSEldif from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.domain import Domain from lib389.topologies import create_topology, topology_i2 as topo pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) USER_DN = 'uid=test_user_1001,ou=people,dc=example,dc=com' USER_PW = 'password' GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX CONFIG_AREA = 'nsslapd-pluginConfigArea' if ds_is_older('1.3.7'): MEMBER_ATTR = 'member' else: MEMBER_ATTR = 'memberOf' ''' Functional tests for each plugin Test: plugin restarts (test when on and off) plugin config validation plugin dependencies plugin functionality (including plugin tasks) ''' def check_dependency(inst, plugin, online=True): """Set the "account usability" 
plugin to depend on this plugin. This plugin is generic, always enabled, and perfect for our testing """ acct_usability = AccountUsabilityPlugin(inst) acct_usability.replace('nsslapd-plugin-depends-on-named', plugin.rdn) if online: with pytest.raises(ldap.UNWILLING_TO_PERFORM): plugin.disable() # Now undo the change acct_usability.remove('nsslapd-plugin-depends-on-named', plugin.rdn) else: plugin.disable() with pytest.raises((subprocess.CalledProcessError, ValueError)): inst.restart() dse_ldif = DSEldif(inst) dse_ldif.delete(acct_usability.dn, 'nsslapd-plugin-depends-on-named') dse_ldif.replace(plugin.dn, 'nsslapd-pluginEnabled', 'on') inst.start() def test_acctpolicy(topo, args=None): """Test Account policy basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d829 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add a config entry for 'lastLoginTime' 4. Add a user 5. Bind as the user 6. Check testLastLoginTime was added to the user 7. Replace 'stateattrname': 'testLastLoginTime' 8. Bind as the user 9. Check testLastLoginTime was added to the user 10. Check nsslapd-plugin-depends-on-named for the plugin 11. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. 
Success """ inst = topo[0] # stop the plugin, and start it plugin = AccountPolicyPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return True # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing {}'.format(PLUGIN_ACCT_POLICY)) ############################################################################ # Configure plugin ############################################################################ # Add the config entry ap_configs = AccountPolicyConfigs(inst) try: ap_config = ap_configs.create(properties={'cn': 'config', 'alwaysrecordlogin': 'yes', 'stateattrname': 'lastLoginTime'}) except ldap.ALREADY_EXISTS: ap_config = ap_configs.get('config') ap_config.replace_many(('alwaysrecordlogin', 'yes'), ('stateattrname', 'lastLoginTime')) ############################################################################ # Test plugin ############################################################################ # Add an entry users = UserAccounts(inst, DEFAULT_SUFFIX) user = users.create_test_user(1000, 2000) user.add('objectclass', 'extensibleObject') user.replace('userPassword', USER_PW) # Bind as user user.bind(USER_PW) time.sleep(1) # Check lastLoginTime of USER1 entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*') assert entries ############################################################################ # Change config - change the stateAttrName to a new attribute ############################################################################ test_attribute = "( 2.16.840.1.113719.1.1.4.1.35999 \ NAME 'testLastLoginTime' DESC 'Test Last login time' \ SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE USAGE \ directoryOperation X-ORIGIN 'dirsrvtests' )" Schema(inst).add('attributetypes', test_attribute) ap_config.replace('stateattrname', 'testLastLoginTime') ############################################################################ # Test plugin 
############################################################################ # login as user user.bind(USER_PW) time.sleep(1) # Check testLastLoginTime was added to USER1 entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(testLastLoginTime=*)') assert entries ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ user.delete() ############################################################################ # Test passed ############################################################################ log.info('test_acctpolicy: PASS\n') return def test_attruniq(topo, args=None): """Test Attribute uniqueness basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d801 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add a user: with 'mail' and 'mailAlternateAddress' attributes 4. Replace 'uniqueness-attribute-name': 'cn' 5. Try to add a user with the same 'cn' 6. Replace 'uniqueness-attribute-name': 'mail' 7. Try to add a user with the same 'mail' 8. Add 'uniqueness-attribute-name': 'mailAlternateAddress' 9. Try to add a user with the same 'mailAlternateAddress' 10. Check nsslapd-plugin-depends-on-named for the plugin 11. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Should fail 6. Success 7. Should fail 8. Success 9. Should fail 10. Success 11. 
Success """ inst = topo[0] # stop the plugin, and start it plugin = AttributeUniquenessPlugin(inst, dn="cn=attribute uniqueness,cn=plugins,cn=config") plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing {}'.format(PLUGIN_ATTR_UNIQUENESS)) user1_dict = {'objectclass': 'extensibleObject', 'uid': 'testuser1', 'cn': 'testuser1', 'sn': 'user1', 'uidNumber': '1001', 'gidNumber': '2001', 'mail': 'user1@example.com', 'mailAlternateAddress': 'user1@alt.example.com', 'homeDirectory': '/home/testuser1', 'userpassword': 'password'} user2_dict = {'objectclass': 'extensibleObject', 'uid': 'testuser2', 'cn': 'testuser2', 'sn': 'user2', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/testuser2', 'userpassword': 'password'} ############################################################################ # Configure plugin ############################################################################ plugin.replace('uniqueness-attribute-name', 'cn') if args is None: inst.restart() ############################################################################ # Test plugin ############################################################################ # Add an entry users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create(properties=user1_dict) # Add an entry with a duplicate "cn" with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['cn'] = 'testuser1' users.create(properties=user2_dict) ############################################################################ # Change config to use "mail" instead of "uid" ############################################################################ plugin.replace('uniqueness-attribute-name', 'mail') ############################################################################ # Test plugin - Add an entry, that has a duplicate "mail" value 
############################################################################ with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['mail'] = 'user1@example.com' users.create(properties=user2_dict) ############################################################################ # Reconfigure plugin for mail and mailAlternateAddress ############################################################################ plugin.add('uniqueness-attribute-name', 'mailAlternateAddress') ############################################################################ # Test plugin - Add an entry, that has a duplicate "mail" value ############################################################################ with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['mail'] = 'user1@example.com' users.create(properties=user2_dict) ############################################################################ # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value ############################################################################ with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['mailAlternateAddress'] = 'user1@alt.example.com' users.create(properties=user2_dict) ############################################################################ # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress ############################################################################ with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['mail'] = 'user1@alt.example.com' users.create(properties=user2_dict) ############################################################################ # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail ############################################################################ with pytest.raises(ldap.CONSTRAINT_VIOLATION): user2_dict['mailAlternateAddress'] = 'user1@example.com' users.create(properties=user2_dict) 
############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ user1.delete() ############################################################################ # Test passed ############################################################################ log.info('test_attruniq: PASS\n') return def test_automember(topo, args=None): """Test Auto Membership basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d802 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add a group 4. Add two Organisation Units entries 5. Add a config entry for the group and one branch 6. Add a user that should get added to the group 7. Check the entry is in group 8. Set groupattr to 'uniquemember:dn' and scope to branch2 9. Add a user that should get added to the group 10. Check the group 11. Disable plugin and restart 12. Add an entry that should be picked up by automember 13. Verify that the entry is not picked up by automember (yet) 14. Check the group - uniquemember should not exist 15. Enable plugin and restart 16. Verify the fixup task worked 17. Check nsslapd-plugin-depends-on-named for the plugin 18. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. 
Success """ inst = topo[0] # stop the plugin, and start it plugin = AutoMembershipPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_AUTOMEMBER + '...') ############################################################################ # Configure plugin ############################################################################ # Add the automember group groups = Groups(inst, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group'}) ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) branch1 = ous.create(properties={'ou': 'branch1'}) branch2 = ous.create(properties={'ou': 'branch2'}) # Add the automember config entry am_configs = AutoMembershipDefinitions(inst) am_config = am_configs.create(properties={'cn': 'config', 'autoMemberScope': branch1.dn, 'autoMemberFilter': 'objectclass=top', 'autoMemberDefaultGroup': group.dn, 'autoMemberGroupingAttr': '{}:dn'.format(MEMBER_ATTR)}) ############################################################################ # Test the plugin ############################################################################ users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn)) # Add a user that should get added to the group user1 = users.create_test_user(uid=1001) # Check the group group_members = group.get_attr_vals_utf8(MEMBER_ATTR) assert user1.dn in group_members ############################################################################ # Change config ############################################################################ group.add('objectclass', 'groupOfUniqueNames') am_config.set_groupattr('uniquemember:dn') am_config.set_scope(branch2.dn) ############################################################################ # Test plugin ############################################################################ # Add a user that should get added 
to the group users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch2.rdn)) user2 = users.create_test_user(uid=1002) # Check the group group_members = group.get_attr_vals_utf8('uniquemember') assert user2.dn in group_members ############################################################################ # Test Task ############################################################################ # Disable plugin plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() # Add an entry that should be picked up by automember - verify it is not(yet) user3 = users.create_test_user(uid=1003) # Check the group - uniquemember should not exist group_members = group.get_attr_vals_utf8('uniquemember') assert user3.dn not in group_members # Enable plugin plugin.enable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() task = plugin.fixup(branch2.dn, _filter='objectclass=top') task.wait() # Verify the fixup task worked group_members = group.get_attr_vals_utf8('uniquemember') assert user3.dn in group_members ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ user1.delete() user2.delete() user3.delete() branch1.delete() branch2.delete() group.delete() am_config.delete() ############################################################################ # Test passed ############################################################################ log.info('test_automember: PASS\n') return def test_dna(topo, args=None): """Test DNA basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d803 
:setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Configure plugin for uidNumber 4. Add a user 5. See if the entry now has the new uidNumber assignment - uidNumber=1 6. Test the magic regen value 7. See if the entry now has the new uidNumber assignment - uidNumber=2 8. Set 'dnaMagicRegen': '-2' 9. Test the magic regen value 10. See if the entry now has the new uidNumber assignment - uidNumber=3 11. Check nsslapd-plugin-depends-on-named for the plugin 12. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success """ inst = topo[0] # stop the plugin, and start it plugin = DNAPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_DNA + '...') ############################################################################ # Configure plugin ############################################################################ dna_configs = DNAPluginConfigs(inst, plugin.dn) try: dna_config = dna_configs.create(properties={'cn': 'config', 'dnatype': 'uidNumber', 'dnafilter': '(objectclass=top)', 'dnascope': DEFAULT_SUFFIX, 'dnaMagicRegen': '-1', 'dnaMaxValue': '50000', 'dnaNextValue': '1'}) except ldap.ALREADY_EXISTS: dna_config = dna_configs.get('config') dna_config.replace_many(('dnaNextValue', '1'), ('dnaMagicRegen', '-1')) ############################################################################ # Test plugin ############################################################################ users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1) # See if the entry now has the new uidNumber assignment - uidNumber=1 entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=1)') assert entries # Test the magic regen value user1.replace('uidNumber', '-1') # 
See if the entry now has the new uidNumber assignment - uidNumber=2 entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=2)') assert entries ################################################################################ # Change the config ################################################################################ dna_config.replace('dnaMagicRegen', '-2') ################################################################################ # Test plugin ################################################################################ # Test the magic regen value user1.replace('uidNumber', '-2') # See if the entry now has the new uidNumber assignment - uidNumber=3 entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=3)') assert entries ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ user1.delete() dna_config.delete() plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() ############################################################################ # Test passed ############################################################################ log.info('test_dna: PASS\n') return def test_linkedattrs(topo, args=None): """Test Linked Attributes basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d804 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add a config entry for directReport 4. Add test entries 5. Add the linked attrs config entry 6. User1 - Set "directReport" to user2 7. See if manager was added to the other entry 8. User1 - Remove "directReport" 9. 
See if manager was removed 10. Change the config - using linkType "indirectReport" now 11. Make sure the old linkType(directManager) is not working 12. See if manager was added to the other entry, better not be... 13. Now, set the new linkType "indirectReport", which should add "manager" to the other entry 14. See if manager was added to the other entry, better not be 15. Remove "indirectReport" should remove "manager" to the other entry 16. See if manager was removed 17. Disable plugin and make some updates that would of triggered the plugin 18. The entry should not have a manager attribute 19. Enable the plugin and rerun the task entry 20. Add the task again 21. Check if user2 now has a manager attribute now 22. Check nsslapd-plugin-depends-on-named for the plugin 23. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success 19. Success 20. Success 21. Success 22. Success 23. 
Success """ inst = topo[0] # stop the plugin, and start it plugin = LinkedAttributesPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_LINKED_ATTRS + '...') ############################################################################ # Configure plugin ############################################################################ # Add test entries users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1001) user1.add('objectclass', 'extensibleObject') user2 = users.create_test_user(uid=1002) user2.add('objectclass', 'extensibleObject') # Add the linked attrs config entry la_configs = LinkedAttributesConfigs(inst) la_config = la_configs.create(properties={'cn': 'config', 'linkType': 'directReport', 'managedType': 'manager'}) ############################################################################ # Test plugin ############################################################################ # Set "directReport" should add "manager" to the other entry user1.replace('directReport', user2.dn) # See if manager was added to the other entry entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert entries # Remove "directReport" should remove "manager" to the other entry user1.remove_all('directReport') # See if manager was removed entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert not entries ############################################################################ # Change the config - using linkType "indirectReport" now ############################################################################ la_config.replace('linkType', 'indirectReport') ############################################################################ # Test plugin ############################################################################ # Make sure the old 
linkType(directManager) is not working user1.replace('directReport', user2.dn) # See if manager was added to the other entry, better not be... entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert not entries # Now, set the new linkType "indirectReport", which should add "manager" to the other entry user1.replace('indirectReport', user2.dn) # See if manager was added to the other entry, better not be entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert entries # Remove "indirectReport" should remove "manager" to the other entry user1.remove_all('indirectReport') # See if manager was removed entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert not entries ############################################################################ # Test Fixup Task ############################################################################ # Disable plugin and make some updates that would of triggered the plugin plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() user1.replace('indirectReport', user2.dn) # The entry should not have a manager attribute entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert not entries # Enable the plugin and rerun the task entry plugin.enable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() # Add the task again task = plugin.fixup(la_config.dn) task.wait() # Check if user2 now has a manager attribute now entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') assert entries ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup 
############################################################################ user1.delete() user2.delete() la_config.delete() ############################################################################ # Test passed ############################################################################ log.info('test_linkedattrs: PASS\n') return def test_memberof(topo, args=None): """Test MemberOf basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d805 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Replace groupattr with 'member' 4. Add our test entries 5. Check if the user now has a "memberOf" attribute 6. Remove "member" should remove "memberOf" from the entry 7. Check that "memberOf" was removed 8. Replace 'memberofgroupattr': 'uniquemember' 9. Replace 'uniquemember': user1 10. Check if the user now has a "memberOf" attribute 11. Remove "uniquemember" should remove "memberOf" from the entry 12. Check that "memberOf" was removed 13. The shared config entry uses "member" - the above test uses "uniquemember" 14. Delete the test entries then read them to start with a clean slate 15. Check if the user now has a "memberOf" attribute 16. Check that "memberOf" was removed 17. Replace 'memberofgroupattr': 'uniquemember' 18. Check if the user now has a "memberOf" attribute 19. Remove "uniquemember" should remove "memberOf" from the entry 20. Check that "memberOf" was removed 21. Replace 'memberofgroupattr': 'member' 22. Remove shared config from plugin 23. Check if the user now has a "memberOf" attribute 24. Remove "uniquemember" should remove "memberOf" from the entry 25. Check that "memberOf" was removed 26. First change the plugin to use uniquemember 27. Add uniquemember, should not update user1 28. Check for "memberOf" 29. Enable memberof plugin 30. Run the task and validate that it worked 31. Check for "memberOf" 32. Check nsslapd-plugin-depends-on-named for the plugin 33. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. 
Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success 19. Success 20. Success 21. Success 22. Success 23. Success 24. Success 25. Success 26. Success 27. Success 28. Success 29. Success 30. Success 31. Success 32. Success 33. Success """ inst = topo[0] # stop the plugin, and start it plugin = MemberOfPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_MEMBER_OF + '...') ############################################################################ # Configure plugin ############################################################################ plugin.replace_groupattr('member') ############################################################################ # Test plugin ############################################################################ # Add our test entries users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1001) groups = Groups(inst, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group', 'member': user1.dn}) group.add('objectclass', 'groupOfUniqueNames') memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX)) memberof_config.create(properties={'cn': 'memberOf config', 'memberOfGroupAttr': 'member', 'memberOfAttr': MEMBER_ATTR}) # Check if the user now has a "memberOf" attribute entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries # Remove "member" should remove "memberOf" from the entry group.remove_all('member') # Check that "memberOf" was removed entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries ############################################################################ # Change the config 
############################################################################ plugin.replace('memberofgroupattr', 'uniquemember') ############################################################################ # Test plugin ############################################################################ group.replace('uniquemember', user1.dn) # Check if the user now has a "memberOf" attribute entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries # Remove "uniquemember" should remove "memberOf" from the entry group.remove_all('uniquemember') # Check that "memberOf" was removed entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries ############################################################################ # Set the shared config entry and test the plugin ############################################################################ # The shared config entry uses "member" - the above test uses "uniquemember" plugin.set_configarea(memberof_config.dn) if args is None: inst.restart() # Delete the test entries then readd them to start with a clean slate user1.delete() group.delete() user1 = users.create_test_user(uid=1001) group = groups.create(properties={'cn': 'group', 'member': user1.dn}) group.add('objectclass', 'groupOfUniqueNames') # Test the shared config # Check if the user now has a "memberOf" attribute entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries group.remove_all('member') # Check that "memberOf" was removed entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries ############################################################################ # Change the shared config entry to use 'uniquemember' and test the plugin ############################################################################ memberof_config.replace('memberofgroupattr', 'uniquemember') group.replace('uniquemember', user1.dn) # 
Check if the user now has a "memberOf" attribute entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries # Remove "uniquemember" should remove "memberOf" from the entry group.remove_all('uniquemember') # Check that "memberOf" was removed entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries ############################################################################ # Remove shared config from plugin, and retest ############################################################################ # First change the plugin to use member before we move the shared config that uses uniquemember plugin.replace('memberofgroupattr', 'member') # Remove shared config from plugin plugin.remove_configarea() group.replace('member', user1.dn) # Check if the user now has a "memberOf" attribute entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries # Remove "uniquemember" should remove "memberOf" from the entry group.remove_all('member') # Check that "memberOf" was removed entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries ############################################################################ # Test Fixup Task ############################################################################ plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() # First change the plugin to use uniquemember plugin.replace('memberofgroupattr', 'uniquemember') # Add uniquemember, should not update USER1 group.replace('uniquemember', user1.dn) # Check for "memberOf" entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert not entries # Enable memberof plugin plugin.enable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() 
############################################################# # Test memberOf fixup arg validation: Test the DN and filter ############################################################# for basedn, filter in (('{}bad'.format(DEFAULT_SUFFIX), 'objectclass=top'), ("bad", 'objectclass=top'), (DEFAULT_SUFFIX, '(objectclass=top')): task = plugin.fixup(basedn, filter) task.wait() exitcode = task.get_exit_code() assert exitcode != "0", 'test_memberof: Task with invalid DN still reported success' #################################################### # Test fixup works #################################################### # Run the task and validate that it worked task = plugin.fixup(DEFAULT_SUFFIX, 'objectclass=top') task.wait() # Check for "memberOf" entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) assert entries ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ user1.delete() group.delete() memberof_config.delete() ############################################################################ # Test passed ############################################################################ log.info('test_memberof: PASS\n') return def test_mep(topo, args=None): """Test Managed Entries basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d806 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add our org units 4. Set up config entry and template entry for the org units 5. Add an entry that meets the MEP scope 6. Check if a managed group entry was created 7. Add a new template entry 8. Add an entry that meets the MEP scope 9. Check if a managed group entry was created 10. 
Check nsslapd-plugin-depends-on-named for the plugin 11. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success """ inst = topo[0] # stop the plugin, and start it plugin = ManagedEntriesPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_MANAGED_ENTRY + '...') ############################################################################ # Configure plugin ############################################################################ # Add our org units ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ou_people = ous.create(properties={'ou': 'managed_people'}) ou_groups = ous.create(properties={'ou': 'managed_groups'}) mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) mep_template1 = mep_templates.create(properties={ 'cn': 'MEP template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') }) mep_configs = MEPConfigs(inst) mep_config = mep_configs.create(properties={'cn': 'config', 'originScope': ou_people.dn, 'originFilter': 'objectclass=posixAccount', 'managedBase': ou_groups.dn, 'managedTemplate': mep_template1.dn}) if args is None: inst.restart() ############################################################################ # Test plugin ############################################################################ # Add an entry that meets the MEP scope test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) test_user1 = test_users_m1.create_test_user(1001) # Check if a managed group entry was created entries = inst.search_s('cn={},{}'.format(test_user1.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)') assert len(entries) == 1 
############################################################################ # Change the config ############################################################################ # Add a new template entry mep_template2 = mep_templates.create(properties={ 'cn': 'MEP template2', 'mepRDNAttr': 'uid', 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') }) mep_config.replace('managedTemplate', mep_template2.dn) ############################################################################ # Test plugin ############################################################################ # Add an entry that meets the MEP scope test_user2 = test_users_m1.create_test_user(1002) # Check if a managed group entry was created entries = inst.search_s('uid={},{}'.format(test_user2.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)') assert len(entries) == 1 ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ test_user1.delete() test_user2.delete() ou_people.delete() ou_groups.delete() mep_config.delete() mep_template1.delete() mep_template2.delete() ############################################################################ # Test passed ############################################################################ log.info('test_mep: PASS\n') return def test_passthru(topo, args=None): """Test Passthrough Authentication basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d807 :setup: Standalone Instance :steps: 1. Stop the plugin 2. Restart the instance 3. Create a second backend 4. Create the top of the tree 5. Add user to suffix1 6. 
Configure and start plugin 7. Login as user 8. Login as root DN 9. Replace 'nsslapd-pluginarg0': ldap uri for second instance 10. Login as user 11. Login as root DN 12. Check nsslapd-plugin-depends-on-named for the plugin 13. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success """ inst1 = topo[0] inst2 = topo[1] # Passthru is a bit picky about the state of the entry - we can't just restart it if args == "restart": return # stop the plugin plugin = PassThroughAuthenticationPlugin(inst1) plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst1.restart() PASS_SUFFIX1 = 'dc=pass1,dc=thru' PASS_SUFFIX2 = 'dc=pass2,dc=thru' PASS_BE1 = 'PASS1' PASS_BE2 = 'PASS2' log.info('Testing ' + PLUGIN_PASSTHRU + '...') ############################################################################ # Use a new "remote" instance, and a user for auth ############################################################################ # Create a second backend backend1 = inst2.backends.create(properties={'cn': PASS_BE1, 'nsslapd-suffix': PASS_SUFFIX1}) backend2 = inst2.backends.create(properties={'cn': PASS_BE2, 'nsslapd-suffix': PASS_SUFFIX2}) # Create the top of the tree suffix = Domain(inst2, PASS_SUFFIX1) pass1 = suffix.create(properties={'dc': 'pass1'}) suffix = Domain(inst2, PASS_SUFFIX2) pass2 = suffix.create(properties={'dc': 'pass2'}) # Add user to suffix1 users = UserAccounts(inst2, pass1.dn, None) test_user1 = users.create_test_user(1001) test_user1.replace('userpassword', 'password') users = UserAccounts(inst2, pass2.dn, None) test_user2 = users.create_test_user(1002) test_user2.replace('userpassword', 'password') ############################################################################ # Configure and start plugin ############################################################################ 
plugin.replace('nsslapd-pluginarg0', 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass1.dn)) plugin.enable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst1.restart() ############################################################################ # Test plugin ############################################################################ # login as user inst1.simple_bind_s(test_user1.dn, "password") ############################################################################ # Change the config ############################################################################ # login as root DN inst1.simple_bind_s(DN_DM, PASSWORD) plugin.replace('nsslapd-pluginarg0', 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass2.dn)) if args is None: inst1.restart() ############################################################################ # Test plugin ############################################################################ # login as user inst1.simple_bind_s(test_user2.dn, "password") # login as root DN inst1.simple_bind_s(DN_DM, PASSWORD) # Clean up backend1.delete() backend2.delete() ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst1, plugin, online=isinstance(args, str)) ############################################################################ # Test passed ############################################################################ log.info('test_passthru: PASS\n') return def test_referint(topo, args=None): """Test Referential Integrity basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d808 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Replace 'referint-membership-attr': 'member' 4. Add some users and a group 5. Grab the referint log file from the plugin 6. Add shared config entry 7. Delete one user 8. 
Check for integrity 9. Replace 'referint-membership-attr': 'uniquemember' 10. Delete second user 11. Check for integrity 12. The shared config entry uses "member" - the above test used "uniquemember" 13. Recreate users and a group 14. Delete one user 15. Check for integrity 16. Change the shared config entry to use 'uniquemember' and test the plugin 17. Delete second user 18. Check for integrity 19. First change the plugin to use member before we move the shared config that uses uniquemember 20. Remove shared config from plugin 21. Add test user 22. Add user to group 23. Delete a user 24. Check for integrity 25. Check nsslapd-plugin-depends-on-named for the plugin 26. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success 19. Success 20. Success 21. Success 22. Success 23. Success 24. Success 25. Success 26. Success """ inst = topo[0] # stop the plugin, and start it plugin = ReferentialIntegrityPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...') ############################################################################ # Configure plugin ############################################################################ plugin.replace('referint-membership-attr', 'member') ############################################################################ # Test plugin ############################################################################ # Add some users and a group users = UserAccounts(inst, DEFAULT_SUFFIX, None) user1 = users.create_test_user(uid=1001) user2 = users.create_test_user(uid=1002) groups = Groups(inst, DEFAULT_SUFFIX, None) group = groups.create(properties={'cn': 'group', MEMBER_ATTR: 
user1.dn}) group.add('objectclass', 'groupOfUniqueNames') group.add('uniquemember', user2.dn) # Grab the referint log file from the plugin referin_logfile = plugin.get_attr_val_utf8('referint-logfile') # Add shared config entry referin_config = ReferentialIntegrityConfig(inst, 'cn=RI config,{}'.format(DEFAULT_SUFFIX)) referin_config.create(properties={'cn': 'RI config', 'referint-membership-attr': 'member', 'referint-update-delay': '0', 'referint-logfile': referin_logfile}) user1.delete() # Check for integrity entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) assert not entry ############################################################################ # Change the config ############################################################################ plugin.replace('referint-membership-attr', 'uniquemember') ############################################################################ # Test plugin ############################################################################ user2.delete() # Check for integrity entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn)) assert not entry ############################################################################ # Set the shared config entry and test the plugin ############################################################################ # The shared config entry uses "member" - the above test used "uniquemember" plugin.set_configarea(referin_config.dn) group.delete() user1 = users.create_test_user(uid=1001) user2 = users.create_test_user(uid=1002) group = groups.create(properties={'cn': 'group', MEMBER_ATTR: user1.dn}) group.add('objectclass', 'groupOfUniqueNames') group.add('uniquemember', user2.dn) # Delete a user user1.delete() # Check for integrity entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) assert not entry ############################################################################ # Change the shared config entry to use 
'uniquemember' and test the plugin ############################################################################ referin_config.replace('referint-membership-attr', 'uniquemember') # Delete a user user2.delete() # Check for integrity entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn)) assert not entry ############################################################################ # Remove shared config from plugin, and retest ############################################################################ # First change the plugin to use member before we move the shared config that uses uniquemember plugin.replace('referint-membership-attr', 'member') # Remove shared config from plugin plugin.remove_configarea() # Add test user user1 = users.create_test_user(uid=1001) # Add user to group group.replace('member', user1.dn) # Delete a user user1.delete() # Check for integrity entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) assert not entry ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup ############################################################################ group.delete() referin_config.delete() ############################################################################ # Test passed ############################################################################ log.info('test_referint: PASS\n') return def test_retrocl(topo, args=None): """Test Retro Changelog basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d810 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Gather the current change count (it's not 1 once we start the stability tests) 4. Add a user 5. 
Check we logged this in the retro cl 6. Change the config - disable plugin 7. Delete the user 8. Check we didn't log this in the retro cl 9. Check nsslapd-plugin-depends-on-named for the plugin 10. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success """ inst = topo[0] # stop the plugin, and start it plugin = RetroChangelogPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_RETRO_CHANGELOG + '...') ############################################################################ # Configure plugin ############################################################################ # Gather the current change count (it's not 1 once we start the stabilty tests) entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') entry_count = len(entry) ############################################################################ # Test plugin ############################################################################ # Add a user users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1001) # Check we logged this in the retro cl entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') assert entry assert len(entry) != entry_count entry_count += 1 ############################################################################ # Change the config - disable plugin ############################################################################ plugin.disable() # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() ############################################################################ # Test plugin ############################################################################ user1.delete() # Check we didn't logged this 
in the retro cl entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') assert len(entry) == entry_count plugin.enable() if args is None: inst.restart() ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Test passed ############################################################################ log.info('test_retrocl: PASS\n') return def _rootdn_restart(inst): """Special restart wrapper function for rootDN plugin""" with pytest.raises(ldap.LDAPError): inst.restart() # Bind as the user who can make updates to the config inst.simple_bind_s(USER_DN, USER_PW) # We need it online for other operations to work inst.state = DIRSRV_STATE_ONLINE def test_rootdn(topo, args=None): """Test Root DNA Access control basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d811 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add an user and aci to open up cn=config 4. Set an aci so we can modify the plugin after we deny the root dn 5. Set allowed IP to an unknown host - blocks root dn 6. Bind as Root DN 7. Bind as the user who can make updates to the config 8. Test that invalid plugin changes are rejected 9. Remove the restriction 10. Bind as Root DN 11. Check nsslapd-plugin-depends-on-named for the plugin 12. Clean up :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. 
Success """ inst = topo[0] # stop the plugin, and start it plugin = RootDNAccessControlPlugin(inst) plugin.disable() plugin.enable() if args == "restart": return # If args is None then we run the test suite as pytest standalone and it's not dynamic if args is None: inst.restart() log.info('Testing ' + PLUGIN_ROOTDN_ACCESS + '...') ############################################################################ # Configure plugin ############################################################################ # Add an user and aci to open up cn=config users = UserAccounts(inst, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1001) user1.replace('userpassword', USER_PW) # Set an aci so we can modify the plugin after ew deny the root dn ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' + '"all access";allow (all)(userdn="ldap:///anyone");)') inst.config.add('aci', ACI) # Set allowed IP to an unknown host - blocks root dn plugin.replace('rootdn-allow-ip', '10.10.10.10') ############################################################################ # Test plugin ############################################################################ # Bind as Root DN if args is None: _rootdn_restart(inst) else: with pytest.raises(ldap.LDAPError): inst.simple_bind_s(DN_DM, PASSWORD) # Bind as the user who can make updates to the config inst.simple_bind_s(USER_DN, USER_PW) ############################################################################ # Change the config ############################################################################ # First, test that invalid plugin changes are rejected if args is None: plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12') with pytest.raises((subprocess.CalledProcessError, ValueError)): inst.restart() dse_ldif = DSEldif(inst) dse_ldif.delete(plugin.dn, 'rootdn-deny-ip') _rootdn_restart(inst) plugin.replace('rootdn-allow-host', 'host._.com') with pytest.raises((subprocess.CalledProcessError, ValueError)): inst.restart() dse_ldif 
= DSEldif(inst) dse_ldif.delete(plugin.dn, 'rootdn-allow-host') _rootdn_restart(inst) else: with pytest.raises(ldap.LDAPError): plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12') with pytest.raises(ldap.LDAPError): plugin.replace('rootdn-allow-host', 'host._.com') # Remove the restriction plugin.remove_all('rootdn-allow-ip') if args is None: inst.restart() ############################################################################ # Test plugin ############################################################################ # Bind as Root DN inst.simple_bind_s(DN_DM, PASSWORD) ############################################################################ # Test plugin dependency ############################################################################ check_dependency(inst, plugin, online=isinstance(args, str)) ############################################################################ # Cleanup - remove ACI from cn=config and test user ############################################################################ inst.config.remove('aci', ACI) user1.delete() ############################################################################ # Test passed ############################################################################ log.info('test_rootdn: PASS\n') return # Array of test functions func_tests = [test_acctpolicy, test_attruniq, test_automember, test_dna, test_linkedattrs, test_memberof, test_mep, test_passthru, test_referint, test_retrocl, test_rootdn] def check_all_plugins(topo, args="online"): for func in func_tests: func(topo, args) return 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/accpol_test.py000066400000000000000000001405711421664411400267130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import subprocess from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import (UserAccount, UserAccounts) from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig) from lib389.cos import (CosTemplate, CosPointerDefinition) from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_DM, PASSWORD, DEFAULT_SUFFIX, DN_CONFIG, SERVERID_STANDALONE) pytestmark = pytest.mark.tier1 LOCL_CONF = 'cn=AccountPolicy1,ou=people,dc=example,dc=com' TEMPL_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com' DEFIN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com' ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) USER_PASW = 'Secret1234' INVL_PASW = 'Invalid234' @pytest.fixture(scope="module") def accpol_global(topology_st, request): """Configure Global account policy plugin and restart the server""" log.info('Configuring Global account policy plugin, pwpolicy attributes and restarting the server') plugin = AccountPolicyPlugin(topology_st.standalone) try: if DEBUGGING: topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') plugin.enable() plugin.set('nsslapd-pluginarg0', ACCP_CONF) accp = AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) accp.set('alwaysrecordlogin', 'yes') accp.set('stateattrname', 'lastLoginTime') accp.set('altstateattrname', 'createTimestamp') accp.set('specattrname', 'acctPolicySubentry') accp.set('limitattrname', 'accountInactivityLimit') accp.set('accountInactivityLimit', '12') topology_st.standalone.config.set('passwordexp', 'on') topology_st.standalone.config.set('passwordmaxage', '400') topology_st.standalone.config.set('passwordwarning', '1') topology_st.standalone.config.set('passwordlockout', 'on') topology_st.standalone.config.set('passwordlockoutduration', '5') topology_st.standalone.config.set('passwordmaxfailure', '3') 
topology_st.standalone.config.set('passwordunlock', 'on') except ldap.LDAPError as e: log.error('Failed to enable Global Account Policy Plugin and Password policy attributes') raise e topology_st.standalone.restart(timeout=10) def fin(): log.info('Disabling Global accpolicy plugin and removing pwpolicy attrs') try: plugin = AccountPolicyPlugin(topology_st.standalone) plugin.disable() topology_st.standalone.config.set('passwordexp', 'off') topology_st.standalone.config.set('passwordlockout', 'off') except ldap.LDAPError as e: log.error('Failed to disable Global accpolicy plugin, {}'.format(e.message['desc'])) assert False topology_st.standalone.restart(timeout=10) request.addfinalizer(fin) @pytest.fixture(scope="module") def accpol_local(topology_st, accpol_global, request): """Configure Local account policy plugin for ou=people subtree and restart the server""" log.info('Adding Local account policy plugin configuration entries') try: topology_st.standalone.config.set('passwordmaxage', '400') accp = AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) accp.remove_all('accountInactivityLimit') locl_conf = AccountPolicyConfig(topology_st.standalone, dn=LOCL_CONF) locl_conf.create(properties={'cn': 'AccountPolicy1', 'accountInactivityLimit': '10'}) cos_template = CosTemplate(topology_st.standalone, dn=TEMPL_COS) cos_template.create(properties={'cn': 'TempltCoS', 'acctPolicySubentry': LOCL_CONF}) cos_def = CosPointerDefinition(topology_st.standalone, dn=DEFIN_COS) cos_def.create(properties={ 'cn': 'DefnCoS', 'cosTemplateDn': TEMPL_COS, 'cosAttribute': 'acctPolicySubentry default operational-default'}) except ldap.LDAPError as e: log.error('Failed to configure Local account policy plugin') log.error('Failed to add entry {}, {}, {}:'.format(LOCL_CONF, TEMPL_COS, DEFIN_COS)) raise e topology_st.standalone.restart(timeout=10) def fin(): log.info('Disabling Local accpolicy plugin and removing pwpolicy attrs') try: 
topology_st.standalone.plugins.disable(name=PLUGIN_ACCT_POLICY) for entry_dn in [LOCL_CONF, TEMPL_COS, DEFIN_COS]: entry = UserAccount(topology_st.standalone, dn=entry_dn) entry.delete() except ldap.LDAPError as e: log.error('Failed to disable Local accpolicy plugin, {}'.format(e.message['desc'])) assert False topology_st.standalone.restart(timeout=10) request.addfinalizer(fin) def pwacc_lock(topology_st, suffix, subtree, userid, nousrs): """Lockout user account by attempting invalid password binds""" log.info('Lockout user account by attempting invalid password binds') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) for i in range(3): with pytest.raises(ldap.INVALID_CREDENTIALS): user.bind(INVL_PASW) log.error('No invalid credentials error for User {}'.format(userdn)) with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.bind(USER_PASW) log.error('User {} is not locked, expected error 19'.format(userdn)) nousrs = nousrs - 1 time.sleep(1) def userpw_reset(topology_st, suffix, subtree, userid, nousrs, bindusr, bindpw, newpasw): """Reset user password""" while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) log.info('Reset user password for user-{}'.format(userdn)) if (bindusr == "DirMgr"): try: user.replace('userPassword', newpasw) except ldap.LDAPError as e: log.error('Unable to reset userPassword for user-{}'.format(userdn)) raise e elif (bindusr == "RegUsr"): user_conn = user.bind(bindpw) try: user_conn.replace('userPassword', newpasw) except ldap.LDAPError as e: log.error('Unable to reset userPassword for user-{}'.format(userdn)) raise e nousrs = nousrs - 1 time.sleep(1) def nsact_inact(topology_st, suffix, subtree, userid, nousrs, command, expected): """Account activate/in-activate/status using dsidm""" log.info('Account 
activate/in-activate/status using dsidm') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) log.info('Running {} for user {}'.format(command, userdn)) dsidm_cmd = ['%s/dsidm' % topology_st.standalone.get_sbin_dir(), 'slapd-standalone1', '-b', DEFAULT_SUFFIX, 'account', command, userdn] log.info('Running {} for user {}'.format(dsidm_cmd, userdn)) try: output = subprocess.check_output(dsidm_cmd) except subprocess.CalledProcessError as err: output = err.output log.info('output: {}'.format(output)) assert ensure_bytes(expected) in output nousrs = nousrs - 1 time.sleep(1) def modify_attr(topology_st, base_dn, attr_name, attr_val): """Modify attribute value for a given DN""" log.info('Modify attribute value for a given DN') try: entry = UserAccount(topology_st.standalone, dn=base_dn) entry.replace(attr_name, attr_val) except ldap.LDAPError as e: log.error('Failed to replace lastLoginTime attribute for user-{} {}'.format(userdn, e.message['desc'])) assert False time.sleep(1) def check_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): """Check ModifyTimeStamp attribute present for user""" log.info('Check ModifyTimeStamp attribute present for user') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) try: user.get_attr_val(attr_name) except ldap.LDAPError as e: log.error('ModifyTimeStamp attribute is not present for user-{} {}'.format(userdn, e.message['desc'])) assert False nousrs = nousrs - 1 def add_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): """Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute""" new_attr_val = time.strftime("%Y%m%d%H%M%S", time.gmtime()) + 'Z' log.info('Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 
'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) try: user.replace(attr_name, new_attr_val) except ldap.LDAPError as e: log.error('Failed to add/replace {} attribute to-{}, for user-{}'.format(attr_name, new_attr_val, userdn)) raise e nousrs = nousrs - 1 time.sleep(1) time.sleep(1) def modusr_attr(topology_st, suffix, subtree, userid, nousrs, attr_name, attr_value): """Enable account by replacing cn attribute value, value of modifyTimeStamp changed""" log.info('Enable account by replacing cn attribute value, value of modifyTimeStamp changed') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) try: user.replace(attr_name, attr_value) except ldap.LDAPError as e: log.error('Failed to add/replace {} attribute to-{}, for user-{}'.format(attr_name, attr_value, userdn)) raise e nousrs = nousrs - 1 time.sleep(1) def del_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): """Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account""" log.info('Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account') while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) try: user.remove_all(attr_name) except ldap.LDAPError as e: log.error('Failed to delete {} attribute for user-{}'.format(attr_name, userdn)) raise e nousrs = nousrs - 1 time.sleep(1) def add_users(topology_st, suffix, subtree, userid, nousrs, ulimit): """Add users to default test instance with given suffix, subtree, userid and nousrs""" log.info('add_users: Pass all of these as parameters suffix, subtree, userid and nousrs') users = UserAccounts(topology_st.standalone, suffix, rdn=subtree) while (nousrs > ulimit): usrrdn = '{}{}'.format(userid, nousrs) user_properties = { 'uid': 
usrrdn, 'cn': usrrdn, 'sn': usrrdn, 'uidNumber': '1001', 'gidNumber': '2001', 'userpassword': USER_PASW, 'homeDirectory': '/home/{}'.format(usrrdn)} users.create(properties=user_properties) nousrs = nousrs - 1 def del_users(topology_st, suffix, subtree, userid, nousrs): """Delete users from default test instance with given suffix, subtree, userid and nousrs""" log.info('del_users: Pass all of these as parameters suffix, subtree, userid and nousrs') users = UserAccounts(topology_st.standalone, suffix, rdn=subtree) while (nousrs > 0): usrrdn = '{}{}'.format(userid, nousrs) userdn = users.get(usrrdn) userdn.delete() nousrs = nousrs - 1 def account_status(topology_st, suffix, subtree, userid, nousrs, ulimit, tochck): """Check account status for the given suffix, subtree, userid and nousrs""" while (nousrs > ulimit): usrrdn = '{}{}'.format(userid, nousrs) userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) user = UserAccount(topology_st.standalone, dn=userdn) if (tochck == "Enabled"): try: user.bind(USER_PASW) except ldap.LDAPError as e: log.error('User {} failed to login, expected 0'.format(userdn)) raise e elif (tochck == "Expired"): with pytest.raises(ldap.INVALID_CREDENTIALS): user.bind(USER_PASW) log.error('User {} password not expired , expected error 49'.format(userdn)) elif (tochck == "Disabled"): with pytest.raises(ldap.CONSTRAINT_VIOLATION): user.bind(USER_PASW) log.error('User {} is not inactivated, expected error 19'.format(userdn)) nousrs = nousrs - 1 time.sleep(1) def test_glact_inact(topology_st, accpol_global): """Verify if user account is inactivated when accountInactivityLimit is exceeded. :id: 342af084-0ad0-442f-b6f6-5a8b8e5e4c28 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=people subtree in the default suffix 2. Check if users are active just before it reaches accountInactivityLimit. 3. User accounts should not be inactivated, expected 0 4. 
Check if users are inactivated when accountInactivityLimit is exceeded. 5. User accounts should be inactivated, expected error 19. :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Should return error code 19 """ suffix = DEFAULT_SUFFIX subtree = "ou=people" userid = "glinactusr" nousrs = 3 log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 10 secs to check if account is not inactivated, expected value 0') time.sleep(10) log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') account_status(topology_st, suffix, subtree, userid, 3, 2, "Enabled") log.info('Sleep for 3 more secs to check if account is inactivated') time.sleep(3) account_status(topology_st, suffix, subtree, userid, 2, 0, "Disabled") log.info('Sleep +10 secs to check if account {}3 is inactivated'.format(userid)) time.sleep(10) account_status(topology_st, suffix, subtree, userid, 3, 2, "Disabled") del_users(topology_st, suffix, subtree, userid, nousrs) def test_glremv_lastlogin(topology_st, accpol_global): """Verify if user account is inactivated by createTimeStamp, if lastLoginTime attribute is missing. :id: 8ded5d8e-ed93-4c22-9c8e-78c479189f84 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=people subtree in the default suffix 2. Wait for few secs and bind as user to create lastLoginTime attribute. 3. Remove the lastLoginTime attribute from the user. 4. Wait till accountInactivityLimit exceeded based on createTimeStamp value 5. Check if users are inactivated, expected error 19. 6. Replace lastLoginTime attribute and check if account is activated 7. User should be activated based on lastLoginTime attribute, expected 0 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Should return error code 19 """ suffix = DEFAULT_SUFFIX subtree = "ou=people" userid = "nologtimeusr" nousrs = 1 log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 6 secs to check if account is not inactivated, expected value 0') time.sleep(6) log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') log.info('Sleep for 7 more secs to check if account is inactivated') time.sleep(7) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') log.info('Check if account is activated, expected 0') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) def test_glact_login(topology_st, accpol_global): """Verify if user account can be activated by replacing the lastLoginTime attribute. :id: f89897cc-c13e-4824-af08-3dd1039bab3c :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=groups subtree in the default suffix 2. Wait till accountInactivityLimit exceeded 3. Run ldapsearch as normal user, expected error 19. 4. Replace the lastLoginTime attribute and check if account is activated 5. Run ldapsearch as normal user, expected 0. :assert: 1. Success 2. Success 3. Success 4. Success 5. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "glactusr" nousrs = 3 log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 13 secs to check if account is inactivated, expected error 19') time.sleep(13) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') log.info('Check if account is activated, expected 0') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) def test_glinact_limit(topology_st, accpol_global): """Verify if account policy plugin functions well when changing accountInactivityLimit value. :id: 7fbc373f-a3d7-4774-8d34-89b057c5e74b :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=groups subtree in the default suffix 2. Check if users are active just before reaching accountInactivityLimit 3. Modify AccountInactivityLimit to a bigger value 4. Wait for additional few secs, but check users before it reaches accountInactivityLimit 5. Wait till accountInactivityLimit exceeded and check users, expected error 19 6. Modify accountInactivityLimit to use the min value. 7. Add few users to ou=groups subtree in the default suffix 8. Wait till it reaches accountInactivityLimit and check users, expected error 19 9. Modify accountInactivityLimit to 10 times(30 secs) bigger than the initial value. 10. Add few users to ou=groups subtree in the default suffix 11. Wait for 90 secs and check if account is not inactivated, expected 0 12. Wait for +27 secs and check if account is not inactivated, expected 0 13. Wait for +30 secs and check if account is inactivated, error 19 14. Replace the lastLoginTime attribute and check if account is activated 15. Modify accountInactivityLimit to 12 secs, which is the default 16. Run ldapsearch as normal user, expected 0. :assert: 1. Success 2. Success 3. 
Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "inactestusr" nousrs = 3 log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 2) log.info('Sleep for 9 secs to check if account is not inactivated, expected 0') time.sleep(9) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20') time.sleep(17) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") time.sleep(20) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Disabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '1') add_users(topology_st, suffix, subtree, userid, 2, 1) time.sleep(2) account_status(topology_st, suffix, subtree, userid, 2, 1, "Disabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30') add_users(topology_st, suffix, subtree, userid, 1, 0) time.sleep(27) account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") time.sleep(30) account_status(topology_st, suffix, subtree, userid, 1, 0, "Disabled") log.info('Check if account is activated, expected 0') add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glnologin_attr(topology_st, accpol_global): """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present :id: 3032f670-705d-4f69-96f5-d75445cffcfb :setup: Standalone instance, Local account policy 
plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Configure Global account policy plugin with createTimestamp as stateattrname 2. lastLoginTime attribute will not be effective. 3. Add few users to ou=groups subtree in the default suffix 4. Wait for 10 secs and check if account is not inactivated, expected 0 5. Modify AccountInactivityLimit to 20 secs 6. Wait for +9 secs and check if account is not inactivated, expected 0 7. Wait for +3 secs and check if account is inactivated, error 19 8. Modify accountInactivityLimit to 3 secs 9. Add few users to ou=groups subtree in the default suffix 10. Wait for 3 secs and check if account is inactivated, error 19 11. Modify accountInactivityLimit to 30 secs 12. Add few users to ou=groups subtree in the default suffix 13. Wait for 90 secs and check if account is not inactivated, expected 0 14. Wait for +28 secs and check if account is not inactivated, expected 0 15. Wait for +2 secs and check if account is inactivated, error 19 16. Replace the lastLoginTime attribute and check if account is activated 17. Modify accountInactivityLimit to 12 secs, which is the default 18. Run ldapsearch as normal user, expected 0. :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Success 15. Success 16. Success 17. Success 18. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "nologinusr" nousrs = 3 log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') log.info('Set attribute StateAttrName to createTimestamp, loginTime attr wont be considered') modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'createTimestamp') topology_st.standalone.restart(timeout=10) add_users(topology_st, suffix, subtree, userid, nousrs, 2) log.info('Sleep for 9 secs to check if account is not inactivated, expected 0') time.sleep(9) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20') time.sleep(9) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") time.sleep(3) account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Disabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '3') add_users(topology_st, suffix, subtree, userid, 2, 1) time.sleep(2) account_status(topology_st, suffix, subtree, userid, 2, 1, "Enabled") time.sleep(2) account_status(topology_st, suffix, subtree, userid, 2, 1, "Disabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30') add_users(topology_st, suffix, subtree, userid, 1, 0) time.sleep(28) account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") time.sleep(2) account_status(topology_st, suffix, subtree, userid, 1, 0, "Disabled") modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') log.info('Set attribute StateAttrName to lastLoginTime, the default') modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'lastLoginTime') topology_st.standalone.restart(timeout=10) add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') log.info('Check if account is activated, expected 0') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glnoalt_stattr(topology_st, accpol_global): """Verify if user 
account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1 :id: 8dcc3540-578f-422a-bb44-28c2cf20dbcd :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Configure Global account policy plugin with altstateattrname to 1.1 2. Add few users to ou=groups subtree in the default suffix 3. Wait till it reaches accountInactivityLimit 4. Remove lastLoginTime attribute from the user entry 5. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present 6. Wait till it reaches accountInactivityLimit and check users, expected error 19 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "nologinusr" nousrs = 3 log.info('Set attribute altStateAttrName to 1.1') modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1') topology_st.standalone.restart(timeout=10) add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 13 secs to check if account is not inactivated, expected 0') time.sleep(13) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") log.info('lastLoginTime attribute is added from the above ldap bind by userdn') time.sleep(13) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') topology_st.standalone.restart(timeout=10) add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glattr_modtime(topology_st, accpol_global): """Verify if 
user account can be inactivated based on modifyTimeStamp attribute :id: 67380839-2966-45dc-848a-167a954153e1 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Configure Global account policy plugin with altstateattrname to modifyTimestamp 2. Add few users to ou=groups subtree in the default suffix 3. Wait till the accountInactivityLimit exceeded and check users, expected error 19 4. Modify cn attribute for user, ModifyTimeStamp is updated. 5. Check if user is activated based on ModifyTimeStamp attribute, expected 0 6. Change the plugin to use createTimeStamp and remove lastLoginTime attribute 7. Check if account is inactivated, expected error 19 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "modtimeusr" nousrs = 3 log.info('Set attribute altStateAttrName to modifyTimestamp') modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'modifyTimestamp') topology_st.standalone.restart(timeout=10) add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 13 secs to check if account is inactivated, expected 0') time.sleep(13) check_attr(topology_st, suffix, subtree, userid, nousrs, "modifyTimeStamp=*") account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") attr_name = "cn" attr_value = "cnewusr" modusr_attr(topology_st, suffix, subtree, userid, nousrs, attr_name, attr_value) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') topology_st.standalone.restart(timeout=10) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, 
"Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glnoalt_nologin(topology_st, accpol_global): """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO :id: 49eda7db-84de-47ba-8f81-ac5e4de3a500 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Configure Global account policy plugin with altstateattrname to 1.1 2. Set alwaysrecordlogin to NO. 3. Add few users to ou=groups subtree in the default suffix 4. Wait till accountInactivityLimit exceeded and check users, expected 0 5. Check for lastLoginTime attribute, it should not be present 6. Wait for few more secs and check if account is not inactivated, expected 0 7. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present 8. Set altstateattrname to createTimeStamp 9. Check if user account is inactivated based on createTimeStamp attribute. 10. Account should be inactivated, expected error 19 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. 
Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "norecrodlogusr" nousrs = 3 log.info('Set attribute altStateAttrName to 1.1') modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1') log.info('Set attribute alwaysrecordlogin to No') modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'no') topology_st.standalone.restart(timeout=10) add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 13 secs to check if account is not inactivated, expected 0') time.sleep(13) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") time.sleep(3) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") log.info('Set attribute altStateAttrName to createTimestamp') modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') topology_st.standalone.restart(timeout=10) time.sleep(2) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") log.info('Reset the default attribute values') modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'yes') topology_st.standalone.restart(timeout=10) add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glinact_nsact(topology_st, accpol_global): """Verify if user account can be activated using dsidm. :id: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Configure Global account policy plugin 2. Add few users to ou=groups subtree in the default suffix 3. Wait for few secs and inactivate user using dsidm 4. Wait till accountInactivityLimit exceeded. 5. Run ldapsearch as normal user, expected error 19. 6. Activate user using ns-activate.pl script 7. 
Check if account is activated, expected error 19 8. Replace the lastLoginTime attribute and check if account is activated 9. Run ldapsearch as normal user, expected 0. :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "nsactusr" nousrs = 1 log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 3 secs to check if account is not inactivated, expected value 0') time.sleep(3) nsact_inact(topology_st, suffix, subtree, userid, nousrs, "unlock", "") log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') time.sleep(10) nsact_inact(topology_st, suffix, subtree, userid, nousrs, "unlock", "") account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") nsact_inact(topology_st, suffix, subtree, userid, nousrs, "entry-status", "inactivity limit exceeded") add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") nsact_inact(topology_st, suffix, subtree, userid, nousrs, "entry-status", "activated") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glinact_acclock(topology_st, accpol_global): """Verify if user account is activated when account is unlocked by passwordlockoutduration. :id: 43601a61-065c-4c80-a7c2-e4f6ae17beb8 :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=groups subtree in the default suffix 2. Wait for few secs and attempt invalid binds for user 3. User account should be locked based on Account Lockout policy. 4. Wait till accountInactivityLimit exceeded and check users, expected error 19 5. 
Wait for passwordlockoutduration and check if account is active 6. Check if account is unlocked, expected error 19, since account is inactivated 7. Replace the lastLoginTime attribute and check users, expected 0 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "pwlockusr" nousrs = 1 log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 3 secs and try invalid binds to lockout the user') time.sleep(3) pwacc_lock(topology_st, suffix, subtree, userid, nousrs) log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') time.sleep(10) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") log.info('Add lastLoginTime to activate the user account') add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") log.info('Checking if account is unlocked after passwordlockoutduration, but inactivated after accountInactivityLimit') pwacc_lock(topology_st, suffix, subtree, userid, nousrs) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") log.info('Account is expected to be unlocked after 5 secs of passwordlockoutduration') time.sleep(5) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") log.info('Sleep 13s and check if account inactivated based on accountInactivityLimit, expected 19') time.sleep(13) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_glnact_pwexp(topology_st, accpol_global): """Verify if user account is activated when password is reset after password is expired :id: 
3bb97992-101a-4e5a-b60a-4cc21adcc76e :setup: Standalone instance, Global account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=groups subtree in the default suffix 2. Set passwordmaxage to few secs 3. Wait for passwordmaxage to reach and check if password expired 4. Run ldapsearch as normal user, expected error 19. 5. Reset the password for user account 6. Wait till accountInactivityLimit exceeded and check users 7. Run ldapsearch as normal user, expected error 19. 8. Replace the lastLoginTime attribute and check if account is activated 9. Run ldapsearch as normal user, expected 0. :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "pwexpusr" nousrs = 1 try: topology_st.standalone.config.set('passwordmaxage', '9') except ldap.LDAPError as e: log.error('Failed to change the value of passwordmaxage to 9') raise e log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') log.info('Passwordmaxage is set to 9. 
Password will expire in 9 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 9 secs and check if password expired') time.sleep(9) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") time.sleep(4) # Passed inactivity account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") log.info('Add lastLoginTime to activate the user account') add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") # Allow password to expire again, but inactivity continues time.sleep(7) # reset password to counter expiration, we will test expiration again later userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) log.info('Sleep for 4 secs and check if account is now inactivated, expected error 19') time.sleep(4) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") # Reset inactivity and check for expiration add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") time.sleep(8) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") # Reset account userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") # Reset maxage try: topology_st.standalone.config.set('passwordmaxage', '400') except ldap.LDAPError as e: log.error('Failed to change the value of passwordmaxage to 400') raise e del_users(topology_st, suffix, subtree, 
userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_locact_inact(topology_st, accpol_local): """Verify if user account is inactivated when accountInactivityLimit is exceeded. :id: 02140e36-79eb-4d88-ba28-66478689289b :setup: Standalone instance, ou=people subtree configured for Local account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=people subtree in the default suffix 2. Wait for few secs before it reaches accountInactivityLimit and check users. 3. Run ldapsearch as normal user, expected 0 4. Wait till accountInactivityLimit is exceeded 5. Run ldapsearch as normal user and check if its inactivated, expected error 19. 6. Replace user's lastLoginTime attribute and check if its activated, expected 0 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Should return error code 19 """ suffix = DEFAULT_SUFFIX subtree = "ou=people" userid = "inactusr" nousrs = 3 log.info('AccountInactivityLimit set to 10. 
Account will be inactivated if not accessed in 10 secs') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 9 secs to check if account is not inactivated, expected value 0') time.sleep(9) log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') account_status(topology_st, suffix, subtree, userid, 3, 2, "Enabled") log.info('Sleep for 2 more secs to check if account is inactivated') time.sleep(2) account_status(topology_st, suffix, subtree, userid, 2, 0, "Disabled") log.info('Sleep +9 secs to check if account {}3 is inactivated'.format(userid)) time.sleep(9) account_status(topology_st, suffix, subtree, userid, 3, 2, "Disabled") log.info('Add lastLoginTime attribute to all users and check if its activated') add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_locinact_modrdn(topology_st, accpol_local): """Verify if user account is inactivated when moved from ou=groups to ou=people subtree. :id: 5f25bea3-fab0-4db4-b43d-2d47cc6e5ad1 :setup: Standalone instance, ou=people subtree configured for Local account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=groups subtree in the default suffix 2. Plugin configured to ou=people subtree only. 3. Wait for few secs before it reaches accountInactivityLimit and check users. 4. Run ldapsearch as normal user, expected 0 5. Wait till accountInactivityLimit exceeded 6. Move users from ou=groups subtree to ou=people subtree 7. Check if users are inactivated, expected error 19 :assert: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Should return error code 0 and 19 """ suffix = DEFAULT_SUFFIX subtree = "ou=groups" userid = "nolockusr" nousrs = 1 log.info('Account should not be inactivated since the subtree is not configured') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 11 secs to check if account is not inactivated, expected value 0') time.sleep(11) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") log.info('Moving users from ou=groups to ou=people subtree') user = UserAccount(topology_st.standalone, dn='uid=nolockusr1,ou=groups,dc=example,dc=com') try: user.rename('uid=nolockusr1', newsuperior='ou=people,dc=example,dc=com') except ldap.LDAPError as e: log.error('Failed to move user uid=nolockusr1 from ou=groups to ou=people') raise e subtree = "ou=people" log.info('Then wait for 11 secs and check if entries are inactivated') time.sleep(11) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) def test_locact_modrdn(topology_st, accpol_local): """Verify if user account is inactivated when users moved from ou=people to ou=groups subtree. :id: e821cbae-bfc3-40d3-947d-b228c809987f :setup: Standalone instance, ou=people subtree configured for Local account policy plugin configuration, set accountInactivityLimit to few secs. :steps: 1. Add few users to ou=people subtree in the default suffix 2. Wait for few secs and check if users not inactivated, expected 0. 3. Move users from ou=people to ou=groups subtree 4. Wait till accountInactivityLimit is exceeded 5. Check if users are active in ou=groups subtree, expected 0 :assert: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ suffix = DEFAULT_SUFFIX subtree = "ou=people" userid = "lockusr" nousrs = 1 log.info('Account should be inactivated since the subtree is configured') add_users(topology_st, suffix, subtree, userid, nousrs, 0) log.info('Sleep for 11 secs to check if account is inactivated, expected value 19') time.sleep(11) account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") log.info('Moving users from ou=people to ou=groups subtree') user = UserAccount(topology_st.standalone, dn='uid=lockusr1,ou=people,dc=example,dc=com') try: user.rename('uid=lockusr1', newsuperior='ou=groups,dc=example,dc=com') except ldap.LDAPError as e: log.error('Failed to move user uid=lockusr1 from ou=people to ou=groups') raise e log.info('Sleep for +2 secs and check users from both ou=people and ou=groups subtree') time.sleep(2) subtree = "ou=groups" account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") del_users(topology_st, suffix, subtree, userid, nousrs) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s {}".format(CURRENT_FILE)) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py000066400000000000000000000202571421664411400321340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_PLUGIN, SUFFIX, PLUGIN_7_BIT_CHECK # Skip on older versions pytestmark = [pytest.mark.tier1, pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] @pytest.fixture(scope="module") def enable_plugin(topology_st): """Enabling the 7-bit plugin for the environment setup""" log.info("Ticket 47431 - 0: Enable 7bit plugin...") topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) @pytest.mark.ds47431 def test_duplicate_values(topology_st, enable_plugin): """Check 26 duplicate values are treated as one :id: b23e04f1-2757-42cc-b3a2-26426c903f6d :setup: Standalone instance, enable 7bit plugin :steps: 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : nsslapd-pluginarg0 : uid nsslapd-pluginarg1 : mail nsslapd-pluginarg2 : userpassword nsslapd-pluginarg3 : , nsslapd-pluginarg4 : dc=example,dc=com 2. Set nsslapd-pluginarg2 to 'userpassword' for multiple time (ideally 27) 3. Check whether duplicate values are treated as one :expectedresults: 1. It should be modified successfully 2. It should be successful 3. It should be successful """ log.info("Ticket 47431 - 1: Check 26 duplicate values are treated as one...") expected = "str2entry_dupcheck.* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." 
log.debug('modify_s %s' % DN_7BITPLUGIN) topology_st.standalone.modify_s(DN_7BITPLUGIN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"mail"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b"userpassword"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b","), (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', ensure_bytes(SUFFIX))]) arg2 = "nsslapd-pluginarg2: userpassword" topology_st.standalone.stop() dse_ldif = topology_st.standalone.confdir + '/dse.ldif' os.system('mv %s %s.47431' % (dse_ldif, dse_ldif)) os.system( 'sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % ( arg2, dse_ldif, dse_ldif)) topology_st.standalone.start() cmdline = 'egrep -i "%s" %s' % (expected, topology_st.standalone.errlog) p = os.popen(cmdline, "r") line = p.readline() if line == "": log.error('Expected error "%s" not logged in %s' % (expected, topology_st.standalone.errlog)) assert False else: log.debug('line: %s' % line) log.info('Expected error "%s" logged in %s' % (expected, topology_st.standalone.errlog)) log.info("Ticket 47431 - 1: done") @pytest.mark.ds47431 def test_multiple_value(topology_st, enable_plugin): """Check two values belonging to one arg is fixed :id: 20c802bc-332f-4e8d-bcfb-8cd28123d695 :setup: Standalone instance, enable 7bit plugin :steps: 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : nsslapd-pluginarg0 : uid nsslapd-pluginarg0 : mail nsslapd-pluginarg1 : userpassword nsslapd-pluginarg2 : , nsslapd-pluginarg3 : dc=example,dc=com nsslapd-pluginarg4 : None (Note : While modifying add two attributes entries for nsslapd-pluginarg0) 2. Check two values belonging to one arg is fixed :expectedresults: 1. Entries should be modified successfully 2. 
Operation should be successful """ log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...") topology_st.standalone.modify_s(DN_7BITPLUGIN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), (ldap.MOD_ADD, 'nsslapd-pluginarg0', b"mail"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"userpassword"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b","), (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ensure_bytes(SUFFIX)), (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)]) # PLUGIN LOG LEVEL topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) topology_st.standalone.restart() cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) p = os.popen(cmdline, "r") i = 0 while ATTRS[i]: line = p.readline() log.debug('line - %s' % line) log.debug('ATTRS[%d] %s' % (i, ATTRS[i])) if line == "": break elif line.find(ATTRS[i]) >= 0: log.debug('%s was logged' % ATTRS[i]) else: log.error('%s was not logged.' % ATTRS[i]) assert False i = i + 1 log.info("Ticket 47431 - 2: done") @pytest.mark.ds47431 def test_missing_args(topology_st, enable_plugin): """Check missing args are fixed :id: b2814399-7ed2-4fe0-981d-b0bdbbe31cfb :setup: Standalone instance, enable 7bit plugin :steps: 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : nsslapd-pluginarg0 : None nsslapd-pluginarg1 : uid nsslapd-pluginarg2 : None nsslapd-pluginarg3 : mail nsslapd-pluginarg5 : userpassword nsslapd-pluginarg7 : , nsslapd-pluginarg9 : dc=example,dc=com (Note: While modifying add 2 entries as None) 2. Change the nsslapd-errorlog-level to 65536 3. Check missing agrs are fixed :expectedresults: 1. Entries should be modified successfully 2. Operation should be successful 3. 
Operation should be successful """ log.info("Ticket 47431 - 3: Check missing args are fixed...") topology_st.standalone.modify_s(DN_7BITPLUGIN, [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"uid"), (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None), (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b"mail"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', b"userpassword"), (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', b","), (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', ensure_bytes(SUFFIX))]) # PLUGIN LOG LEVEL topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) topology_st.standalone.stop() os.system('mv %s %s.47431' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) topology_st.standalone.start() cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) p = os.popen(cmdline, "r") i = 0 while ATTRS[i]: line = p.readline() if line == "": break elif line.find(ATTRS[i]) >= 0: log.debug('%s was logged' % ATTRS[i]) else: log.error('%s was not logged.' % ATTRS[i]) assert False i = i + 1 log.info("Ticket 47431 - 3: done") log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/attruniq_test.py000066400000000000000000000056711421664411400273220ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import ldap import logging from lib389.plugins import AttributeUniquenessPlugin from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) MAIL_ATTR_VALUE = 'non-uniq@value.net' def test_modrdn_attr_uniqueness(topology_st): """Test that we can not add two entries that have the same attr value that is defined by the plugin :id: dd763830-78b8-452e-888d-1d83d2e623f1 :setup: Standalone instance :steps: 1. Create two groups 2. Setup PLUGIN_ATTR_UNIQUENESS plugin for 'mail' attribute for the group2 3. Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON" 4. Add two test users at group1 and add not uniq 'mail' attribute to each of them 5. Move user1 to group2 6. Move user2 to group2 7. Move user2 back to group1 :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Modrdn operation should FAIL 7. 
Success """ log.debug('Create two groups') groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) group1 = groups.create(properties={'cn': 'group1'}) group2 = groups.create(properties={'cn': 'group2'}) attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") log.debug(f'Setup PLUGIN_ATTR_UNIQUENESS plugin for {MAIL_ATTR_VALUE} attribute for the group2') attruniq.create(properties={'cn': 'attruniq'}) attruniq.add_unique_attribute('mail') attruniq.add_unique_subtree(group2.dn) attruniq.enable_all_subtrees() log.debug(f'Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON"') attruniq.enable() topology_st.standalone.restart() log.debug(f'Add two test users at group1 and add not uniq {MAIL_ATTR_VALUE} attribute to each of them') users = UserAccounts(topology_st.standalone, basedn=group1.dn, rdn=None) user1 = users.create_test_user(1) user2 = users.create_test_user(2) user1.add('mail', MAIL_ATTR_VALUE) user2.add('mail', MAIL_ATTR_VALUE) log.debug('Move user1 to group2') user1.rename(f'uid={user1.rdn}', group2.dn) log.debug('Move user2 to group2') with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo: user2.rename(f'uid={user2.rdn}', group2.dn) log.fatal(f'Failed: Attribute "mail" with {MAIL_ATTR_VALUE} is accepted') assert 'attribute value already exist' in str(excinfo.value) log.debug(excinfo.value) log.debug('Move user2 to group1') user2.rename(f'uid={user2.rdn}', group1.dn)389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/cos_test.py000066400000000000000000000207331421664411400262330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 27th, 2018 @author: tbordaz ''' import logging import subprocess import pytest from lib389 import Entry from lib389.utils import * from lib389.plugins import * from lib389._constants import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 def add_user(server, uid, testbase, locality=None, tel=None, title=None): dn = 'uid=%s,%s' % (uid, testbase) log.fatal('Adding user (%s): ' % dn) server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], 'cn': 'user_%s' % uid, 'sn': 'user_%s' % uid, 'uid': uid, 'l': locality, 'title': title, 'telephoneNumber': tel, 'description': 'description real'}))) @pytest.mark.ds50053 def test_cos_operational_default(topo): """operational-default cosAttribute should not overwrite an existing value :id: 12fadff9-e14a-4c64-a3ee-51152cb8fcfb :setup: Standalone Instance :steps: 1. Create a user entry with attribute 'l' and 'telephonenumber' (real attribute with real value) 2. Create cos that defines 'l' as operational-default (virt. attr. with value != real value) 3. Create cos that defines 'telephone' as default (virt. attr. with value != real value) 4. Check that telephone is retrieved with real value 5. Check that 'l' is retrieved with real value :expectedresults: 1. should succeed 2. should succeed 3. 
should succeed """ REAL = 'real' VIRTUAL = 'virtual' TEL_REAL = '1234 is %s' % REAL TEL_VIRT = '4321 is %s' % VIRTUAL LOC_REAL = 'here is %s' % REAL LOC_VIRT = 'there is %s' % VIRTUAL TITLE_REAL = 'title is %s' % REAL inst = topo[0] PEOPLE = 'ou=people,%s' % SUFFIX add_user(inst, 'user_0', PEOPLE, locality=LOC_REAL, tel=TEL_REAL, title=TITLE_REAL) # locality cos operational-default LOC_COS_TEMPLATE = "cn=locality_template,%s" % PEOPLE LOC_COS_DEFINITION = "cn=locality_definition,%s" % PEOPLE inst.add_s(Entry((LOC_COS_TEMPLATE, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'l': LOC_VIRT}))) inst.add_s(Entry((LOC_COS_DEFINITION, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosPointerDefinition'], 'cosTemplateDn': LOC_COS_TEMPLATE, 'cosAttribute': 'l operational-default'}))) # telephone cos default TEL_COS_TEMPLATE = "cn=telephone_template,%s" % PEOPLE TEL_COS_DEFINITION = "cn=telephone_definition,%s" % PEOPLE inst.add_s(Entry((TEL_COS_TEMPLATE, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'telephonenumber': TEL_VIRT}))) inst.add_s(Entry((TEL_COS_DEFINITION, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosPointerDefinition'], 'cosTemplateDn': TEL_COS_TEMPLATE, 'cosAttribute': 'telephonenumber default'}))) # seeAlso cos operational SEEALSO_VIRT = "dc=%s,dc=example,dc=com" % VIRTUAL SEEALSO_COS_TEMPLATE = "cn=seealso_template,%s" % PEOPLE SEEALSO_COS_DEFINITION = "cn=seealso_definition,%s" % PEOPLE inst.add_s(Entry((SEEALSO_COS_TEMPLATE, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'seealso': SEEALSO_VIRT}))) inst.add_s(Entry((SEEALSO_COS_DEFINITION, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosPointerDefinition'], 'cosTemplateDn': SEEALSO_COS_TEMPLATE, 'cosAttribute': 'seealso operational'}))) # description cos override DESC_VIRT = "desc is %s" % VIRTUAL DESC_COS_TEMPLATE = "cn=desc_template,%s" % PEOPLE 
DESC_COS_DEFINITION = "cn=desc_definition,%s" % PEOPLE inst.add_s(Entry((DESC_COS_TEMPLATE, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'description': DESC_VIRT}))) inst.add_s(Entry((DESC_COS_DEFINITION, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosPointerDefinition'], 'cosTemplateDn': DESC_COS_TEMPLATE, 'cosAttribute': 'description override'}))) # title cos override TITLE_VIRT = [] for i in range(2): TITLE_VIRT.append("title is %s %d" % (VIRTUAL, i)) TITLE_COS_TEMPLATE = "cn=title_template,%s" % PEOPLE TITLE_COS_DEFINITION = "cn=title_definition,%s" % PEOPLE inst.add_s(Entry((TITLE_COS_TEMPLATE, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'title': TITLE_VIRT}))) inst.add_s(Entry((TITLE_COS_DEFINITION, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosPointerDefinition'], 'cosTemplateDn': TITLE_COS_TEMPLATE, 'cosAttribute': 'title merge-schemes'}))) # note that the search requests both attributes (it is required for operational*) ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["telephonenumber", "l"]) assert len(ents) == 1 ent = ents[0] # Check telephonenumber (specifier default) with real value => real assert ent.hasAttr('telephonenumber') value = ent.getValue('telephonenumber') log.info('Returned telephonenumber (exp. real): %s' % value) log.info('Returned telephonenumber: %d' % value.find(REAL.encode())) assert value.find(REAL.encode()) != -1 # Check 'locality' (specifier operational-default) with real value => real assert ent.hasAttr('l') value = ent.getValue('l') log.info('Returned l (exp. 
real): %s ' % value) log.info('Returned l: %d' % value.find(REAL.encode())) assert value.find(REAL.encode()) != -1 # Check 'seealso' (specifier operational) without real value => virtual assert not ent.hasAttr('seealso') ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["seealso"]) assert len(ents) == 1 ent = ents[0] value = ent.getValue('seealso') log.info('Returned seealso (exp. virtual): %s' % value) log.info('Returned seealso: %d' % value.find(VIRTUAL.encode())) assert value.find(VIRTUAL.encode()) != -1 # Check 'description' (specifier override) with real value => virtual assert not ent.hasAttr('description') ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") assert len(ents) == 1 ent = ents[0] value = ent.getValue('description') log.info('Returned description (exp. virtual): %s' % value) log.info('Returned description: %d' % value.find(VIRTUAL.encode())) assert value.find(VIRTUAL.encode()) != -1 # Check 'title' (specifier merge-schemes) with real value => real value returned ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") assert len(ents) == 1 ent = ents[0] found_real = False found_virtual = False for value in ent.getValues('title'): log.info('Returned title (exp. real): %s' % value) if value.find(VIRTUAL.encode()) != -1: found_virtual = True if value.find(REAL.encode()) != -1: found_real = True assert not found_virtual assert found_real # Check 'title ((specifier merge-schemes) without real value => real value returned ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") assert len(ents) == 1 inst.modify_s(ents[0].dn,[(ldap.MOD_DELETE, 'title', None)]) inst.restart() ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") assert len(ents) == 1 ent = ents[0] found_real = False found_virtual = False count = 0 for value in ent.getValues('title'): log.info('Returned title(exp. 
virt): %s' % value) count = count + 1 if value.find(VIRTUAL.encode()) != -1: found_virtual = True if value.find(REAL.encode()) != -1: found_real = True assert not found_real assert found_virtual assert count == 2 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/deref_aci_test.py000066400000000000000000000113711421664411400273460ustar00rootroot00000000000000import os import logging import pytest import ldap from lib389._constants import DEFAULT_SUFFIX, PASSWORD from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.idm.group import Groups from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=None) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ACCTS_DN = "ou=accounts,dc=example,dc=com" USERS_DN = "ou=users,ou=accounts,dc=example,dc=com" GROUPS_DN = "ou=groups,ou=accounts,dc=example,dc=com" ADMIN_GROUP_DN = "cn=admins,ou=groups,ou=accounts,dc=example,dc=com" ADMIN_DN = "uid=admin,ou=users,ou=accounts,dc=example,dc=com" ACCTS_ACI = ('(targetattr="userPassword")(version 3.0; acl "allow password ' + 'search"; allow(search) userdn = "ldap:///all";)') USERS_ACI = ('(targetattr = "cn || createtimestamp || description || displayname || entryusn || gecos ' + '|| gidnumber || givenname || homedirectory || initials || ' + 'loginshell || manager || modifytimestamp || objectclass || sn || title || uid || uidnumber")' + '(targetfilter = "(objectclass=posixaccount)")' + '(version 3.0;acl "Read Attributes";allow (compare,read,search) userdn = "ldap:///anyone";)') GROUPS_ACIS = [ ( '(targetattr = "businesscategory || cn || createtimestamp || description |' + '| entryusn || gidnumber || mepmanagedby || modifytimestamp || o || objectclass || ou || own' + 'er || seealso")(targetfilter = 
"(objectclass=posixgroup)")(version 3.0;acl' + '"permission:System: Read Groups";allow (compare,re' + 'ad,search) userdn = "ldap:///anyone";)' ), ( '(targetattr = "member || memberof || memberuid")(targetfilter = '+ '"(objectclass=posixgroup)")(version 3.0;acl' + '"permission:System: Read Group Membership";allow (compare,read' + ',search) userdn = "ldap:///all";)' ) ] def test_deref_and_access_control(topo): """Test that the deref plugin honors access control rules correctly The setup mimics a generic IPA DIT with its ACI's. The userpassword attribute should not be returned :id: bedb6af2-b765-479d-808c-df0348e0ec95 :setup: Standalone Instance :steps: 1. Create container entries with aci's 2. Perform deref search and make sure userpassword is not returned :expectedresults: 1. Success 2. Success """ topo.standalone.config.set('nsslapd-schemacheck', 'off') if DEBUGGING: topo.standalone.config.enable_log('audit') topo.standalone.config.set('nsslapd-errorlog-level', '128') # Accounts ou1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) ou1.create(properties={ 'ou': 'accounts', 'aci': ACCTS_ACI }) # Users ou2 = OrganizationalUnits(topo.standalone, ACCTS_DN) ou2.create(properties={ 'ou': 'users', 'aci': USERS_ACI }) # Groups ou3 = OrganizationalUnits(topo.standalone, ACCTS_DN) ou3.create(properties={ 'ou': 'groups', 'aci': GROUPS_ACIS }) # Create User users = UserAccounts(topo.standalone, USERS_DN, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update( { 'uid': 'user', 'objectclass': ['posixAccount', 'extensibleObject'], 'userpassword': PASSWORD } ) user = users.create(properties=user_props) # Create Admin user user_props = TEST_USER_PROPERTIES.copy() user_props.update( { 'uid': 'admin', 'objectclass': ['posixAccount', 'extensibleObject', 'inetuser'], 'userpassword': PASSWORD, 'memberOf': ADMIN_GROUP_DN } ) users.create(properties=user_props) # Create Admin group groups = Groups(topo.standalone, GROUPS_DN, rdn=None) group_props = { 'cn': 'admins', 
'gidNumber': '123', 'objectclass': ['posixGroup', 'extensibleObject'], 'member': ADMIN_DN } groups.create(properties=group_props) # Bind as user, then perform deref search on admin user user.rebind(PASSWORD) result, control_response = topo.standalone.dereference( 'member:cn,userpassword', base=ADMIN_GROUP_DN, scope=ldap.SCOPE_BASE) log.info('Check, that the dereference search result does not have userpassword') assert result[0][2][0].entry[0]['attrVals'][0]['type'] != 'userpassword' if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/dna_interval_test.py000066400000000000000000000052341421664411400301140ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # """Test DNA plugin functionality""" import logging import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import DNAPlugin, DNAPluginConfigs from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccounts from lib389.topologies import topology_st pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) def test_dna_interval(topology_st): """Test the dna interval works :id: 3982d698-e16b-4945-9eb4-eecaa4bac5f7 :setup: Standalone Instance :steps: 1. Set DNAZZ interval to 10 2. Create user that trigger DNA to assign a value 3. Verify DNA is working 4. Make update to entry that triggers DNA again 5. Verify interval is applied as expected :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ inst = topology_st.standalone plugin = DNAPlugin(inst) ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ou_people = ous.get("People") log.info("Add dna plugin config entry...") configs = DNAPluginConfigs(inst, plugin.dn) configs.create(properties={'cn': 'dna config', 'dnaType': 'uidNumber', 'dnaMaxValue': '1000', 'dnaMagicRegen': '-1', 'dnaFilter': '(objectclass=top)', 'dnaScope': ou_people.dn, 'dnaNextValue': '10', 'dnaInterval': '10'}) log.info("Enable the DNA plugin and restart...") plugin.enable() inst.restart() # Create user and check interval log.info("Test DNA is working...") users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) log.info('Adding user1') user = users.create(properties={ 'sn': 'interval', 'cn': 'interval', 'uid': 'interval', 'uidNumber': '-1', # Magic regen value 'gidNumber': '111', 'givenname': 'interval', 'homePhone': '0861234567', 'carLicense': '131D16674', 'mail': 'interval@whereever.com', 'homeDirectory': '/home/interval'}) # Verify DNA works assert user.get_attr_val_utf8_l('uidNumber') == '10' # Make update and verify interval was applied log.info("Test DNA interval assignment is working...") user.replace('uidNumber', '-1') assert user.get_attr_val_utf8_l('uidNumber') == '20' 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/dna_test.py000066400000000000000000000055761421664411400262210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # """Test DNA plugin functionality""" import logging import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import DNAPlugin, DNAPluginSharedConfigs, DNAPluginConfigs from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccounts from lib389.topologies import topology_st import ldap pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) @pytest.mark.ds47937 def test_dnatype_only_valid(topology_st): """Test that DNA plugin only accepts valid attributes for "dnaType" :id: 0878ecff-5fdc-47d7-8c8f-edf4556f9746 :setup: Standalone Instance :steps: 1. Create a use entry 2. Create DNA shared config entry container 3. Create DNA shared config entry 4. Add DNA plugin config entry 5. Enable DNA plugin 6. Restart the instance 7. Replace dnaType with invalid value :expectedresults: 1. Successful 2. Successful 3. Successful 4. Successful 5. Successful 6. Successful 7. Unwilling to perform exception should be raised """ inst = topology_st.standalone plugin = DNAPlugin(inst) log.info("Creating an entry...") users = UserAccounts(inst, DEFAULT_SUFFIX) users.create_test_user(uid=1) log.info("Creating \"ou=ranges\"...") ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ou_ranges = ous.create(properties={'ou': 'ranges'}) ou_people = ous.get("People") log.info("Creating DNA shared config entry...") shared_configs = DNAPluginSharedConfigs(inst, ou_ranges.dn) shared_configs.create(properties={'dnaHostname': str(inst.host), 'dnaPortNum': str(inst.port), 'dnaRemainingValues': '9501'}) log.info("Add dna plugin config entry...") configs = DNAPluginConfigs(inst, plugin.dn) config = configs.create(properties={'cn': 'dna config', 'dnaType': 'description', 'dnaMaxValue': '10000', 'dnaMagicRegen': '0', 'dnaFilter': '(objectclass=top)', 'dnaScope': ou_people.dn, 'dnaNextValue': '500', 'dnaSharedCfgDN': ou_ranges.dn}) log.info("Enable the DNA plugin...") plugin.enable() log.info("Restarting the 
server...") inst.restart() log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...") with pytest.raises(ldap.UNWILLING_TO_PERFORM): config.replace('dnaType', 'foo') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/entryusn_test.py000066400000000000000000000165231421664411400273400ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os import ldap import logging import pytest import time from lib389._constants import DEFAULT_SUFFIX from lib389.config import Config from lib389.plugins import USNPlugin, MemberOfPlugin from lib389.idm.group import Groups from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnit from lib389.tombstone import Tombstones from lib389.rootdse import RootDSE from lib389.topologies import topology_st, topology_m2 pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) USER_NUM = 10 GROUP_NUM = 3 def check_entryusn_no_duplicates(entryusn_list): """Check that all values in the list are unique""" if len(entryusn_list) > len(set(entryusn_list)): raise AssertionError(f"EntryUSN values have duplicates, please, check logs") def check_lastusn_after_restart(inst): """Check that last usn is the same after restart""" root_dse = RootDSE(inst) last_usn_before = root_dse.get_attr_val_int("lastusn;userroot") inst.restart() last_usn_after = root_dse.get_attr_val_int("lastusn;userroot") assert last_usn_after == last_usn_before @pytest.fixture(scope="module") def setup(topology_st, request): """ Enable USN plug-in Enable MEMBEROF plugin Add test entries """ inst = topology_st.standalone log.info("Enable the USN plugin...") plugin = USNPlugin(inst) plugin.enable() log.info("Enable the MEMBEROF plugin...") plugin = MemberOfPlugin(inst) plugin.enable() inst.restart() users_list = [] log.info("Adding 
test entries...") users = UserAccounts(inst, DEFAULT_SUFFIX) for id in range(USER_NUM): user = users.create_test_user(uid=id) users_list.append(user) groups_list = [] log.info("Adding test groups...") groups = Groups(inst, DEFAULT_SUFFIX) for id in range(GROUP_NUM): group = groups.create(properties={'cn': f'test_group{id}'}) groups_list.append(group) def fin(): for user in users_list: try: user.delete() except ldap.NO_SUCH_OBJECT: pass for group in groups_list: try: group.delete() except ldap.NO_SUCH_OBJECT: pass request.addfinalizer(fin) return {"users": users_list, "groups": groups_list} def test_entryusn_no_duplicates(topology_st, setup): """Verify that entryUSN is not duplicated after memberOf operation :id: 1a7d382d-1214-4d56-b9c2-9c4ed57d1683 :setup: Standalone instance, Groups and Users, USN and memberOf are enabled :steps: 1. Add a member to group 1 2. Add a member to group 1 and 2 3. Check that entryUSNs are different 4. Check that lastusn before and after a restart are the same :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ inst = topology_st.standalone config = Config(inst) config.replace('nsslapd-accesslog-level', '260') # Internal op config.replace('nsslapd-errorlog-level', '65536') config.replace('nsslapd-plugin-logging', 'on') entryusn_list = [] users = setup["users"] groups = setup["groups"] groups[0].replace('member', users[0].dn) entryusn_list.append(users[0].get_attr_val_int('entryusn')) log.info(f"{users[0].dn}_1: {entryusn_list[-1:]}") entryusn_list.append(groups[0].get_attr_val_int('entryusn')) log.info(f"{groups[0].dn}_1: {entryusn_list[-1:]}") check_entryusn_no_duplicates(entryusn_list) groups[1].replace('member', [users[0].dn, users[1].dn]) entryusn_list.append(users[0].get_attr_val_int('entryusn')) log.info(f"{users[0].dn}_2: {entryusn_list[-1:]}") entryusn_list.append(users[1].get_attr_val_int('entryusn')) log.info(f"{users[1].dn}_2: {entryusn_list[-1:]}") entryusn_list.append(groups[1].get_attr_val_int('entryusn')) log.info(f"{groups[1].dn}_2: {entryusn_list[-1:]}") check_entryusn_no_duplicates(entryusn_list) check_lastusn_after_restart(inst) def test_entryusn_is_same_after_failure(topology_st, setup): """Verify that entryUSN is the same after failed operation :id: 1f227533-370a-48c1-b920-9b3b0bcfc32e :setup: Standalone instance, Groups and Users, USN and memberOf are enabled :steps: 1. Get current group's entryUSN value 2. Try to modify the group with an invalid syntax 3. Get new group's entryUSN value and compare with old 4. Check that lastusn before and after a restart are the same :expectedresults: 1. Success 2. Invalid Syntax error 3. Should be the same 4. 
Success """ inst = topology_st.standalone users = setup["users"] # We need this update so we get the latest USN pointed to our entry users[0].replace('description', 'update') entryusn_before = users[0].get_attr_val_int('entryusn') users[0].replace('description', 'update') try: users[0].replace('uid', 'invalid update') except ldap.NOT_ALLOWED_ON_RDN: pass users[0].replace('description', 'second update') entryusn_after = users[0].get_attr_val_int('entryusn') # entryUSN should be OLD + 2 (only two user updates) assert entryusn_after == (entryusn_before + 2) check_lastusn_after_restart(inst) def test_entryusn_after_repl_delete(topology_m2): """Verify that entryUSN is incremented on 1 after delete operation which creates a tombstone :id: 1704cf65-41bc-4347-bdaf-20fc2431b218 :setup: An instance with replication, Users, USN enabled :steps: 1. Try to delete a user 2. Check the tombstone has the incremented USN 3. Try to delete ou=People with users 4. Check the entry has a not incremented entryUSN :expectedresults: 1. Success 2. Success 3. Should fail with Not Allowed On Non-leaf error 4. 
Success """ inst = topology_m2.ms["supplier1"] plugin = USNPlugin(inst) plugin.enable() inst.restart() users = UserAccounts(inst, DEFAULT_SUFFIX) try: user_1 = users.create_test_user() user_rdn = user_1.rdn tombstones = Tombstones(inst, DEFAULT_SUFFIX) user_1.replace('description', 'update_ts') user_usn = user_1.get_attr_val_int('entryusn') user_1.delete() time.sleep(1) # Gives a little time for tombstone creation to complete ts = tombstones.get(user_rdn) ts_usn = ts.get_attr_val_int('entryusn') assert (user_usn + 1) == ts_usn user_1 = users.create_test_user() org = OrganizationalUnit(inst, f"ou=People,{DEFAULT_SUFFIX}") org.replace('description', 'update_ts') ou_usn_before = org.get_attr_val_int('entryusn') try: org.delete() except ldap.NOT_ALLOWED_ON_NONLEAF: pass ou_usn_after = org.get_attr_val_int('entryusn') assert ou_usn_before == ou_usn_after finally: try: user_1.delete() except ldap.NO_SUCH_OBJECT: pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/managed_entry_test.py000066400000000000000000000412011421664411400302550ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import time from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts from lib389.idm.account import Account from lib389._constants import DEFAULT_SUFFIX from lib389.idm.group import Groups from lib389.config import Config from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate from lib389.idm.nscontainer import nsContainers from lib389.idm.domain import Domain import ldap pytestmark = pytest.mark.tier1 USER_PASSWORD = 'password' @pytest.fixture(scope="module") def _create_inital(topo): """ Will create entries for this module """ meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) mep_template1 = meps.create( properties={'cn': 'UPG Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup', 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split( '|')}) conf_mep = MEPConfigs(topo.standalone) conf_mep.create(properties={'cn': 'UPG Definition1', 'originScope': f'cn=Users,{DEFAULT_SUFFIX}', 'originFilter': 'objectclass=posixaccount', 'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}', 'managedTemplate': mep_template1.dn}) container = nsContainers(topo.standalone, DEFAULT_SUFFIX) for cn in ['Users', 'Groups']: container.create(properties={'cn': cn}) def test_binddn_tracking(topo, _create_inital): """Test Managed Entries basic functionality :id: ea2ddfd4-aaec-11ea-8416-8c16451d917b :setup: Standalone Instance :steps: 1. Set nsslapd-plugin-binddn-tracking attribute under cn=config 2. Add user 3. Managed Entry Plugin runs against managed entries upon any update without validating 4. verify creation of User Private Group with its time stamp value 5. Modify the SN attribute which is not mapped with managed entry 6. run ModRDN operation and check the User Private group 7. Check the time stamp of UPG should be changed now 8. 
Check the creatorsname should be user dn and internalCreatorsname should be plugin name 9. Check if a managed group entry was created :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success """ config = Config(topo.standalone) # set nsslapd-plugin-binddn-tracking attribute under cn=config config.replace('nsslapd-plugin-binddn-tracking', 'on') # Add user user = UserAccounts(topo.standalone, f'cn=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}' entry = Account(topo.standalone, f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}') # Managed Entry Plugin runs against managed entries upon any update without validating # verify creation of User Private Group with its time stamp value stamp1 = entry.get_attr_val_utf8('modifyTimestamp') user.replace('sn', 'NewSN_modified') stamp2 = entry.get_attr_val_utf8('modifyTimestamp') # Modify the SN attribute which is not mapped with managed entry # Check the time stamp of UPG should not be changed assert stamp1 == stamp2 time.sleep(1) # run ModRDN operation and check the User Private group user.rename(new_rdn='uid=UserNewRDN', newsuperior='cn=Users,dc=example,dc=com') assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}' entry = Account(topo.standalone, f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}') stamp3 = entry.get_attr_val_utf8('modifyTimestamp') # Check the time stamp of UPG should be changed now assert stamp2 != stamp3 time.sleep(1) user.replace('gidNumber', '1') stamp4 = entry.get_attr_val_utf8('modifyTimestamp') assert stamp4 != stamp3 # Check the creatorsname should be user dn and internalCreatorsname should be plugin name assert entry.get_attr_val_utf8('creatorsname') == 'cn=directory manager' assert entry.get_attr_val_utf8('internalCreatorsname') == 'cn=Managed Entries,cn=plugins,cn=config' assert 
entry.get_attr_val_utf8('modifiersname') == 'cn=directory manager' user.delete() config.replace('nsslapd-plugin-binddn-tracking', 'off') class WithObjectClass(Account): def __init__(self, instance, dn=None): super(WithObjectClass, self).__init__(instance, dn) self._rdn_attribute = 'uid' self._create_objectclasses = ['top', 'person', 'inetorgperson'] def test_mentry01(topo, _create_inital): """Test Managed Entries basic functionality :id: 9b87493b-0493-46f9-8364-6099d0e5d806 :setup: Standalone Instance :steps: 1. Check the plug-in status 2. Add Template and definition entry 3. Add our org units 4. Add users with PosixAccount ObjectClass and verify creation of User Private Group 5. Disable the plug-in and check the status 6. Enable the plug-in and check the status the plug-in is disabled and creation of UPG should fail 7. Add users with PosixAccount ObjectClass and verify creation of User Private Group 8. Add users, run ModRDN operation and check the User Private group 9. Add users, run LDAPMODIFY to change the gidNumber and check the User Private group 10. Checking whether creation of User Private group fails for existing group entry 11. Checking whether adding of posixAccount objectClass to existing user creates UPG 12. Running ModRDN operation and checking the user private groups mepManagedBy attribute 13. Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG 14. Change the RDN of template entry, DSA Unwilling to perform error expected 15. Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted :expected results: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. Success 13. Success 14. Fail(Unwilling to perform ) 15. 
Success """ # Check the plug-in status mana = ManagedEntriesPlugin(topo.standalone) assert mana.status() # Add Template and definition entry org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}') meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) mep_template1 = meps.create(properties={ 'cn': 'UPG Template1', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup', 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) conf_mep = MEPConfigs(topo.standalone) conf_mep.create(properties={ 'cn': 'UPG Definition2', 'originScope': org1.dn, 'originFilter': 'objectclass=posixaccount', 'managedBase': org2.dn, 'managedTemplate': mep_template1.dn}) # Add users with PosixAccount ObjectClass and verify creation of User Private Group user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' # Disable the plug-in and check the status mana.disable() user.delete() topo.standalone.restart() # Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() assert not user.get_attr_val_utf8('mepManagedEntry') # Enable the plug-in and check the status mana.enable() user.delete() topo.standalone.restart() # Add users with PosixAccount ObjectClass and verify creation of User Private Group user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' # Add users, run ModRDN operation and check the User Private group # Add users, run LDAPMODIFY to change the gidNumber and check the User Private group 
user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}' user.replace('gidNumber', '20209') entry = Account(topo.standalone, f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') assert entry.get_attr_val_utf8('gidNumber') == '20209' user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309')) assert entry.get_attr_val_utf8('gidNumber') == '31309' user.delete() # Checking whether creation of User Private group fails for existing group entry Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'}) user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() with pytest.raises(ldap.NO_SUCH_OBJECT): entry.status() user.delete() # Checking whether adding of posixAccount objectClass to existing user creates UPG # Add Users without posixAccount objectClass users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}') user_properties1 = {'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': 'sasa@sasa.com', 'telephoneNumber': '123'} user = users.create(properties=user_properties1) assert not user.get_attr_val_utf8('mepManagedEntry') # Add posixAccount objectClass user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']), ('homeDirectory', '/home/ok'), ('uidNumber', '61603'), ('gidNumber', '61603')) assert not user.get_attr_val_utf8('mepManagedEntry') user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') # Add inetuser objectClass user.replace_many( ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', 'organizationalPerson', 'nsMemberOf', 'nsAccount', 'person', 'mepOriginEntry', 'inetuser']), ('memberOf', entry.dn)) assert entry.status() user.delete() user = UserAccounts(topo.standalone, 
f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') # Add groupofNames objectClass user.replace_many( ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', 'organizationalPerson', 'nsMemberOf', 'nsAccount', 'person', 'mepOriginEntry', 'groupofNames']), ('memberOf', user.dn)) assert entry.status() # Running ModRDN operation and checking the user private groups mepManagedBy # attribute was also reset because the modrdn on the origin will do a modrdn # on checkManagedEntry to match the new rdn value of the origin entry checkManagedEntry = UserAccounts(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None) check_entry = checkManagedEntry.create(properties={ 'objectclass': ['top', 'extensibleObject'], 'uid': 'CheckModRDN', 'uidNumber': '12', 'gidNumber': '12', 'homeDirectory': '/home', 'sn': 'tmp', 'cn': 'tmp', }) user.replace('mepManagedEntry', check_entry.dn) user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') assert user.get_attr_val_utf8_l('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}'.lower() # Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG user.remove('mepManagedEntry', f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com') assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}' # Change the RDN of template entry, DSA Unwilling to perform error expected mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}') with pytest.raises(ldap.UNWILLING_TO_PERFORM): mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com') # Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted before = user.get_attr_val_utf8('mepManagedEntry') user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com') assert 
user.get_attr_val_utf8('mepManagedEntry') != before def test_managed_entry_removal(topo): """Check that we can't remove managed entry manually :id: cf9c5be5-97ef-46fc-b199-8346acf4c296 :setup: Standalone Instance :steps: 1. Enable the plugin 2. Restart the instance 3. Add our org units 4. Set up config entry and template entry for the org units 5. Add an entry that meets the MEP scope 6. Check if a managed group entry was created 7. Try to remove the entry while bound as Admin (non-DM) 8. Remove the entry while bound as DM 9. Check that the managing entry can be deleted too :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Should fail 8. Success 9. Success """ inst = topo.standalone # Add ACI so we can test that non-DM user can't delete managed entry domain = Domain(inst, DEFAULT_SUFFIX) ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" ACI_TARGETATTR = "(targetattr = *)" ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT domain.add('aci', ACI_BODY) # stop the plugin, and start it plugin = ManagedEntriesPlugin(inst) plugin.disable() plugin.enable() # Add our org units ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) ou_people = ous.create(properties={'ou': 'managed_people'}) ou_groups = ous.create(properties={'ou': 'managed_groups'}) mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) mep_template1 = mep_templates.create(properties={ 'cn': 'MEP template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') }) mep_configs = MEPConfigs(inst) mep_configs.create(properties={'cn': 'config', 'originScope': ou_people.dn, 'originFilter': 'objectclass=posixAccount', 'managedBase': ou_groups.dn, 'managedTemplate': mep_template1.dn}) inst.restart() # Add an entry that meets the MEP scope 
test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) managing_entry = test_users_m1.create_test_user(1001) managing_entry.reset_password(USER_PASSWORD) user_bound_conn = managing_entry.bind(USER_PASSWORD) # Get the managed entry managed_groups = Groups(inst, ou_groups.dn, rdn=None) managed_entry = managed_groups.get(managing_entry.rdn) # Check that the managed entry was created assert managed_entry.exists() # Try to remove the entry while bound as Admin (non-DM) managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) managed_entry_user_conn = managed_groups_user_conn.get(managed_entry.rdn) with pytest.raises(ldap.UNWILLING_TO_PERFORM): managed_entry_user_conn.delete() assert managed_entry_user_conn.exists() # Remove the entry while bound as DM managed_entry.delete() assert not managed_entry.exists() # Check that the managing entry can be deleted too managing_entry.delete() assert not managing_entry.exists() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/memberof_test.py000066400000000000000000003455251421664411400272540ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import PLUGIN_MEMBER_OF, SUFFIX pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv('DEBUGGING', False) logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX PLUGIN_TYPE = 'nsslapd-pluginType' PLUGIN_MEMBEROF_GRP_ATTR = 'memberofgroupattr' PLUGIN_ENABLED = 'nsslapd-pluginEnabled' USER_RDN = "user" USERS_CONTAINER = "ou=people,%s" % SUFFIX GROUP_RDN = "group" GROUPS_CONTAINER = "ou=groups,%s" % SUFFIX def _set_memberofgroupattr_add(topology_st, values): topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_ADD, PLUGIN_MEMBEROF_GRP_ATTR, ensure_bytes(values))]) def _get_user_rdn(ext): return ensure_bytes("uid=%s_%s" % (USER_RDN, ext)) def _get_user_dn(ext): return ensure_bytes("%s,%s" % (ensure_str(_get_user_rdn(ext)), USERS_CONTAINER)) def _get_group_rdn(ext): return ensure_bytes("cn=%s_%s" % (GROUP_RDN, ext)) def _get_group_dn(ext): return ensure_bytes("%s,%s" % (ensure_str(_get_group_rdn(ext)), GROUPS_CONTAINER)) def _create_user(topology_st, ext): user_dn = ensure_str(_get_user_dn(ext)) topology_st.standalone.add_s(Entry((user_dn, { 'objectclass': 'top extensibleObject'.split(), 'uid': ensure_str(_get_user_rdn(ext)) }))) log.info("Create user %s" % user_dn) return ensure_bytes(user_dn) def _delete_user(topology_st, ext): user_dn = ensure_str(_get_user_dn(ext)) topology_st.standalone.delete_s(user_dn) log.info("Delete user %s" % user_dn) def _create_group(topology_st, ext): group_dn = ensure_str(_get_group_dn(ext)) topology_st.standalone.add_s(Entry((group_dn, { 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), 'ou': 
            ensure_str(_get_group_rdn(ext))
    })))
    log.info("Create group %s" % group_dn)
    return ensure_bytes(group_dn)


def _delete_group(topology_st, ext):
    """Delete the test group named after ``ext``."""
    group_dn = ensure_str(_get_group_dn(ext))
    topology_st.standalone.delete_s(group_dn)
    log.info("Delete group %s" % group_dn)


def _check_memberattr(topology_st, entry, memberattr, value):
    """Return True if ``entry`` holds ``value`` in attribute ``memberattr``.

    The value comparison is case-insensitive (DN matching).
    """
    log.info("Check %s.%s = %s" % (entry, memberattr, value))
    entry = topology_st.standalone.getEntry(ensure_str(entry), ldap.SCOPE_BASE,
                                            '(objectclass=*)', [memberattr])
    if not entry.hasAttr(ensure_str(memberattr)):
        return False
    found = False
    for val in entry.getValues(ensure_str(memberattr)):
        log.info("%s: %s" % (memberattr, ensure_str(val)))
        if ensure_str(value.lower()) == ensure_str(val.lower()):
            found = True
            break
    return found


def _check_memberof(topology_st, member, group):
    """Return True if ``member``'s entry lists ``group`` in its memberof attribute.

    The group DN comparison is case-insensitive.
    """
    log.info("Lookup memberof from %s" % member)
    entry = topology_st.standalone.getEntry(ensure_str(member), ldap.SCOPE_BASE,
                                            '(objectclass=*)', ['memberof'])
    if not entry.hasAttr('memberof'):
        return False
    found = False
    for val in entry.getValues('memberof'):
        log.info("memberof: %s" % ensure_str(val))
        if ensure_str(group.lower()) == ensure_str(val.lower()):
            found = True
            log.info("--> membership verified")
            break
    return found


def test_betxnpostoperation_replace(topology_st):
    """Test modify the memberof plugin operation to use the new type

    :id: d222af17-17a6-48a0-8f22-a38306726a91
    :setup: Standalone instance
    :steps:
        1. Set plugin type to betxnpostoperation
        2. Check is was changed
    :expectedresults:
        1. Success
        2.
Success """ topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, PLUGIN_TYPE, b'betxnpostoperation')]) topology_st.standalone.restart() ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_TYPE]) assert ent.hasAttr(PLUGIN_TYPE) assert ent.getValue(PLUGIN_TYPE) == b'betxnpostoperation' def test_memberofgroupattr_add(topology_st): """Check multiple grouping attributes supported :id: d222af17-17a6-48a0-8f22-a38306726a92 :setup: Standalone instance :steps: 1. Add memberofgroupattr - 'uniqueMember' 2. Check we have 'uniqueMember' and 'member' values :expectedresults: 1. Success 2. Success """ _set_memberofgroupattr_add(topology_st, 'uniqueMember') ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_MEMBEROF_GRP_ATTR]) assert ent.hasAttr(PLUGIN_MEMBEROF_GRP_ATTR) assert b'member'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] assert b'uniqueMember'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] def test_enable(topology_st): """Check the plug-in is started :id: d222af17-17a6-48a0-8f22-a38306726a93 :setup: Standalone instance :steps: 1. Enable the plugin 2. Restart the instance :expectedresults: 1. Success 2. Server should start and plugin should be on """ log.info("Enable MemberOf plugin") topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topology_st.standalone.restart() ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_ENABLED]) assert ent.hasAttr(PLUGIN_ENABLED) assert ent.getValue(PLUGIN_ENABLED).lower() == b'on' def test_member_add(topology_st): """MemberOf attribute should be successfully added to both the users :id: d222af17-17a6-48a0-8f22-a38306726a94 :setup: Standalone instance :steps: 1. Create user and groups 2. Add the users as members to the groups 3. Check the membership :expectedresults: 1. Success 2. Success 3. 
Success """ memofenh1 = _create_user(topology_st, 'memofenh1') memofenh2 = _create_user(topology_st, 'memofenh2') memofegrp1 = _create_group(topology_st, 'memofegrp1') memofegrp2 = _create_group(topology_st, 'memofegrp2') mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh2)] log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp2)) log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) # assert enh1 is member of grp1 and grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is member of grp1 and grp2 assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) def test_member_delete_gr1(topology_st): """Partial removal of memberofgroupattr: removing member attribute from Group1 :id: d222af17-17a6-48a0-8f22-a38306726a95 :setup: Standalone instance :steps: 1. Delete a member: enh1 in grp1 2. Check the states of the members were changed accordingly :expectedresults: 1. Success 2. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp1)) mods = [(ldap.MOD_DELETE, 'member', memofenh1)] topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) # assert enh1 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is member of grp1 and is member of grp2 assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) def test_member_delete_gr2(topology_st): """Partial removal of memberofgroupattr: removing uniqueMember attribute from Group2 :id: d222af17-17a6-48a0-8f22-a38306726a96 :setup: Standalone instance :steps: 1. Delete a uniqueMember: enh2 in grp2 2. Check the states of the members were changed accordingly :expectedresults: 1. Success 2. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh1, memofegrp1)) mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) # assert enh1 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) def test_member_delete_all(topology_st): """Complete removal of memberofgroupattr :id: d222af17-17a6-48a0-8f22-a38306726a97 :setup: Standalone instance :steps: 1. Delete the rest of the members 2. Check the states of the members were changed accordingly :expectedresults: 1. Success 2. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp2)) mods = [(ldap.MOD_DELETE, 'member', memofenh1)] topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) # assert enh1 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is member of grp1 and is NOT member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) def test_member_after_restart(topology_st): """MemberOf attribute should be present on both the users :id: d222af17-17a6-48a0-8f22-a38306726a98 :setup: Standalone instance :steps: 1. Add a couple of members to the groups 2. Restart the instance 3. Check the states of the members were changed accordingly :expectedresults: 1. Success 2. Success 3. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') mods = [(ldap.MOD_ADD, 'member', memofenh1)] log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) # assert enh1 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) log.info("Remove uniqueMember as a memberofgrpattr") topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_DELETE, PLUGIN_MEMBEROF_GRP_ATTR, [b'uniqueMember'])]) topology_st.standalone.restart() log.info("Assert that this change of configuration did change the already set values") # assert enh1 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) _set_memberofgroupattr_add(topology_st, 'uniqueMember') topology_st.standalone.restart() def test_memberofgroupattr_uid(topology_st): """MemberOf attribute should not be added to the user since memberuid is not a DN syntax attribute :id: d222af17-17a6-48a0-8f22-a38306726a99 :setup: Standalone instance :steps: 1. 
Try to add memberUid to the group :expectedresults: 1. It should fail with Unwilling to perform error """ try: _set_memberofgroupattr_add(topology_st, 'memberUid') log.error("Setting 'memberUid' as memberofgroupattr should be rejected") assert False except ldap.UNWILLING_TO_PERFORM: log.error("Setting 'memberUid' as memberofgroupattr is rejected (expected)") assert True def test_member_add_duplicate_usr1(topology_st): """Duplicate member attribute to groups :id: d222af17-17a6-48a0-8f22-a38306726a10 :setup: Standalone instance :steps: 1. Try to add a member: enh1 which already exists :expectedresults: 1. It should fail with Type of value exists error """ memofenh1 = _get_user_dn('memofenh1') memofegrp1 = _get_group_dn('memofegrp1') # assert enh1 is member of grp1 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) mods = [(ldap.MOD_ADD, 'member', memofenh1)] log.info("Try %s is memberof %s (member)" % (memofenh1, memofegrp1)) try: topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) log.error( "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh1, memofegrp1)) assert False except ldap.TYPE_OR_VALUE_EXISTS: log.error("%s already member of %s --> fail (expected)" % (memofenh1, memofegrp1)) assert True def test_member_add_duplicate_usr2(topology_st): """Duplicate uniqueMember attributes to groups :id: d222af17-17a6-48a0-8f22-a38306726a11 :setup: Standalone instance :steps: 1. Try to add a uniqueMember: enh2 which already exists :expectedresults: 1. 
It should fail with Type of value exists error """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') log.info("Check initial status") # assert enh1 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] log.info("Try %s is memberof %s (member)" % (memofenh2, memofegrp2)) try: topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) log.error( "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh2, memofegrp2)) assert False except ldap.TYPE_OR_VALUE_EXISTS: log.error("%s already member of %s --> fail (expected)" % (memofenh2, memofegrp2)) assert True log.info("Check final status") # assert enh1 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) #def test_memberof_MultiGrpAttr_012(topology_st): # """ # MemberURL attritbute should reflect the modrdn changes in the group. 
# # This test has been covered in MODRDN test suite # # At the beginning: # memofenh1 is memberof memofegrp1 # memofenh2 is memberof memofegrp2 # # At the end # memofenh1 is memberof memofegrp1 # memofenh2 is memberof memofegrp2 # """ # pass #def test_memberof_MultiGrpAttr_013(topology_st): # """ # MemberURL attritbute should reflect the modrdn changes in the group. # # This test has been covered in MODRDN test suite # # At the beginning: # memofenh1 is memberof memofegrp1 # memofenh2 is memberof memofegrp2 # # At the end # memofenh1 is memberof memofegrp1 # memofenh2 is memberof memofegrp2 # """ # pass def test_member_uniquemember_same_user(topology_st): """Check the situation when both member and uniqueMember pointing to the same user :id: d222af17-17a6-48a0-8f22-a38306726a13 :setup: Standalone instance, grp3, enh1 is member of - grp1 (member) - not grp2 enh2 is member of - not grp1 - grp2 (uniquemember) :steps: 1. Add member: enh1 and uniqueMember: enh1 to grp3 2. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (member uniquemember) 3. Delete member: enh1 from grp3 4. Add member: enh2 to grp3 5. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) 6. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') log.info("Check initial status") # assert enh1 is member of grp1 and is NOT member of grp2 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) # assert enh2 is NOT member of grp1 and is member of grp2 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) memofegrp3 = _create_group(topology_st, 'memofegrp3') mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp3)) log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp3)) topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (member uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) mods = [(ldap.MOD_DELETE, 'member', memofenh1)] log.info("Update %s is not memberof %s (member)" % (memofenh1, memofegrp3)) topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) mods = [(ldap.MOD_ADD, 'member', memofenh2)] log.info("Update %s is memberof %s (member)" % (memofenh2, memofegrp3)) topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) # assert enh2 is member of # - not grp1 # - grp2 
(uniquemember) # - grp3 (member) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) ent = topology_st.standalone.getEntry(ensure_str(memofegrp3), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) assert ent.hasAttr('member') assert ensure_bytes(memofenh1) not in ent.getValues('member') assert ensure_bytes(memofenh2) in ent.getValues('member') assert ent.hasAttr('uniqueMember') assert ensure_bytes(memofenh1) in ent.getValues('uniqueMember') assert ensure_bytes(memofenh2) not in ent.getValues('uniqueMember') log.info("Checking final status") # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) def test_member_not_exists(topology_st): """Check the situation when we add non-existing users to member attribute :id: d222af17-17a6-48a0-8f22-a38306726a14 :setup: Standalone instance, grp015, enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) :steps: 1. Add member: dummy1 and uniqueMember: dummy2 to grp015 2. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp015 3. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp015 :expectedresults: 1. Success 2. Success 3. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') dummy1 = _get_user_dn('dummy1') dummy2 = _get_user_dn('dummy2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') memofegrp3 = _get_group_dn('memofegrp3') log.info("Checking Initial status") # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) memofegrp015 = _create_group(topology_st, 'memofegrp015') mods = [(ldap.MOD_ADD, 'member', dummy1), (ldap.MOD_ADD, 'uniqueMember', dummy2)] log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp015)) log.info("Update %s is memberof %s (uniqueMember)" % (dummy2, memofegrp015)) topology_st.standalone.modify_s(ensure_str(memofegrp015), mods) ent = topology_st.standalone.getEntry(ensure_str(memofegrp015), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) assert ent.hasAttr('member') assert ensure_bytes(dummy1) in ent.getValues('member') assert ensure_bytes(dummy2) not in ent.getValues('member') assert ent.hasAttr('uniqueMember') assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') assert ensure_bytes(dummy2) in ent.getValues('uniqueMember') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, 
group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) def test_member_not_exists_complex(topology_st): """Check the situation when we modify non-existing users member attribute :id: d222af17-17a6-48a0-8f22-a38306726a15 :setup: Standalone instance, enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp015 enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp015 :steps: 1. Add member: enh1 and uniqueMember: enh1 to grp016 2. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp15 - grp16 (member uniquemember) 3. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp15 - not grp16 4. Add member: dummy1 and uniqueMember: dummy2 to grp016 5. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp15 - grp16 (member uniquemember) 6. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp15 - not grp16 :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') dummy1 = _get_user_dn('dummy1') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') memofegrp3 = _get_group_dn('memofegrp3') memofegrp015 = _get_group_dn('memofegrp015') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) memofegrp016 = _create_group(topology_st, 'memofegrp016') mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp016)) log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp016)) topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) # assert enh2 is member of # - not grp1 # - grp2 
(uniquemember) # - grp3 (member) # - not grp15 # - not grp16 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) mods = [(ldap.MOD_ADD, 'member', dummy1), ] log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp016)) topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) assert ent.hasAttr('member') assert ensure_bytes(dummy1) in ent.getValues('member') assert ent.hasAttr('uniqueMember') assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') mods = [(ldap.MOD_ADD, 'uniqueMember', dummy1), ] log.info("Update %s is memberof %s (uniqueMember)" % (dummy1, memofegrp016)) topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) assert ent.hasAttr('member') assert ensure_bytes(dummy1) in ent.getValues('member') assert ent.hasAttr('uniqueMember') assert ensure_bytes(dummy1) in ent.getValues('uniqueMember') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 
(member) # - not grp15 # - not grp16 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) def test_complex_group_scenario_1(topology_st): """Check the situation when user1 and user2 are memberof grp017 user2 is member of grp017 but not with a memberof attribute (memberUid) :id: d222af17-17a6-48a0-8f22-a38306726a16 :setup: Standalone instance, grp017, enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp015 - grp016 (member uniquemember) enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp015 - not grp016 :steps: 1. Create user1 as grp17 (member) 2. Create user2 as grp17 (uniqueMember) 3. Create user3 as grp17 (memberuid) (not memberof attribute) 4. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp15 - grp16 (member uniquemember) - not grp17 5. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp15 - not grp16 - not grp17 6. Assert user1 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - grp17 (member) 7. Assert user2 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - grp17 (uniqueMember) 8. Assert user3 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - NOT grp17 (memberuid) :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') memofegrp3 = _get_group_dn('memofegrp3') memofegrp015 = _get_group_dn('memofegrp015') memofegrp016 = _get_group_dn('memofegrp016') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 # - not grp16 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) # # create user1 # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (member) # # create user2 # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (uniqueMember) # # create user3 # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (memberuid) (not memberof attribute) memofuser1 = _create_user(topology_st, 'memofuser1') memofuser2 = _create_user(topology_st, 'memofuser2') memofuser3 = _create_user(topology_st, 'memofuser3') memofegrp017 = _create_group(topology_st, 'memofegrp017') mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser2), (ldap.MOD_ADD, 'memberuid', memofuser3)] log.info("Update %s 
is memberof %s (member)" % (memofuser1, memofegrp017)) log.info("Update %s is memberof %s (uniqueMember)" % (memofuser2, memofegrp017)) log.info("Update %s is memberof %s (memberuid)" % (memofuser3, memofegrp017)) topology_st.standalone.modify_s(ensure_str(memofegrp017), mods) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) # - not grp17 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 # - not grp16 # - not grp17 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) # assert user1 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (member) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) assert _check_memberof(topology_st, 
member=memofuser1, group=memofegrp017) # assert user2 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (uniqueMember) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) # assert user3 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - NOT grp17 (memberuid) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) def test_complex_group_scenario_2(topology_st): """Check the situation when user1 and user2 are memberof grp018 user2 is member of grp018 but not with a memberof attribute (memberUid) :id: d222af17-17a6-48a0-8f22-a38306726a17 :setup: Standalone instance, grp018, enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp015 - grp016 (member uniquemember) - not grp17 enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp015 - not grp016 - not grp017 user1 is member of - not grp1 - not grp2 - not grp3 - not grp015 - not grp016 - grp017 (member) user2 is member of - not grp1 - not grp2 - not grp3 - not grp015 - not grp016 - grp017 (uniquemember) user3 is member of - not grp1 - not grp2 - not grp3 - not grp015 - not grp016 - not grp017 
(memberuid) :steps: 1. Add user1 as a member of grp18 (member, uniquemember) 2. Assert user1 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - grp17 (member) - grp18 (member, uniquemember) 3. Delete user1 member/uniquemember attributes from grp018 4. Assert user1 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - grp17 (member) - NOT grp18 (memberUid) 5. Delete user1, user2, user3, grp17 entries 6. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp15 - grp16 (member uniquemember) - not grp018 7. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp15 - not grp16 - not grp018 :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofuser1 = _get_user_dn('memofuser1') memofuser2 = _get_user_dn('memofuser2') memofuser3 = _get_user_dn('memofuser3') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') memofegrp3 = _get_group_dn('memofegrp3') memofegrp015 = _get_group_dn('memofegrp015') memofegrp016 = _get_group_dn('memofegrp016') memofegrp017 = _get_group_dn('memofegrp017') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) # - not grp17 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 # - not grp16 # - not grp17 assert not _check_memberof(topology_st, 
member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) # assert user1 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (member) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) # assert user2 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (uniqueMember) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) # assert user3 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - NOT grp17 (memberuid) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser3, 
group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) # # Create a group grp018 with user1 member/uniquemember memofegrp018 = _create_group(topology_st, 'memofegrp018') mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser1), (ldap.MOD_ADD, 'memberuid', memofuser1)] log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017)) log.info("Update %s is memberof %s (uniqueMember)" % (memofuser1, memofegrp017)) log.info("Update %s is memberof %s (memberuid)" % (memofuser1, memofegrp017)) topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) # assert user1 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (member) # - grp18 (member, uniquemember) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) assert _check_memberof(topology_st, member=memofuser1, group=memofegrp018) mods = [(ldap.MOD_DELETE, 'member', memofuser1), (ldap.MOD_DELETE, 'uniqueMember', memofuser1)] log.info("Update %s is no longer memberof %s (member)" % (memofuser1, memofegrp018)) log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofuser1, memofegrp018)) topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) # assert user1 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - grp17 (member) # - NOT grp18 (memberUid) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) assert not 
_check_memberof(topology_st, member=memofuser1, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp018) # DEL user1, user2, user3, grp17 topology_st.standalone.delete_s(ensure_str(memofuser1)) topology_st.standalone.delete_s(ensure_str(memofuser2)) topology_st.standalone.delete_s(ensure_str(memofuser3)) topology_st.standalone.delete_s(ensure_str(memofegrp017)) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) # - not grp018 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 # - not grp16 # - not grp018 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) def test_complex_group_scenario_3(topology_st): """Test a complex memberOf case: Add user2 to grp19_2, Add user3 
to grp19_3, Add grp19_2 and grp_19_3 to grp19_1 :id: d222af17-17a6-48a0-8f22-a38306726a18 :setup: Standalone instance, enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp015 - grp016 (member uniquemember) - not grp018 enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp015 - not grp016 - not grp018 :steps: 1. Create user2 and user3 2. Create a group grp019_2 with user2 member 3. Create a group grp019_3 with user3 member 4. Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member 5. Assert memofegrp019_1 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - not grp018 - not grp19_1 - not grp019_2 - not grp019_3 6. Assert memofegrp019_2 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - not grp018 - grp19_1 - not grp019_2 - not grp019_3 7. Assert memofegrp019_3 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - not grp018 - grp19_1 - not grp019_2 - not grp019_3 8. Assert memofuser2 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - not grp018 - grp19_1 - grp019_2 - not grp019_3 9. Assert memofuser3 is member of - not grp1 - not grp2 - not grp3 - not grp15 - not grp16 - not grp018 - grp19_1 - not grp019_2 - grp019_3 10. Delete user2, user3, and all grp19* entries 11. Assert enh1 is member of - grp1 (member) - not grp2 - grp3 (uniquemember) - not grp15 - grp16 (member uniquemember) - not grp018 12. Assert enh2 is member of - not grp1 - grp2 (uniquemember) - grp3 (member) - not grp15 - not grp16 - not grp018 :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success 12. 
Success """ memofenh1 = _get_user_dn('memofenh1') memofenh2 = _get_user_dn('memofenh2') memofegrp1 = _get_group_dn('memofegrp1') memofegrp2 = _get_group_dn('memofegrp2') memofegrp3 = _get_group_dn('memofegrp3') memofegrp015 = _get_group_dn('memofegrp015') memofegrp016 = _get_group_dn('memofegrp016') memofegrp018 = _get_group_dn('memofegrp018') # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) # - not grp018 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 (member) # - not grp15 # - not grp16 # - not grp018 assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) memofuser2 = _create_user(topology_st, 'memofuser2') memofuser3 = _create_user(topology_st, 'memofuser3') # Create a group grp019_2 with user2 member memofegrp019_2 = _create_group(topology_st, 'memofegrp019_2') mods = [(ldap.MOD_ADD, 'member', memofuser2)] topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) # Create a group grp019_3 with user3 member memofegrp019_3 = _create_group(topology_st, 'memofegrp019_3') mods = [(ldap.MOD_ADD, 'member', memofuser3)] 
topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser')] topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) # Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member memofegrp019_1 = _create_group(topology_st, 'memofegrp019_1') mods = [(ldap.MOD_ADD, 'member', memofegrp019_2), (ldap.MOD_ADD, 'member', memofegrp019_3)] topology_st.standalone.modify_s(ensure_str(memofegrp019_1), mods) # assert memofegrp019_1 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - not grp018 # - not grp19_1 # - not grp019_2 # - not grp019_3 assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp2) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp015) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp018) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_1) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_2) assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_3) # assert memofegrp019_2 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - not grp018 # - grp19_1 # - not grp019_2 # - not grp019_3 assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp2) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015) assert not _check_memberof(topology_st, 
member=memofegrp019_2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018) assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3) # assert memofegrp019_3 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - not grp018 # - grp19_1 # - not grp019_2 # - not grp019_3 assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp2) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018) assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2) assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3) # assert memofuser2 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - not grp018 # - grp19_1 # - grp019_2 # - not grp019_3 assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp018) assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_1) assert 
_check_memberof(topology_st, member=memofuser2, group=memofegrp019_2) assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp019_3) # assert memofuser3 is member of # - not grp1 # - not grp2 # - not grp3 # - not grp15 # - not grp16 # - not grp018 # - grp19_1 # - not grp019_2 # - grp019_3 assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp018) assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_1) assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp019_2) assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_3) # DEL user2, user3, grp19* topology_st.standalone.delete_s(ensure_str(memofuser2)) topology_st.standalone.delete_s(ensure_str(memofuser3)) topology_st.standalone.delete_s(ensure_str(memofegrp019_1)) topology_st.standalone.delete_s(ensure_str(memofegrp019_2)) topology_st.standalone.delete_s(ensure_str(memofegrp019_3)) # assert enh1 is member of # - grp1 (member) # - not grp2 # - grp3 (uniquemember) # - not grp15 # - grp16 (member uniquemember) # - not grp018 assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) # assert enh2 is member of # - not grp1 # - grp2 (uniquemember) # - grp3 
def test_complex_group_scenario_4(topology_st):
    """Test a complex memberOf case:
    Add user1 and grp[1-5]
    Add user1 member of grp[1-4]
    Add grp[1-4] member of grp5
    Check user1 is member of grp[1-5]

    :id: d223af17-17a6-48a0-8f22-a38306726a19
    :setup: Standalone instance, enh1 is member of
        - grp1 (member)
        - not grp2
        - grp3 (uniquemember)
        - not grp015
        - grp016 (member uniquemember)
        - not grp018
        enh2 is member of
        - not grp1
        - grp2 (uniquemember)
        - grp3 (member)
        - not grp015
        - not grp016
        - not grp018
    :steps:
        1. Create user1
        2. Create grp[1-5] that can be inetUser (having memberof)
        3. Add user1 to grp[1-4] (uniqueMember)
        4. Create grp5 with grp[1-4] as member
        5. Assert user1 is a member of grp[1-5]
        6. Delete user1 and all grp20 entries
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    # Sanity-check the membership state inherited from the previous tests:
    # enh1 is in grp1 (member), grp3 (uniquemember), grp016 (both);
    # enh2 is in grp2 (uniquemember) and grp3 (member); nothing else.
    preconditions = [
        (memofenh1, memofegrp1, True),
        (memofenh1, memofegrp2, False),
        (memofenh1, memofegrp3, True),
        (memofenh1, memofegrp015, False),
        (memofenh1, memofegrp016, True),
        (memofenh1, memofegrp018, False),
        (memofenh2, memofegrp1, False),
        (memofenh2, memofegrp2, True),
        (memofenh2, memofegrp3, True),
        (memofenh2, memofegrp015, False),
        (memofenh2, memofegrp016, False),
        (memofenh2, memofegrp018, False),
    ]
    for member_dn, group_dn, expected in preconditions:
        assert bool(_check_memberof(topology_st, member=member_dn, group=group_dn)) is expected

    # create user1
    memofuser1 = _create_user(topology_st, 'memofuser1')

    # create grp[1-5] that can be inetUser (having memberof)
    memofegrp020_1 = _create_group(topology_st, 'memofegrp020_1')
    memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2')
    memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3')
    memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4')
    memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5')

    all_groups = [memofegrp020_1, memofegrp020_2, memofegrp020_3,
                  memofegrp020_4, memofegrp020_5]
    first_four = all_groups[:4]

    add_objectclass = [(ldap.MOD_ADD, 'objectClass', b'inetUser')]
    for group_dn in all_groups:
        topology_st.standalone.modify_s(ensure_str(group_dn), add_objectclass)

    # add user1 to grp[1-4] (uniqueMember)
    add_user = [(ldap.MOD_ADD, 'uniqueMember', memofuser1)]
    for group_dn in first_four:
        topology_st.standalone.modify_s(ensure_str(group_dn), add_user)

    # create grp5 with grp[1-4] as member, making user1 a nested member of grp5
    add_members = [(ldap.MOD_ADD, 'member', group_dn) for group_dn in first_four]
    topology_st.standalone.modify_s(ensure_str(memofegrp020_5), add_members)

    # user1 must now be a direct or nested member of all five groups
    for group_dn in all_groups:
        assert _check_memberof(topology_st, member=memofuser1, group=group_dn)

    # DEL user1, grp20*
    topology_st.standalone.delete_s(ensure_str(memofuser1))
    for group_dn in all_groups:
        topology_st.standalone.delete_s(ensure_str(group_dn))
def test_complex_group_scenario_5(topology_st):
    """Test a complex memberOf case:
    Add user[1-4] and Grp[1-4]
    Add userX as uniquemember of GrpX
    Add Grp5
        Grp[1-4] as members of Grp5
        user1 as member of Grp5
    Check that user1 is member of Grp1 and Grp5
    Check that user* are members of Grp5

    :id: d222af17-17a6-48a0-8f22-a38306726a20
    :setup: Standalone instance, enh1 is member of
        - grp1 (member)
        - not grp2
        - grp3 (uniquemember)
        - not grp015
        - grp016 (member uniquemember)
        - not grp018
        enh2 is member of
        - not grp1
        - grp2 (uniquemember)
        - grp3 (member)
        - not grp015
        - not grp016
        - not grp018
    :steps:
        1. Create user1-4
        2. Create grp[1-4] that can be inetUser (having memberof)
        3. Add userX (uniquemember) to grpX
        4. Create grp5 with grp[1-4] as member + user1
        5. Assert user[1-4] are member of grp20_5
        6. Assert userX is uniqueMember of grpX
        7. Check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
        8. Check that grp20_[1-4] are only 'member' of grp20_5
        9. Check that user1 are only 'member' of grp20_5
        10. Assert enh1 is member of
            - grp1 (member)
            - not grp2
            - grp3 (uniquemember)
            - not grp15
            - grp16 (member uniquemember)
            - not grp018
            - not grp20*
        11. Assert enh2 is member of
            - not grp1
            - grp2 (uniquemember)
            - grp3 (member)
            - not grp15
            - not grp16
            - not grp018
            - not grp20*
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    # DNs of the fixture entries created by the earlier tests in this module
    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    # assert enh1 is member of
    # - grp1 (member)
    # - not grp2
    # - grp3 (uniquemember)
    # - not grp15
    # - grp16 (member uniquemember)
    # - not grp018
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)

    # assert enh2 is member of
    # - not grp1
    # - grp2 (uniquemember)
    # - grp3 (member)
    # - not grp15
    # - not grp16
    # - not grp018
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)

    # create user1-4
    memofuser1 = _create_user(topology_st, 'memofuser1')
    memofuser2 = _create_user(topology_st, 'memofuser2')
    memofuser3 = _create_user(topology_st, 'memofuser3')
    memofuser4 = _create_user(topology_st, 'memofuser4')

    # create grp[1-4] that can be inetUser (having memberof)
    # add userX (uniquemember) to grpX
    memofegrp020_1 = _create_group(topology_st, 'memofegrp020_1')
    memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2')
    memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3')
    memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4')
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser'), (ldap.MOD_ADD, 'uniqueMember', x[1])]
        topology_st.standalone.modify_s(ensure_str(x[0]), mods)

    # create grp5 with grp[1-4] as member + user1
    memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5')
    mods = [(ldap.MOD_ADD, 'member', memofuser1)]
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        mods.append((ldap.MOD_ADD, 'member', grp))
    topology_st.standalone.modify_s(ensure_str(memofegrp020_5), mods)

    # assert user[1-4] are member of grp20_5
    for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)

    # assert userX is uniqueMember of grpX
    assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
    assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
    assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)

    # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
        assert not _check_memberattr(topology_st, x[0], 'member', x[1])

    # check that grp20_[1-4] are only 'member' of grp20_5
    # check that user1 are only 'member' of grp20_5
    for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
    for user in [memofuser2, memofuser3, memofuser4]:
        assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)

    # assert enh1 is member of
    # - grp1 (member)
    # - not grp2
    # - grp3 (uniquemember)
    # - not grp15
    # - grp16 (member uniquemember)
    # - not grp018
    # - not grp20*
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5)

    # assert enh2 is member of
    # - not grp1
    # - grp2 (uniquemember)
    # - grp3 (member)
    # - not grp15
    # - not grp16
    # - not grp018
    # - not grp20*
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)
def test_complex_group_scenario_6(topology_st):
    """Test a complex memberOf case:
    add userX as member/uniqueMember of GrpX
    add Grp5 as uniquemember of GrpX (this create a loop)

    :id: d222af17-17a6-48a0-8f22-a38306726a21
    :setup: Standalone instance
        enh1 is member of
        - grp1 (member)
        - not grp2
        - grp3 (uniquemember)
        - not grp15
        - grp16 (member uniquemember)
        - not grp018
        - not grp20*
        enh2 is member of
        - not grp1
        - grp2 (uniquemember)
        - grp3 (member)
        - not grp15
        - not grp16
        - not grp018
        - not grp20*
        user1 is member of grp20_5
        userX is uniquemember of grp20_X
        grp[1-4] are member of grp20_5
    :steps:
        1. Add user[1-4] (member) to grp020_[1-4]
        2. Check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4]
        3. Add grp20_5 (uniqueMember) to grp020_[1-4] (this creates a membership loop)
        4. Assert user[1-4] are member of grp20_[1-4]
        5. Assert that all groups are members of each others because Grp5
           is member of all grp20_[1-4]
        6. Assert user[1-5] is uniqueMember of grp[1-5]
        7. Assert enh1 is member of
           - grp1 (member)
           - not grp2
           - grp3 (uniquemember)
           - not grp15
           - grp16 (member uniquemember)
           - not grp018
           - not grp20*
        8. Assert enh2 is member of
           - not grp1
           - grp2 (uniquemember)
           - grp3 (member)
           - not grp15
           - not grp16
           - not grp018
           - not grp20*
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """
    # DNs of the fixture entries created by the earlier tests in this module
    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    memofuser1 = _get_user_dn('memofuser1')
    memofuser2 = _get_user_dn('memofuser2')
    memofuser3 = _get_user_dn('memofuser3')
    memofuser4 = _get_user_dn('memofuser4')

    memofegrp020_1 = _get_group_dn('memofegrp020_1')
    memofegrp020_2 = _get_group_dn('memofegrp020_2')
    memofegrp020_3 = _get_group_dn('memofegrp020_3')
    memofegrp020_4 = _get_group_dn('memofegrp020_4')
    memofegrp020_5 = _get_group_dn('memofegrp020_5')

    # assert user[1-4] are member of grp20_5
    for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)

    # assert userX is member of grpX
    assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
    assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
    assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)

    # assert enh1 is member of
    # - grp1 (member)
    # - not grp2
    # - grp3 (uniquemember)
    # - not grp15
    # - grp16 (member uniquemember)
    # - not grp018
    # - not grp20*
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5)

    # assert enh2 is member of
    # - not grp1
    # - grp2 (uniquemember)
    # - grp3 (member)
    # - not grp15
    # - not grp16
    # - not grp018
    # - not grp20*
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)

    # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
        assert not _check_memberattr(topology_st, x[0], 'member', x[1])

    # check that grp20_[1-4] are only 'member' of grp20_5
    # check that user1 is only 'member' of grp20_5
    for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)

    # check that user2-4 are neither 'member' nor 'uniquemember' of grp20_5
    for user in [memofuser2, memofuser3, memofuser4]:
        assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)

    # add userX (member) to grpX
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        mods = [(ldap.MOD_ADD, 'member', x[1])]
        topology_st.standalone.modify_s(ensure_str(x[0]), mods)

    # check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4]
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
        assert _check_memberattr(topology_st, x[0], 'member', x[1])

    # add grp20_5 (uniqueMember) to each of grp20_[1-4]
    # it creates a membership loop !!!
    mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        topology_st.standalone.modify_s(ensure_str(grp), mods)

    # give the memberOf plugin time to fix up the (looped) membership graph
    time.sleep(5)

    # assert user[1-4] are member of grp20_[1-4]
    for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_1)

    # assert that all groups are members of each others because Grp5
    # is member of all grp20_[1-4]
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        for owner in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
            if grp == owner:
                # no member of itself
                assert not _check_memberof(topology_st, member=grp, group=owner)
            else:
                assert _check_memberof(topology_st, member=grp, group=owner)
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)

    # assert userX is uniqueMember of grpX
    assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
    assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
    assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)

    # assert enh1 is member of
    # - grp1 (member)
    # - not grp2
    # - grp3 (uniquemember)
    # - not grp15
    # - grp16 (member uniquemember)
    # - not grp018
    # - not grp20*
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5)

    # assert enh2 is member of
    # - not grp1
    # - grp2 (uniquemember)
    # - grp3 (member)
    # - not grp15
    # - not grp16
    # - not grp018
    # - not grp20*
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)
def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
                    memofegrp020_4, memofegrp020_5,
                    memofuser1, memofuser2, memofuser3, memofuser4):
    """Verify the membership layout expected after the deletions of test 023:

         /----member ---> G1 ---uniqueMember -------\
        /                                            V
    G5 ------------------------>member ---------- --->U1
    |
    |----member ---> G2 ---member/uniqueMember -> U2
    |<--uniquemember-/
    |
    |----member ---> G3 ---member/uniqueMember -> U3
    |<--uniquemember-/
    |----member ---> G4 ---member/uniqueMember -> U4
    |<--uniquemember-/
    """
    lower_groups = [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]
    users = [memofuser1, memofuser2, memofuser3, memofuser4]

    # G5 holds G1..G4 through 'member' only
    for group_dn in lower_groups:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', group_dn)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', group_dn)

    # G2..G4 point back at G5 through 'uniqueMember' only
    for group_dn in lower_groups[1:]:
        assert not _check_memberattr(topology_st, group_dn, 'member', memofegrp020_5)
        assert _check_memberattr(topology_st, group_dn, 'uniqueMember', memofegrp020_5)

    # U2..U4 are both 'member' and 'uniqueMember' of their own group
    for group_dn, user_dn in zip(lower_groups[1:], users[1:]):
        assert _check_memberattr(topology_st, group_dn, 'uniqueMember', user_dn)
        assert _check_memberattr(topology_st, group_dn, 'member', user_dn)

    # U1 remains only a 'uniqueMember' of G1, and G1 no longer refers to G5
    assert _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofuser1)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofuser1)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5)

    # every lower group and every user carries memberOf G5
    for entry_dn in lower_groups + users:
        assert _check_memberof(topology_st, member=entry_dn, group=memofegrp020_5)

    # G5 is in turn a member of G2..G4 (via their uniqueMember back-links)
    for group_dn in lower_groups[1:]:
        assert _check_memberof(topology_st, member=memofegrp020_5, group=group_dn)

    # all users are nested members of G2..G5
    for user_dn in users:
        assert _check_memberof(topology_st, member=user_dn, group=memofegrp020_5)
        assert _check_memberof(topology_st, member=user_dn, group=memofegrp020_4)
        assert _check_memberof(topology_st, member=user_dn, group=memofegrp020_3)
        assert _check_memberof(topology_st, member=user_dn, group=memofegrp020_2)

    # U1 is a member of every grp20 group
    for group_dn in lower_groups + [memofegrp020_5]:
        assert _check_memberof(topology_st, member=memofuser1, group=group_dn)
def test_complex_group_scenario_7(topology_st):
    """Check the user removal from the complex membership topology

    :id: d222af17-17a6-48a0-8f22-a38306726a22
    :setup: Standalone instance, enh1 is member of
        - grp1 (member)
        - not grp2
        - grp3 (uniquemember)
        - not grp15
        - grp16 (member uniquemember)
        - not grp018
        - not grp20*
        enh2 is member of
        - not grp1
        - grp2 (uniquemember)
        - grp3 (member)
        - not grp15
        - not grp16
        - not grp018
        - not grp20*
        grp[1-4] are member of grp20_5
        user1 is member (member) of group_5
        grp5 is uniqueMember of grp20_[1-4]
        user[1-4] is member/uniquemember of grp20_[1-4]
    :steps:
        1. Delete user1 as 'member' of grp20_1
        2. Delete grp020_5 as 'uniqueMember' of grp20_1
        3. Check the result membership
    :expectedresults:
        1. Success
        2. Success
        3. The result should be like this
           ::

                 /----member ---> G1 ---uniqueMember -------\\
                /                                            V
            G5 ------------------------>member ---------- --->U1
            |
            |----member ---> G2 ---member/uniqueMember -> U2
            |<--uniquemember-/
            |
            |----member ---> G3 ---member/uniqueMember -> U3
            |<--uniquemember-/
            |----member ---> G4 ---member/uniqueMember -> U4
            |<--uniquemember-/
    """
    # DNs of the fixture entries created by the earlier tests in this module
    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    memofuser1 = _get_user_dn('memofuser1')
    memofuser2 = _get_user_dn('memofuser2')
    memofuser3 = _get_user_dn('memofuser3')
    memofuser4 = _get_user_dn('memofuser4')

    memofegrp020_1 = _get_group_dn('memofegrp020_1')
    memofegrp020_2 = _get_group_dn('memofegrp020_2')
    memofegrp020_3 = _get_group_dn('memofegrp020_3')
    memofegrp020_4 = _get_group_dn('memofegrp020_4')
    memofegrp020_5 = _get_group_dn('memofegrp020_5')

    # assert user[1-4] are member of grp20_[1-4]
    for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
        assert _check_memberof(topology_st, member=user, group=memofegrp020_1)

    # assert that all groups are members of each others because Grp5
    # is member of all grp20_[1-4]
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        for owner in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
            if grp == owner:
                # no member of itself
                assert not _check_memberof(topology_st, member=grp, group=owner)
            else:
                assert _check_memberof(topology_st, member=grp, group=owner)
    for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)

    # assert userX is uniqueMember of grpX
    assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
    assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
    assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)

    # assert enh1 is member of
    # - grp1 (member)
    # - not grp2
    # - grp3 (uniquemember)
    # - not grp15
    # - grp16 (member uniquemember)
    # - not grp018
    # - not grp20*
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
    assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5)

    # assert enh2 is member of
    # - not grp1
    # - grp2 (uniquemember)
    # - grp3 (member)
    # - not grp15
    # - not grp16
    # - not grp018
    # - not grp20*
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
    assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4)
    assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)

    # check that user[1-4] are both 'member' and 'uniqueMember' of grp20_[1-4]
    # (test 022 added the 'member' values on top of the original 'uniqueMember')
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2),
              (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]:
        assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
        assert _check_memberattr(topology_st, x[0], 'member', x[1])

    # check that grp20_[1-4] are only 'member' (not 'uniqueMember') of grp20_5
    # check that user1 is only 'member' of grp20_5
    for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
    assert _check_memberattr(topology_st, memofegrp020_5, 'member', memofuser1)
    assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', memofuser1)

    # DEL user1 as 'member' of grp20_1
    mods = [(ldap.MOD_DELETE, 'member', memofuser1)]
    topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods)

    # DEL grp20_5 as 'uniqueMember' of grp20_1 (breaks the loop through G1)
    mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)]
    topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods)
    """
         /----member ---> G1 ---uniqueMember -------\
        /                                            V
    G5 ------------------------>member ---------- --->U1
    |
    |----member ---> G2 ---member/uniqueMember -> U2
    |<--uniquemember-/
    |
    |----member ---> G3 ---member/uniqueMember -> U3
    |<--uniquemember-/
    |----member ---> G4 ---member/uniqueMember -> U4
    |<--uniquemember-/
    """
    verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
                    memofegrp020_4, memofegrp020_5,
                    memofuser1, memofuser2, memofuser3, memofuser4)


def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
                    memofegrp020_4, memofegrp020_5,
                    memofuser1, memofuser2, memofuser3, memofuser4):
    """
         /----member ---> G1 ---member/uniqueMember -\
        /                                            V
    G5 ------------------------>member ---------- --->U1
    |
    |----member ---> G2 ---member/uniqueMember -> U2
    |<--uniquemember-/
    |
    |----member ---> G3 ---member/uniqueMember -> U3
    |<--uniquemember-/
    |----member ---> G4 ---member/uniqueMember -> U4
    |<--uniquemember-/
    """
    for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
    for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]:
        assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5)
        assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)

    # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
    for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2), (memofegrp020_3, memofuser3),
(memofegrp020_4, memofuser4)]: assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) assert _check_memberattr(topology_st, x[0], 'member', x[1]) assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5) assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5) for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, memofuser4]: assert _check_memberof(topology_st, member=x, group=memofegrp020_5) for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: assert _check_memberof(topology_st, member=memofegrp020_5, group=x) for user in [memofuser1, memofuser2, memofuser3, memofuser4]: assert _check_memberof(topology_st, member=user, group=memofegrp020_5) assert _check_memberof(topology_st, member=user, group=memofegrp020_4) assert _check_memberof(topology_st, member=user, group=memofegrp020_3) assert _check_memberof(topology_st, member=user, group=memofegrp020_2) for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: assert _check_memberof(topology_st, member=memofuser1, group=grp) def test_complex_group_scenario_8(topology_st): """Check the user add operation to the complex membership topology :id: d222af17-17a6-48a0-8f22-a38306726a23 :setup: Standalone instance, :: /----member ---> G1 ---uniqueMember -------\ / V G5 ------------------------>member ---------- --->U1 | |----member ---> G2 ---member/uniqueMember -> U2 |<--uniquemember-/ | |----member ---> G3 ---member/uniqueMember -> U3 |<--uniquemember-/ |----member ---> G4 ---member/uniqueMember -> U4 |<--uniquemember-/ :steps: 1. Add user1 to grp020_1 2. Check the result membership :expectedresults: 1. Success 2. 
The result should be like this :: /----member ---> G1 ---member/uniqueMember -\ / V G5 ------------------------>member ---------- --->U1 | |----member ---> G2 ---member/uniqueMember -> U2 |<--uniquemember-/ | |----member ---> G3 ---member/uniqueMember -> U3 |<--uniquemember-/ |----member ---> G4 ---member/uniqueMember -> U4 |<--uniquemember-/ """ memofuser1 = _get_user_dn('memofuser1') memofuser2 = _get_user_dn('memofuser2') memofuser3 = _get_user_dn('memofuser3') memofuser4 = _get_user_dn('memofuser4') memofegrp020_1 = _get_group_dn('memofegrp020_1') memofegrp020_2 = _get_group_dn('memofegrp020_2') memofegrp020_3 = _get_group_dn('memofegrp020_3') memofegrp020_4 = _get_group_dn('memofegrp020_4') memofegrp020_5 = _get_group_dn('memofegrp020_5') verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4) # ADD user1 as 'member' of grp20_1 mods = [(ldap.MOD_ADD, 'member', memofuser1)] topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4) def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4): """ /----member ---> G1 / G5 ------------------------>member ---------- --->U1 | |----member ---> G2 |----member ---> G3 |----member ---> G4 """ for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) assert not _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) # check that user[1-4] is only 
'uniqueMember' of the grp20_[1-4] for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2), (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]: assert not _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) assert not _check_memberattr(topology_st, x[0], 'member', x[1]) for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1]: assert _check_memberof(topology_st, member=x, group=memofegrp020_5) for x in [memofuser2, memofuser3, memofuser4]: assert not _check_memberof(topology_st, member=x, group=memofegrp020_5) assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5) for user in [memofuser1, memofuser2, memofuser3, memofuser4]: for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: assert not _check_memberof(topology_st, member=user, group=grp) def test_complex_group_scenario_9(topology_st): """Check the massive user deletion from the complex membership topology :id: d222af17-17a6-48a0-8f22-a38306726a24 :setup: Standalone instance, :: /----member ---> G1 ---member/uniqueMember -\ / V G5 ------------------------>member ---------- --->U1 | |----member ---> G2 ---member/uniqueMember -> U2 |<--uniquemember-/ | |----member ---> G3 ---member/uniqueMember -> U3 |<--uniquemember-/ |----member ---> G4 ---member/uniqueMember -> U4 |<--uniquemember-/ :steps: 1. Delete user[1-5] as 'member' and 'uniqueMember' from grp20_[1-5] 2. Check the result membership :expectedresults: 1. Success 2. 
The result should be like this :: /----member ---> G1 / G5 ------------------------>member ---------- --->U1 | |----member ---> G2 |----member ---> G3 |----member ---> G4 """ memofuser1 = _get_user_dn('memofuser1') memofuser2 = _get_user_dn('memofuser2') memofuser3 = _get_user_dn('memofuser3') memofuser4 = _get_user_dn('memofuser4') memofegrp020_1 = _get_group_dn('memofegrp020_1') memofegrp020_2 = _get_group_dn('memofegrp020_2') memofegrp020_3 = _get_group_dn('memofegrp020_3') memofegrp020_4 = _get_group_dn('memofegrp020_4') memofegrp020_5 = _get_group_dn('memofegrp020_5') verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4) # ADD inet # for user in [memofuser1, memofuser2, memofuser3, memofuser4]: # mods = [(ldap.MOD_ADD, 'objectClass', 'inetUser')] # topology_st.standalone.modify_s(user, mods) for x in [(memofegrp020_1, memofuser1), (memofegrp020_2, memofuser2), (memofegrp020_3, memofuser3), (memofegrp020_4, memofuser4)]: mods = [(ldap.MOD_DELETE, 'member', x[1]), (ldap.MOD_DELETE, 'uniqueMember', x[1])] topology_st.standalone.modify_s(ensure_str(x[0]), mods) """ /----member ---> G1 / G5 ------------------------>member ---------- --->U1 | |----member ---> G2 |<--uniquemember-/ | |----member ---> G3 |<--uniquemember-/ |----member ---> G4 |<--uniquemember-/ """ for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)] topology_st.standalone.modify_s(ensure_str(x), mods) """ /----member ---> G1 / G5 ------------------------>member ---------- --->U1 | |----member ---> G2 |----member ---> G3 |----member ---> G4 """ verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_memberof_auto_add_oc(topology_st): 
"""Test the auto add objectclass (OC) feature. The plugin should add a predefined objectclass that will allow memberOf to be added to an entry. :id: d222af17-17a6-48a0-8f22-a38306726a25 :setup: Standalone instance :steps: 1. Enable dynamic plugins 2. Enable memberOf plugin 3. Test that the default add OC works. 4. Add a group that already includes one user 5. Assert memberOf on user1 6. Delete user1 and the group 7. Test invalid value (config validation) 8. Add valid objectclass 9. Add two users 10. Add a group that already includes one user 11. Add a user to the group :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success 11. Success """ # enable dynamic plugins try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: ldap.error('Failed to enable dynamic plugins! ' + e.message['desc']) assert False # Enable the plugin topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) # Test that the default add OC works. try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': 'top', 'objectclass': 'person', 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'sn': 'last', 'cn': 'full', 'givenname': 'user1', 'uid': 'user1' }))) except ldap.LDAPError as e: log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) assert False # Add a group(that already includes one user try: topology_st.standalone.add_s(Entry((GROUP_DN, {'objectclass': 'top', 'objectclass': 'groupOfNames', 'cn': 'group', 'member': USER1_DN }))) except ldap.LDAPError as e: log.fatal('Failed to add group entry, error: ' + e.message['desc']) assert False # Assert memberOf on user1 _check_memberof(topology_st, USER1_DN, GROUP_DN) # Reset for the next test .... 
topology_st.standalone.delete_s(USER1_DN) topology_st.standalone.delete_s(GROUP_DN) # Test invalid value (config validation) topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) try: topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'invalid123')]) log.fatal('Incorrectly added invalid objectclass!') assert False except ldap.UNWILLING_TO_PERFORM: log.info('Correctly rejected invalid objectclass') except ldap.LDAPError as e: ldap.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) assert False # Add valid objectclass topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) try: topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'inetuser')]) except ldap.LDAPError as e: log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) assert False # Add two users try: topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': 'top', 'objectclass': 'person', 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'sn': 'last', 'cn': 'full', 'givenname': 'user1', 'uid': 'user1' }))) except ldap.LDAPError as e: log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) assert False try: topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': 'top', 'objectclass': 'person', 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'sn': 'last', 'cn': 'full', 'givenname': 'user2', 'uid': 'user2' }))) except ldap.LDAPError as e: log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) assert False # Add a group(that already includes one user try: topology_st.standalone.add_s(Entry((GROUP_DN, {'objectclass': 'top', 'objectclass': 'groupOfNames', 'cn': 'group', 'member': USER1_DN }))) except ldap.LDAPError as e: log.fatal('Failed to add group entry, error: ' + e.message['desc']) assert False # Add a user to the group try: topology_st.standalone.modify_s(GROUP_DN, [(ldap.MOD_ADD, 
'member', ensure_bytes(USER2_DN))]) except ldap.LDAPError as e: log.fatal('Failed to add user2 to group: error ' + e.message['desc']) assert False log.info('Test complete.') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py000066400000000000000000000074561421664411400320430ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.plugins import WhoamiPlugin pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @pytest.mark.ds47384 @pytest.mark.ds47601 def test_pluginpath_validation(topology_st): """Test pluginpath validation: relative and absolute paths With the inclusion of ticket 47601 - we do allow plugin paths outside the default location :id: 99f1fb2f-051d-4fd9-93d0-592dcd9b4c22 :setup: Standalone instance :steps: 1. Copy the library to a temporary directory 2. Add valid plugin paths * using the absolute path to the current library * using new remote location 3. Set plugin path back to the default 4. Check invalid path (no library present) 5. Check invalid relative path (no library present) :expectedresults: 1. This should pass 2. This should pass 3. This should pass 4. This should fail 5. 
This should fail """ inst = topology_st.standalone whoami = WhoamiPlugin(inst) # /tmp nowadays comes with noexec bit set on some systems # so instead let's write somewhere where dirsrv user has access tmp_dir = inst.get_bak_dir() plugin_dir = inst.get_plugin_dir() # Copy the library to our tmp directory try: shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir) except IOError as e: log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % ( plugin_dir, tmp_dir, e.strerror)) assert False # # Test adding valid plugin paths # # Try using the absolute path to the current library whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir) # Try using new remote location # If SELinux is enabled, plugin can't be loaded as it's not labeled properly if selinux_present(): import selinux if selinux.is_selinux_enabled(): with pytest.raises(ldap.UNWILLING_TO_PERFORM): whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) # Label it with lib_t, so it can be executed # We can't use selinux.setfilecon() here, because py.test needs to have mac_admin capability # Instead we can call chcon directly: subprocess.check_call(['/usr/bin/chcon', '-t', 'lib_t', '%s/libwhoami-plugin.so' % tmp_dir]) # And try to change the path again whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) else: whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) # Set plugin path back to the default whoami.replace('nsslapd-pluginPath', 'libwhoami-plugin') # # Test invalid path (no library present) # with pytest.raises(ldap.UNWILLING_TO_PERFORM): whoami.replace('nsslapd-pluginPath', '/bin/libwhoami-plugin') # No exception?! This is an error log.error('Invalid plugin path was incorrectly accepted by the server!') # # Test invalid relative path (no library present) # with pytest.raises(ldap.UNWILLING_TO_PERFORM): whoami.replace('nsslapd-pluginPath', '../libwhoami-plugin') # No exception?! 
This is an error log.error('Invalid plugin path was incorrectly accepted by the server!') log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/referint_test.py000066400000000000000000000107711421664411400272660ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Dec 12, 2019 @author: tbordaz ''' import logging import pytest from lib389 import Entry from lib389.plugins import ReferentialIntegrityPlugin from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) ESCAPED_RDN_BASE = "foo\\,oo" def _user_get_dn(no): uid = '%s%d' % (ESCAPED_RDN_BASE, no) dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX) return (uid, dn) def add_escaped_user(server, no): (uid, dn) = _user_get_dn(no) log.fatal('Adding user (%s): ' % dn) users = UserAccounts(server, DEFAULT_SUFFIX, None) user_properties = { 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'], 'uid': uid, 'cn' : uid, 'sn' : uid, 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', } users.create(properties=user_properties) return dn def test_referential_false_failure(topo): """On MODRDN referential integrity can erroneously fail :id: f77aeb80-c4c4-471b-8c1b-4733b714778b :setup: Standalone Instance :steps: 1. Configure the plugin 2. Create a group - 1rst member the one that will be move - more than 128 members - last member is a DN containing escaped char 3. Rename the 1rst member :expectedresults: 1. should succeed 2. should succeed 3. 
should succeed """ inst = topo[0] # stop the plugin, and start it plugin = ReferentialIntegrityPlugin(inst) plugin.disable() plugin.enable() ############################################################################ # Configure plugin ############################################################################ GROUP_CONTAINER = "ou=groups,%s" % DEFAULT_SUFFIX plugin.replace('referint-membership-attr', 'member') plugin.replace('nsslapd-plugincontainerscope', GROUP_CONTAINER) ############################################################################ # Creates a group with members having escaped DN ############################################################################ # Add some users and a group users = UserAccounts(inst, DEFAULT_SUFFIX, None) user1 = users.create_test_user(uid=1001) user2 = users.create_test_user(uid=1002) groups = Groups(inst, GROUP_CONTAINER, None) group = groups.create(properties={'cn': 'group'}) group.add('member', user2.dn) group.add('member', user1.dn) # Add more than 128 members so that referint follows the buggy path for i in range(130): escaped_user = add_escaped_user(inst, i) group.add('member', escaped_user) ############################################################################ # Check that the MODRDN succeeds ########################################################################### # Here we need to restart so that member values are taken in the right order # the last value is the escaped one inst.restart() # Here if the bug is fixed, referential is able to update the member value user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False) def test_invalid_referint_log(topo): """If there is an invalid log line in the referint log, make sure the server does not crash at startup :id: 34807b5a-ab17-4281-ae48-4e3513e19145 :setup: Standalone Instance :steps: 1. Set the referint log delay 2. Create invalid log 3. Start the server (no crash) :expectedresults: 1. Success 2. Success 3. 
Success """ inst = topo.standalone # Set delay - required for log parsing at server startup plugin = ReferentialIntegrityPlugin(inst) plugin.enable() plugin.set_update_delay('2') logfile = plugin.get_log_file() inst.restart() # Create invalid log inst.stop() with open(logfile, 'w') as log_fh: log_fh.write("CRASH\n") # Start the instance inst.start() assert inst.status() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py000066400000000000000000000562431421664411400303370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import socket import ldap import pytest import uuid import time from lib389 import DirSrv from lib389.utils import * from lib389.tasks import * from lib389.tools import DirSrvTools from lib389.topologies import topology_st from lib389.idm.directorymanager import DirectoryManager from lib389.plugins import RootDNAccessControlPlugin pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) localhost = DirSrvTools.getLocalhost() hostname = socket.gethostname() @pytest.fixture(scope="function") def rootdn_cleanup(topology_st): """Do a cleanup of the config area before the test """ log.info('Cleaning up the config area') plugin = RootDNAccessControlPlugin(topology_st.standalone) plugin.remove_all_allow_host() plugin.remove_all_deny_host() plugin.remove_all_allow_ip() plugin.remove_all_deny_ip() @pytest.fixture(scope="module") def rootdn_setup(topology_st): """Initialize our setup to test the Root DN Access Control Plugin Test the following access control type: - Allowed IP address * - Denied IP address * - Specific time window - Days 
allowed access - Allowed host * - Denied host * * means multiple valued """ log.info('Initializing root DN test suite...') # Enable dynamic plugins topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') # Enable the plugin global plugin plugin = RootDNAccessControlPlugin(topology_st.standalone) plugin.enable() log.info('test_rootdn_init: Initialized root DN test suite.') def rootdn_bind(inst, uri=None, fail=False): """Helper function to test root DN bind """ newinst = DirSrv(verbose=False) args = {SER_PORT: inst.port, SER_SERVERID_PROP: inst.serverid} newinst.allocate(args) newinst.open(uri=uri, connOnly=True) # This binds as root dn def test_rootdn_access_specific_time(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test binding inside and outside of a specific time :id: a0ef30e5-538b-46fa-9762-01a4435a15e8 :setup: Standalone instance, rootdn plugin set up :steps: 1. Get the current time, and bump it ahead twohours 2. Bind as Root DN 3. Set config to allow the entire day 4. Bind as Root DN 5. Cleanup :expectedresults: 1. Success 2. Should fail 3. Success 4. Success 5. 
Success """ log.info('Running test_rootdn_access_specific_time...') dm = DirectoryManager(topology_st.standalone) # Get the current time, and bump it ahead twohours current_hour = time.strftime("%H") if int(current_hour) > 12: open_time = '0200' close_time = '0400' else: open_time = '1600' close_time = '1800' assert plugin.replace_many(('rootdn-open-time', open_time), ('rootdn-close-time', close_time)) attr_updated = 0 for i in range(0, timeout): if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-open-time and rootdn-close-time were not updated") # Bind as Root DN - should fail for i in range(0, timeout): try: dm.bind() except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Set config to allow the entire day open_time = '0000' close_time = '2359' assert plugin.replace_many(('rootdn-open-time', open_time), ('rootdn-close-time', close_time)) attr_updated = 0 for i in range(0, timeout): if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-open-time and rootdn-close-time were not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: dm.bind() break except: time.sleep(.5) # Cleanup - undo the changes we made so the next test has a clean slate assert plugin.apply_mods([(ldap.MOD_DELETE, 'rootdn-open-time'), (ldap.MOD_DELETE, 'rootdn-close-time')]) def test_rootdn_access_day_of_week(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test the days of week feature :id: a0ef30e5-538b-46fa-9762-01a4435a15e1 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set the deny days 2. Bind as Root DN 3. Set the allow days 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_day_of_week...') dm = DirectoryManager(topology_st.standalone) days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat') day = int(time.strftime("%w", time.gmtime())) if day == 6: # Handle the roll over from Saturday into Sunday deny_days = days[1] + ', ' + days[2] allow_days = days[6] + ',' + days[0] elif day > 3: deny_days = days[0] + ', ' + days[1] allow_days = days[day] + ',' + days[day - 1] else: deny_days = days[4] + ',' + days[5] allow_days = days[day] + ',' + days[day + 1] log.info('Today: ' + days[day]) log.info('Allowed days: ' + allow_days) log.info('Deny days: ' + deny_days) # Set the deny days plugin.set_days_allowed(deny_days) attr_updated = 0 for i in range(0, timeout): if (str(plugin.get_days_allowed()) == deny_days): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-days-allowed was not updated") # Bind as Root DN - should fail for i in range(0, timeout): try: dm.bind() except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Set the allow days plugin.set_days_allowed(allow_days) attr_updated = 0 for i in range(0, timeout): if (str(plugin.get_days_allowed()) == allow_days): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-days-allowed was not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: dm.bind() break except: time.sleep(.5) def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test denied IP feature - we can just test denying 127.0.0.1 :id: a0ef30e5-538b-46fa-9762-01a4435a15e2 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set rootdn-deny-ip to '127.0.0.1' and '::1' 2. Bind as Root DN 3. Change the denied IP so root DN succeeds 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_denied_ip...') plugin.add_deny_ip('127.0.0.1') plugin.add_deny_ip('::1') attr_updated = 0 for i in range(0, timeout): if ('127.0.0.1' in str(plugin.get_deny_ip())): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-deny-ip was not updated") # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Change the denied IP so root DN succeeds plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) attr_updated = 0 for i in range(0, timeout): if ('255.255.255.255' in str(plugin.get_deny_ip())): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-deny-ip was not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test denied Host feature - we can just test denying localhost :id: a0ef30e5-538b-46fa-9762-01a4435a15e3 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set rootdn-deny-host to hostname (localhost if not accessable) 2. Bind as Root DN 3. Change the denied host so root DN succeeds 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_denied_host...') hostname = socket.gethostname() plugin.add_deny_host(hostname) if localhost != hostname: plugin.add_deny_host(localhost) attr_updated = 0 for i in range(0, timeout): if (str(plugin.get_deny_host()) == hostname) or (str(plugin.get_deny_host()) == localhost): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-deny-host was not updated") # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Change the denied host so root DN bind succeeds rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', rand_host)]) attr_updated = 0 for i in range(0, timeout): if (plugin.get_deny_host() == rand_host): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-deny-host was not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test allowed ip feature :id: a0ef30e5-538b-46fa-9762-01a4435a15e4 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set allowed ip to 255.255.255.255 - blocks the Root DN 2. Bind as Root DN 3. Allow localhost 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_allowed_ip...') # Set allowed ip to 255.255.255.255 - blocks the Root DN plugin.add_allow_ip('255.255.255.255') attr_updated = 0 for i in range(0, timeout): if ('255.255.255.255' in plugin.get_allow_ip()): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-allow-ip was not updated") # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Allow localhost plugin.add_allow_ip('127.0.0.1') plugin.add_allow_ip('::1') attr_updated = 0 for i in range(0, timeout): if ('127.0.0.1' in plugin.get_allow_ip()): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-allow-ip was not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test allowed host feature :id: a0ef30e5-538b-46fa-9762-01a4435a15e5 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set allowed host to an unknown host - blocks the Root DN 2. Bind as Root DN 3. Allow localhost 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_allowed_host...') # Set allowed host to an unknown host - blocks the Root DN rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) plugin.add_allow_host(rand_host) attr_updated = 0 for i in range(0, timeout): if (str(plugin.get_allow_host()) == rand_host): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-allow-host was not updated") # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Allow localhost plugin.remove_all_allow_host() plugin.add_allow_host(localhost) if hostname != localhost: plugin.add_allow_host(hostname) attr_updated = 0 for i in range(0, timeout): if (str(plugin.get_allow_host()) == hostname) or (str(plugin.get_allow_host()) == localhost): attr_updated = 1 break else: time.sleep(.5) if not attr_updated : raise Exception ("rootdn-allow-host was not updated") # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) def test_rootdn_config_validate(topology_st, rootdn_setup, rootdn_cleanup): """Test plugin configuration validation :id: a0ef30e5-538b-46fa-9762-01a4435a15e6 :setup: Standalone instance, rootdn plugin set up :steps: 1. Replace 'rootdn-open-time' with '0000' 2. Add 'rootdn-open-time': '0000' and 'rootdn-open-time': '0001' 3. Replace 'rootdn-open-time' with '-1' and 'rootdn-close-time' with '0000' 4. Replace 'rootdn-open-time' with '2400' and 'rootdn-close-time' with '0000' 5. Replace 'rootdn-open-time' with 'aaaaa' and 'rootdn-close-time' with '0000' 6. Replace 'rootdn-close-time' with '0000' 7. Add 'rootdn-close-time': '0000' and 'rootdn-close-time': '0001' 8. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '-1' 9. 
Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '2400' 10. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with 'aaaaa' 11. Add 'rootdn-days-allowed': 'Mon' and 'rootdn-days-allowed': 'Tue' 12. Replace 'rootdn-days-allowed' with 'Mon1' 13. Replace 'rootdn-days-allowed' with 'Tue, Mon1' 14. Replace 'rootdn-days-allowed' with 'm111m' 15. Replace 'rootdn-days-allowed' with 'Gur' 16. Replace 'rootdn-allow-ip' with '12.12.Z.12' 17. Replace 'rootdn-allow-ip' with '123.234.345.456' 18. Replace 'rootdn-allow-ip' with ':::' 19. Replace 'rootdn-deny-ip' with '12.12.Z.12' 20. Replace 'rootdn-deny-ip' with '123.234.345.456' 21. Replace 'rootdn-deny-ip' with ':::' 22. Replace 'rootdn-allow-host' with 'host._.com' 23. Replace 'rootdn-deny-host' with 'host.####.com' :expectedresults: 1. Should fail 2. Should fail 3. Should fail 4. Should fail 5. Should fail 6. Should fail 7. Should fail 8. Should fail 9. Should fail 10. Should fail 11. Should fail 12. Should fail 13. Should fail 14. Should fail 15. Should fail 16. Should fail 17. Should fail 18. Should fail 19. Should fail 20. Should fail 21. Should fail 22. Should fail 23. 
Should fail """ # Test invalid values for all settings with pytest.raises(ldap.UNWILLING_TO_PERFORM): log.info('Add just "rootdn-open-time"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')]) log.info('Add multiple "rootdn-open-time"') plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-open-time', '0000'), (ldap.MOD_ADD, 'rootdn-open-time', '0001')]) log.info('Add invalid "rootdn-open-time" -1 ') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'), (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) log.info('Add invalid "rootdn-open-time" 2400') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'), (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) log.info('Add invalid "rootdn-open-time" aaaaa') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','aaaaa'), (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) # Test rootdn-close-time log.info('Add just "rootdn-close-time"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) log.info('Add multiple "rootdn-close-time"') plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-close-time', '0000'), (ldap.MOD_ADD, 'rootdn-close-time', '0001')]) log.info('Add invalid "rootdn-close-time" -1 ') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')]) log.info('Add invalid "rootdn-close-time" 2400') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')]) log.info('Add invalid "rootdn-open-time" aaaaa') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','0000'), (ldap.MOD_REPLACE, 'rootdn-close-time','aaaaa')]) # Test days allowed log.info('Add multiple "rootdn-days-allowed"') plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'), (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')]) log.info('Add invalid "rootdn-days-allowed"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 
'Tue, Mon1')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')]) # Test allow ips log.info('Add invalid "rootdn-allow-ip"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '123.234.345.456')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', ':::')]) # Test deny ips log.info('Add invalid "rootdn-deny-ip"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '123.234.345.456')]) plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', ':::')]) # Test allow hosts log.info('Add invalid "rootdn-allow-host"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) # Test deny hosts log.info('Add invalid "rootdn-deny-host"') plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) @pytest.mark.ds50800 @pytest.mark.bz1807537 @pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.5'), reason="May fail because of bz1807537") def test_rootdn_access_denied_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test denied IP feature with a wildcard :id: 73c74f62-9ac2-4bb6-8a63-bacc8d8bbf93 :setup: Standalone instance, rootdn plugin set up :steps: 1. Set rootdn-deny-ip to '127.*' 2. Bind as Root DN 3. Change the denied IP so root DN succeeds 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_denied_ip_wildcard...') plugin.add_deny_ip('127.*') # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Change the denied IP so root DN succeeds plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) @pytest.mark.ds50800 @pytest.mark.bz1807537 @pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.5'), reason="May fail because of bz1807537") def test_rootdn_access_allowed_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): """Test allowed ip feature :id: c3e22c61-9ed2-4e89-8243-6ff686ecad9b :setup: Standalone instance, rootdn plugin set up :steps: 1. Set allowed ip to 255.255.255.255 - blocks the Root DN 2. Bind as Root DN 3. Allow 127.* 4. Bind as Root DN :expectedresults: 1. Success 2. Should fail 3. Success 4. 
Success """ log.info('Running test_rootdn_access_allowed_ip...') # Set allowed ip to 255.255.255.255 - blocks the Root DN plugin.add_allow_ip('255.255.255.255') time.sleep(.5) # Bind as Root DN - should fail uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) except ldap.UNWILLING_TO_PERFORM: break else: time.sleep(.5) # Allow localhost plugin.add_allow_ip('127.*') # Bind as Root DN - should succeed for i in range(0, timeout): try: rootdn_bind(topology_st.standalone, uri=uri) break except: time.sleep(.5) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/psearch/000077500000000000000000000000001421664411400237755ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/psearch/__init__.py000066400000000000000000000001001421664411400260750ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Persistent Search control """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/psearch/psearch_test.py000066400000000000000000000054741421664411400270450ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import ldap import os import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_st from lib389.idm.group import Groups from ldap.controls.psearch import PersistentSearchControl,EntryChangeNotificationControl pytestmark = pytest.mark.tier1 def _run_psearch(inst, msg_id): """Run a search with EntryChangeNotificationControl""" results = [] while True: try: _, data, _, _, _, _ = inst.result4(msgid=msg_id, all=0, timeout=1.0, add_ctrls=1, add_intermediates=1, resp_ctrl_classes={EntryChangeNotificationControl.controlType:EntryChangeNotificationControl}) # See if there are any entry changes for dn, entry, srv_ctrls in data: ecn_ctrls = filter(lambda c: c.controlType == EntryChangeNotificationControl.controlType, srv_ctrls) if ecn_ctrls: inst.log.info('%s has changed!' % dn) results.append(dn) except ldap.TIMEOUT: # There are no more results, so we timeout. inst.log.info('No more results') return results def test_psearch(topology_st): """Check basic Persistent Search control functionality :id: 4b395ef4-c3ff-49d1-a680-b9fdffa633bd :setup: Standalone instance :steps: 1. Run an extended search with a Persistent Search control 2. Create a new group (could be any entry) 3. Run an extended search with a Persistent Search control again 4. Check that entry DN is in the result :expectedresults: 1. Operation should be successful 2. Group should be successfully created 3. Operation should be successful 4. 
Entry DN should be in the result """ # Create the search control psc = PersistentSearchControl() # do a search extended with the control msg_id = topology_st.standalone.search_ext(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, attrlist=['*'], serverctrls=[psc]) # Get the result for the message id with result4 _run_psearch(topology_st.standalone, msg_id) # Change an entry / add one groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group1', 'description': 'testgroup'}) # Now run the result again and see what's there. results = _run_psearch(topology_st.standalone, msg_id) # assert our group is in the changeset. assert(group.dn.lower() == results[0]) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/pwp_storage/000077500000000000000000000000001421664411400247025ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/pwp_storage/__init__.py000066400000000000000000000000761421664411400270160ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Password Storage Scheme """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/pwp_storage/storage_test.py000066400000000000000000000117151421664411400277640ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---- """ This file contains the test for password storage scheme """ import os import subprocess import shutil import pytest from lib389.topologies import topology_st as topo from lib389.idm.user import UserAccounts, UserAccount from lib389._constants import DEFAULT_SUFFIX from lib389.config import Config from lib389.password_plugins import PBKDF2Plugin, SSHA512Plugin from lib389.utils import ds_is_older pytestmark = pytest.mark.tier1 def user_config(topo, field_value): """ Will set storage schema and create user. """ Config(topo.standalone).replace("passwordStorageScheme", field_value) user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() user.set('userpassword', 'ItsMeAnuj') return user LIST_FOR_PARAMETERIZATION = ["CRYPT", "SHA", "SSHA", "SHA256", "SSHA256", "SHA384", "SSHA384", "SHA512", "SSHA512", "MD5", "PBKDF2_SHA256"] @pytest.mark.parametrize("value", LIST_FOR_PARAMETERIZATION, ids=LIST_FOR_PARAMETERIZATION) def test_check_password_scheme(topo, value): """Check all password scheme. :id: 196bccfc-33a6-11ea-a2a5-8c16451d917b :parametrized: yes :setup: Standalone :steps: 1. Change password scheme and create user with password. 2. check password scheme is set . 3. Delete user :expected results: 1. Pass 2. Pass 3. Pass """ user = user_config(topo, value) assert '{' + f'{value.lower()}' + '}' in \ UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() user.delete() def test_clear_scheme(topo): """Check clear password scheme. :id: 2420aadc-33a6-11ea-b59a-8c16451d917b :setup: Standalone :steps: 1. Change password scheme and create user with password. 2. check password scheme is set . 3. Delete user :expected results: 1. Pass 2. Pass 3. 
Pass """ user = user_config(topo, "CLEAR") assert "ItsMeAnuj" in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword') user.delete() def test_check_two_scheme(topo): """Check password scheme SHA and CRYPT :id: 2b677f1e-33a6-11ea-a371-8c16451d917b :setup: Standalone :steps: 1. Change password scheme and create user with password. 2. check password scheme is set . 3. Delete user :expected results: 1. Pass 2. Pass 3. Pass """ for schema, value in [("nsslapd-rootpwstoragescheme", "SHA"), ("passwordStorageScheme", "CRYPT")]: Config(topo.standalone).replace(schema, value) topo.standalone.restart() user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() user.set('userpassword', 'ItsMeAnuj') assert '{' + f'{"CRYPT".lower()}' + '}' \ in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() user.delete() @pytest.mark.skipif(ds_is_older('1.4'), reason="Not implemented") def test_check_pbkdf2_sha256(topo): """Check password scheme PBKDF2_SHA256. :id: 31612e7e-33a6-11ea-a750-8c16451d917b :setup: Standalone :steps: 1. Try to delete PBKDF2_SHA256. 2. Should not deleted PBKDF2_SHA256 and server should up. :expected results: 1. Pass 2. Pass """ value = 'PBKDF2_SHA256' user = user_config(topo, value) assert '{' + f'{value.lower()}' + '}' in \ UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() plg = PBKDF2Plugin(topo.standalone) plg._protected = False plg.delete() topo.standalone.restart() assert Config(topo.standalone).get_attr_val_utf8('passwordStorageScheme') == 'PBKDF2_SHA256' assert topo.standalone.status() user.delete() def test_check_ssha512(topo): """Check password scheme SSHA512. :id: 9db023d2-33a1-11ea-b68c-8c16451d917b :setup: Standalone :steps: 1. Try to delete SSHA512Plugin. 2. Should deleted SSHA512Plugin and server should not up. 3. Restore dse file to recover :expected results: 1. Pass 2. Pass 3. 
Pass """ value = 'SSHA512' config_dir = topo.standalone.get_config_dir() user = user_config(topo, value) assert '{' + f'{value.lower()}' + '}' in \ UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() plg = SSHA512Plugin(topo.standalone) plg._protected = False plg.delete() with pytest.raises(subprocess.CalledProcessError): topo.standalone.restart() shutil.copy(config_dir + '/dse.ldif.startOK', config_dir + '/dse.ldif') topo.standalone.restart() user.delete() if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/referint_plugin/000077500000000000000000000000001421664411400255445ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/referint_plugin/__init__.py000066400000000000000000000001031421664411400276470ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Referential Integrity Plugin """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/referint_plugin/rename_test.py000066400000000000000000000123411421664411400304250ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import topology_m2 from lib389.replica import ReplicationManager from lib389.idm.group import Groups from lib389.idm.user import nsUserAccounts from lib389.idm.organizationalunit import OrganizationalUnit as OrganisationalUnit from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin pytestmark = pytest.mark.tier2 UCOUNT = 400 def _enable_plugins(inst, group_dn): # Enable automember amp = AutoMembershipPlugin(inst) amp.enable() # Create the automember definition automembers = AutoMembershipDefinitions(inst) automember = automembers.create(properties={ 'cn': 'testgroup_definition', 'autoMemberScope': DEFAULT_SUFFIX, 'autoMemberFilter': 'objectclass=nsAccount', 'autoMemberDefaultGroup': group_dn, 'autoMemberGroupingAttr': 'member:dn', }) # Enable MemberOf mop = MemberOfPlugin(inst) mop.enable() # Enable referint rip = ReferentialIntegrityPlugin(inst) # We only need to enable the plugin, the default configuration is sane and # correctly coveres member as an enforced attribute. rip.enable() # Restart to make sure it's enabled and good to go. inst.restart() def test_rename_large_subtree(topology_m2): """ A report stated that the following configuration would lead to an operation failure: ou=int,ou=account,dc=... ou=s1,ou=int,ou=account,dc=... ou=s2,ou=int,ou=account,dc=... rename ou=s1 to re-parent to ou=account, leaving: ou=int,ou=account,dc=... ou=s1,ou=account,dc=... ou=s2,ou=account,dc=... The ou=s1 if it has < 100 entries below, is able to be reparented. If ou=s1 has > 400 entries, it fails. Other conditions was the presence of referential integrity - so one would assume that all users under s1 are a member of some group external to this. :id: 5915c38d-b3c2-4b7c-af76-8a1e002e27f7 :setup: standalone instance :steps: 1. Enable automember plugin 2. 
Add UCOUNT users, and ensure they are members of a group. 3. Enable refer-int plugin 4. Move ou=s1 to a new parent :expectedresults: 1. The plugin is enabled 2. The users are members of the group 3. The plugin is enabled 4. The rename operation of ou=s1 succeeds """ st = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] # Create a default group gps = Groups(st, DEFAULT_SUFFIX) # Keep the group so we can get it's DN out. group = gps.create(properties={ 'cn': 'default_group' }) _enable_plugins(st, group.dn) _enable_plugins(m2, group.dn) # Now unlike normal, we bypass the plural-create method, because we need control # over the exact DN of the OU to create. # Create the ou=account # We don't need to set a DN here because ... ou_account = OrganisationalUnit(st) # It's set in the .create step. ou_account.create( basedn = DEFAULT_SUFFIX, properties={ 'ou': 'account' }) # create the ou=int,ou=account ou_int = OrganisationalUnit(st) ou_int.create( basedn = ou_account.dn, properties={ 'ou': 'int' }) # Create the ou=s1,ou=int,ou=account ou_s1 = OrganisationalUnit(st) ou_s1.create( basedn = ou_int.dn, properties={ 'ou': 's1' }) # Pause replication repl = ReplicationManager(DEFAULT_SUFFIX) repl.disable_to_supplier(m2, [st, ]) # Create the users 1 -> UCOUNT in ou=s1 nsu = nsUserAccounts(st, basedn=ou_s1.dn, rdn=None) for i in range(1000, 1000 + UCOUNT): nsu.create_test_user(uid=i) # Enable replication repl.enable_to_supplier(m2, [st, ]) # Assert they are in the group as we expect members = group.get_attr_vals_utf8('member') assert len(members) == UCOUNT # Wait for replication repl.wait_for_replication(st, m2, timeout=60) for i in range(0, 5): # Move ou=s1 to ou=account as parent. We have to provide the rdn, # even though it's not changing. ou_s1.rename('ou=s1', newsuperior=ou_account.dn) members = group.get_attr_vals_utf8('member') assert len(members) == UCOUNT # Check that we really did refer-int properly, and ou=int is not in the members. 
for member in members: assert 'ou=int' not in member # Now move it back ou_s1.rename('ou=s1', newsuperior=ou_int.dn) members = group.get_attr_vals_utf8('member') assert len(members) == UCOUNT for member in members: assert 'ou=int' in member # Check everythig on the other side is good. repl.wait_for_replication(st, m2, timeout=60) group2 = Groups(m2, DEFAULT_SUFFIX).get('default_group') members = group2.get_attr_vals_utf8('member') assert len(members) == UCOUNT for member in members: assert 'ou=int' in member 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/000077500000000000000000000000001421664411400246615ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/__init__.py000066400000000000000000000007371421664411400270010ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Replication """ import time import ldap from lib389._constants import DEFAULT_SUFFIX def get_repl_entries(topo, entry_name, attr_list): """Get a list of test entries from all suppliers""" entries_list = [] time.sleep(10) for inst in topo.all_insts.values(): entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(entry_name), attr_list) entries_list += entries return entries_list 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/acceptance_test.py000066400000000000000000000601141421664411400303620ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import logging from lib389.replica import Replicas from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m4 as topo_m4 from lib389.topologies import topology_m2 as topo_m2 from . 
import get_repl_entries from lib389.idm.user import UserAccount from lib389.replica import ReplicationManager from lib389._constants import * pytestmark = pytest.mark.tier0 TEST_ENTRY_NAME = 'mmrepl_test' TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) NEW_SUFFIX_NAME = 'test_repl' NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) NEW_BACKEND = 'repl_base' DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="function") def create_entry(topo_m4, request): """Add test entry to supplier1""" log.info('Adding entry {}'.format(TEST_ENTRY_DN)) test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) if test_user.exists(): log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) test_user.delete() test_user.create(properties={ 'uid': TEST_ENTRY_NAME, 'cn': TEST_ENTRY_NAME, 'sn': TEST_ENTRY_NAME, 'userPassword': TEST_ENTRY_NAME, 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/mmrepl_test', }) @pytest.fixture(scope="function") def new_suffix(topo_m4, request): """Add a new suffix and enable a replication on it""" for num in range(1, 5): log.info('Adding suffix:{} and backend: {} to supplier{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) topo_m4.ms["supplier{}".format(num)].backend.create(NEW_SUFFIX, {BACKEND_NAME: NEW_BACKEND}) topo_m4.ms["supplier{}".format(num)].mappingtree.create(NEW_SUFFIX, NEW_BACKEND) try: topo_m4.ms["supplier{}".format(num)].add_s(Entry((NEW_SUFFIX, { 'objectclass': 'top', 'objectclass': 'organization', 'o': NEW_SUFFIX_NAME, 'description': NEW_SUFFIX_NAME }))) except ldap.LDAPError as e: log.error('Failed to add suffix ({}): error ({})'.format(NEW_SUFFIX, e.message['desc'])) raise def fin(): for num in range(1, 5): log.info('Deleting suffix:{} and backend: {} from supplier{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) 
topo_m4.ms["supplier{}".format(num)].mappingtree.delete(NEW_SUFFIX) topo_m4.ms["supplier{}".format(num)].backend.delete(NEW_SUFFIX) request.addfinalizer(fin) def test_add_entry(topo_m4, create_entry): """Check that entries are replicated after add operation :id: 024250f1-5f7e-4f3b-a9f5-27741e6fd405 :setup: Four suppliers replication setup, an entry :steps: 1. Check entry on all other suppliers :expectedresults: 1. The entry should be replicated to all suppliers """ entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) assert all(entries), "Entry {} wasn't replicated successfully".format(TEST_ENTRY_DN) def test_modify_entry(topo_m4, create_entry): """Check that entries are replicated after modify operation :id: 36764053-622c-43c2-a132-d7a3ab7d9aaa :setup: Four suppliers replication setup, an entry :steps: 1. Modify the entry on supplier1 - add attribute 2. Wait for replication to happen 3. Check entry on all other suppliers 4. Modify the entry on supplier1 - replace attribute 5. Wait for replication to happen 6. Check entry on all other suppliers 7. Modify the entry on supplier1 - delete attribute 8. Wait for replication to happen 9. Check entry on all other suppliers :expectedresults: 1. Attribute should be successfully added 2. Some time should pass 3. The change should be present on all suppliers 4. Attribute should be successfully replaced 5. Some time should pass 6. The change should be present on all suppliers 7. Attribute should be successfully deleted 8. Some time should pass 9. 
The change should be present on all suppliers """ log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) test_user.add('mail', '{}@redhat.com'.format(TEST_ENTRY_NAME)) time.sleep(1) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user: assert "{}@redhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') log.info('Modifying entry {} - replace operation'.format(TEST_ENTRY_DN)) test_user.replace('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) time.sleep(1) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user: assert "{}@greenhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') log.info('Modifying entry {} - delete operation'.format(TEST_ENTRY_DN)) test_user.remove('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) time.sleep(1) all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) for u in all_user: assert "{}@greenhat.com".format(TEST_ENTRY_NAME) not in u.get_attr_vals_utf8('mail') def test_delete_entry(topo_m4, create_entry): """Check that entry deletion is replicated after delete operation :id: 18437262-9d6a-4b98-a47a-6182501ab9bc :setup: Four suppliers replication setup, an entry :steps: 1. Delete the entry from supplier1 2. Check entry on all other suppliers :expectedresults: 1. The entry should be deleted 2. 
The change should be present on all suppliers """ log.info('Deleting entry {} during the test'.format(TEST_ENTRY_DN)) topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) @pytest.mark.parametrize("delold", [0, 1]) def test_modrdn_entry(topo_m4, create_entry, delold): """Check that entries are replicated after modrdn operation :id: 02558e6d-a745-45ae-8d88-34fe9b16adc9 :parametrized: yes :setup: Four suppliers replication setup, an entry :steps: 1. Make modrdn operation on entry on supplier1 with both delold 1 and 0 2. Check entry on all other suppliers :expectedresults: 1. Modrdn operation should be successful 2. The change should be present on all suppliers """ newrdn_name = 'newrdn' newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) try: topo_m4.ms["supplier1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name), delold) except ldap.LDAPError as e: log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, e.message['desc'])) raise e try: entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) if delold == 0: entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) assert all(entries_old), "Entry with old rdn {} wasn't replicated successfully".format(TEST_ENTRY_DN) else: entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) assert not entries_old, "Entry with old rdn {} wasn't removed in replicas successfully".format( TEST_ENTRY_DN) finally: log.info('Remove entry with new RDN {}'.format(newrdn_dn)) topo_m4.ms["supplier1"].delete_s(newrdn_dn) def test_modrdn_after_pause(topo_m4): """Check that changes are properly replicated after replica pause :id: 6271dc9c-a993-4a9e-9c6d-05650cdab282 :setup: Four suppliers replication setup, an 
entry :steps: 1. Pause all replicas 2. Make modrdn operation on entry on supplier1 3. Resume all replicas 4. Wait for replication to happen 5. Check entry on all other suppliers :expectedresults: 1. Replicas should be paused 2. Modrdn operation should be successful 3. Replicas should be resumed 4. Some time should pass 5. The change should be present on all suppliers """ newrdn_name = 'newrdn' newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) log.info('Adding entry {}'.format(TEST_ENTRY_DN)) try: topo_m4.ms["supplier1"].add_s(Entry((TEST_ENTRY_DN, { 'objectclass': 'top person'.split(), 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'cn': TEST_ENTRY_NAME, 'sn': TEST_ENTRY_NAME, 'uid': TEST_ENTRY_NAME }))) except ldap.LDAPError as e: log.error('Failed to add entry (%s): error (%s)' % (TEST_ENTRY_DN, e.message['desc'])) raise e log.info('Pause all replicas') topo_m4.pause_all_replicas() log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) try: topo_m4.ms["supplier1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name)) except ldap.LDAPError as e: log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, e.message['desc'])) raise e log.info('Resume all replicas') topo_m4.resume_all_replicas() log.info('Wait for replication to happen') time.sleep(3) try: entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) finally: log.info('Remove entry with new RDN {}'.format(newrdn_dn)) topo_m4.ms["supplier1"].delete_s(newrdn_dn) @pytest.mark.bz842441 def test_modify_stripattrs(topo_m4): """Check that we can modify nsds5replicastripattrs :id: f36abed8-e262-4f35-98aa-71ae55611aaa :setup: Four suppliers replication setup :steps: 1. Modify nsds5replicastripattrs attribute on any agreement 2. Search for the modified attribute :expectedresults: It should be contain the value 1. nsds5replicastripattrs should be successfully set 2. 
The modified attribute should be the one we set """ m1 = topo_m4.ms["supplier1"] agreement = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn attr_value = b'modifiersname modifytimestamp' log.info('Modify nsds5replicastripattrs with {}'.format(attr_value)) m1.modify_s(agreement, [(ldap.MOD_REPLACE, 'nsds5replicastripattrs', [attr_value])]) log.info('Check nsds5replicastripattrs for {}'.format(attr_value)) entries = m1.search_s(agreement, ldap.SCOPE_BASE, "objectclass=*", ['nsds5replicastripattrs']) assert attr_value in entries[0].data['nsds5replicastripattrs'] def test_new_suffix(topo_m4, new_suffix): """Check that we can enable replication on a new suffix :id: d44a9ed4-26b0-4189-b0d0-b2b336ddccbd :setup: Four suppliers replication setup, a new suffix :steps: 1. Enable replication on the new suffix 2. Check if replication works 3. Disable replication on the new suffix :expectedresults: 1. Replication on the new suffix should be enabled 2. Replication should work 3. Replication on the new suffix should be disabled """ m1 = topo_m4.ms["supplier1"] m2 = topo_m4.ms["supplier2"] repl = ReplicationManager(NEW_SUFFIX) repl.create_first_supplier(m1) repl.join_supplier(m1, m2) repl.test_replication(m1, m2) repl.test_replication(m2, m1) repl.remove_supplier(m1) repl.remove_supplier(m2) def test_many_attrs(topo_m4, create_entry): """Check a replication with many attributes (add and delete) :id: d540b358-f67a-43c6-8df5-7c74b3cb7523 :setup: Four suppliers replication setup, a test entry :steps: 1. Add 10 new attributes to the entry 2. Delete few attributes: one from the beginning, two from the middle and one from the end 3. Check that the changes were replicated in the right order :expectedresults: 1. The attributes should be successfully added 2. Delete operations should be successful 3. 
The changes should be replicated in the right order """ m1 = topo_m4.ms["supplier1"] add_list = ensure_list_bytes(map(lambda x: "test{}".format(x), range(10))) delete_list = ensure_list_bytes(map(lambda x: "test{}".format(x), [0, 4, 7, 9])) test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) log.info('Modifying entry {} - 10 add operations'.format(TEST_ENTRY_DN)) for add_name in add_list: test_user.add('description', add_name) log.info('Check that everything was properly replicated after an add operation') entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) for entry in entries: assert all(entry.getValues("description")[i] == add_name for i, add_name in enumerate(add_list)) log.info('Modifying entry {} - 4 delete operations for {}'.format(TEST_ENTRY_DN, str(delete_list))) for delete_name in delete_list: test_user.remove('description', delete_name) log.info('Check that everything was properly replicated after a delete operation') entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) for entry in entries: for i, value in enumerate(entry.getValues("description")): assert value == [name for name in add_list if name not in delete_list][i] assert value not in delete_list def test_double_delete(topo_m4, create_entry): """Check that double delete of the entry doesn't crash server :id: 5b85a5af-df29-42c7-b6cb-965ec5aa478e :feature: Multi supplier replication :setup: Four suppliers replication setup, a test entry :steps: 1. Delete the entry 2. Delete the entry on the second supplier 3. Check that server is alive :expectedresults: Server hasn't crash """ log.info('Deleting entry {} from supplier1'.format(TEST_ENTRY_DN)) topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) log.info('Deleting entry {} from supplier2'.format(TEST_ENTRY_DN)) try: topo_m4.ms["supplier2"].delete_s(TEST_ENTRY_DN) except ldap.NO_SUCH_OBJECT: log.info("Entry {} wasn't found supplier2. 
It is expected.".format(TEST_ENTRY_DN)) log.info('Make searches to check if server is alive') entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) def test_password_repl_error(topo_m4, create_entry): """Check that error about userpassword replication is properly logged :id: d4f12dc0-cd2c-4b92-9b8d-d764a60f0698 :feature: Multi supplier replication :setup: Four suppliers replication setup, a test entry :steps: 1. Change userpassword on supplier 1 2. Restart the servers to flush the logs 3. Check the error log for an replication error :expectedresults: We don't have a replication error in the error log """ m1 = topo_m4.ms["supplier1"] m2 = topo_m4.ms["supplier2"] TEST_ENTRY_NEW_PASS = 'new_{}'.format(TEST_ENTRY_NAME) log.info('Clean the error log') m2.deleteErrorLogs() log.info('Set replication loglevel') m2.config.loglevel((ErrorLog.REPLICA,)) log.info('Modifying entry {} - change userpassword on supplier 2'.format(TEST_ENTRY_DN)) test_user_m1 = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) test_user_m2 = UserAccount(topo_m4.ms["supplier2"], TEST_ENTRY_DN) test_user_m3 = UserAccount(topo_m4.ms["supplier3"], TEST_ENTRY_DN) test_user_m4 = UserAccount(topo_m4.ms["supplier4"], TEST_ENTRY_DN) test_user_m1.set('userpassword', TEST_ENTRY_NEW_PASS) log.info('Restart the servers to flush the logs') for num in range(1, 5): topo_m4.ms["supplier{}".format(num)].restart(timeout=10) m1_conn = test_user_m1.bind(TEST_ENTRY_NEW_PASS) m2_conn = test_user_m2.bind(TEST_ENTRY_NEW_PASS) m3_conn = test_user_m3.bind(TEST_ENTRY_NEW_PASS) m4_conn = test_user_m4.bind(TEST_ENTRY_NEW_PASS) log.info('Check the error log for the error with {}'.format(TEST_ENTRY_DN)) assert not m2.ds_error_log.match('.*can.t add a change for uid={}.*'.format(TEST_ENTRY_NAME)) def test_invalid_agmt(topo_m4): """Test adding that an invalid agreement is properly rejected and does not crash the server :id: 
92f10f46-1be1-49ca-9358-784359397bc2 :setup: MMR with four suppliers :steps: 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) 2. Verify the server is still running :expectedresults: 1. Invalid repl agreement should be rejected 2. Server should be still running """ m1 = topo_m4.ms["supplier1"] # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) AGMT_DN = 'cn=whatever,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config' try: invalid_props = {RA_ENABLED: 'True', # Invalid value RA_SCHEDULE: '0001-2359 0123456'} m1.agreement.create(suffix=DEFAULT_SUFFIX, host='localhost', port=389, properties=invalid_props) except ldap.UNWILLING_TO_PERFORM: m1.log.info('Invalid repl agreement correctly rejected') except ldap.LDAPError as e: m1.log.fatal('Got unexpected error adding invalid agreement: ' + str(e)) assert False else: m1.log.fatal('Invalid agreement was incorrectly accepted by the server') assert False # Verify the server is still running try: m1.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: m1.log.fatal('Failed to bind: ' + str(e)) assert False def test_warining_for_invalid_replica(topo_m4): """Testing logs to indicate the inconsistency when configuration is performed. :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c8 :setup: MMR with four suppliers :steps: 1. Setup nsds5ReplicaBackoffMin to 20 2. Setup nsds5ReplicaBackoffMax to 10 :expectedresults: 1. nsds5ReplicaBackoffMin should set to 20 2. An error should be generated and also logged in the error logs. 
""" replicas = Replicas(topo_m4.ms["supplier1"]) replica = replicas.list()[0] log.info('Set nsds5ReplicaBackoffMin to 20') replica.set('nsds5ReplicaBackoffMin', '20') with pytest.raises(ldap.UNWILLING_TO_PERFORM): log.info('Set nsds5ReplicaBackoffMax to 10') replica.set('nsds5ReplicaBackoffMax', '10') log.info('Resetting configuration: nsds5ReplicaBackoffMin') replica.remove_all('nsds5ReplicaBackoffMin') log.info('Check the error log for the error') assert topo_m4.ms["supplier1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*') @pytest.mark.ds51082 def test_csnpurge_large_valueset(topo_m2): """Test csn generator test :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74 :setup: MMR with 2 suppliers :steps: 1. Create a test_user 2. add a large set of values (more than 10) 3. delete all the values (more than 10) 4. configure the replica to purge those values (purgedelay=5s) 5. Waiting for 6 second 6. do a series of update :expectedresults: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. Should succeeds 5. Should succeeds 6. 
Should not crash """ m1 = topo_m2.ms["supplier2"] test_user = UserAccount(m1, TEST_ENTRY_DN) if test_user.exists(): log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) test_user.delete() test_user.create(properties={ 'uid': TEST_ENTRY_NAME, 'cn': TEST_ENTRY_NAME, 'sn': TEST_ENTRY_NAME, 'userPassword': TEST_ENTRY_NAME, 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/mmrepl_test', }) # create a large value set so that it is sorted for i in range(1,20): test_user.add('description', 'value {}'.format(str(i))) # delete all values of the valueset for i in range(1,20): test_user.remove('description', 'value {}'.format(str(i))) # set purging delay to 5 second and wait more that 5second replicas = Replicas(m1) replica = replicas.list()[0] log.info('nsds5ReplicaPurgeDelay to 5') replica.set('nsds5ReplicaPurgeDelay', '5') time.sleep(6) # add some new values to the valueset containing entries that should be purged for i in range(21,25): test_user.add('description', 'value {}'.format(str(i))) @pytest.mark.ds51244 def test_urp_trigger_substring_search(topo_m2): """Test that a ADD of a entry with a '*' in its DN, triggers an internal search with a escaped DN :id: 9869bb39-419f-42c3-a44b-c93eb0b77667 :customerscenario: True :setup: MMR with 2 suppliers :steps: 1. enable internal operation loggging for plugins 2. Create on M1 a test_user with a '*' in its DN 3. Check the test_user is replicated 4. Check in access logs that the internal search does not contain '*' :expectedresults: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Enable loggging of internal operation logging to capture URP intop log.info('Set nsslapd-plugin-logging to on') for inst in (m1, m2): inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') inst.config.set('nsslapd-plugin-logging', 'on') inst.restart() # add a user with a DN containing '*' test_asterisk_uid = 'asterisk_*_in_value' test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX) test_user = UserAccount(m1, test_asterisk_dn) if test_user.exists(): log.info('Deleting entry {}'.format(test_asterisk_dn)) test_user.delete() test_user.create(properties={ 'uid': test_asterisk_uid, 'cn': test_asterisk_uid, 'sn': test_asterisk_uid, 'userPassword': test_asterisk_uid, 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/asterisk', }) # check that the ADD was replicated on M2 test_user_m2 = UserAccount(m2, test_asterisk_dn) for i in range(1,5): if test_user_m2.exists(): break else: log.info('Entry not yet replicated on M2, wait a bit') time.sleep(2) # check that M2 access logs does not "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))" log.info('Check that on M2, URP as not triggered such internal search') pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*" found = m2.ds_access_log.match(pattern) log.info("found line: %s" % found) assert not found @pytest.mark.skipif(ds_is_older('1.4.4'), reason="Not implemented") def test_csngen_task(topo_m2): """Test csn generator test :id: b976849f-dbed-447e-91a7-c877d5d71fd0 :setup: MMR with 2 suppliers :steps: 1. Create a csngen_test task 2. Check that debug messages "_csngen_gen_tester_main" are in errors logs :expectedresults: 1. Should succeeds 2. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] csngen_task = csngenTestTask(m1) csngen_task.create(properties={ 'ttl': '300' }) time.sleep(10) log.info('Check the error log contains strings showing csn generator is tested') assert m1.searchErrorsLog("_csngen_gen_tester_main") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/cascading_test.py000066400000000000000000000116021421664411400302060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest import os import ldap from lib389._constants import * from lib389.replica import ReplicationManager from lib389.plugins import MemberOfPlugin from lib389.agreement import Agreements from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES from lib389.idm.group import Groups from lib389.topologies import topology_m1h1c1 as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' BIND_RDN = 'tuser1' def config_memberof(server): """Configure memberOf plugin and configure fractional to prevent total init to send memberof """ memberof = MemberOfPlugin(server) memberof.enable() memberof.set_autoaddoc('nsMemberOf') server.restart() agmts = Agreements(server) for agmt in agmts.list(): log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % agmt.dn) agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '), ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')) def 
test_basic_with_hub(topo): """Check that basic operations work in cascading replication, this includes testing plugins that perform internal operatons, and replicated password policy state attributes. :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc :setup: 1 supplier, 1 hub, 1 consumer :steps: 1. Enable memberOf plugin and set password account lockout settings 2. Restart the instance 3. Add a user 4. Add a group 5. Test that the replication works 6. Add the user as a member to the group 7. Test that the replication works 8. Issue bad binds to update passwordRetryCount 9. Test that replicaton works 10. Check that passwordRetyCount was replicated :expectedresults: 1. Should be a success 2. Should be a success 3. Should be a success 4. Should be a success 5. Should be a success 6. Should be a success 7. Should be a success 8. Should be a success 9. Should be a success 10. Should be a success """ repl_manager = ReplicationManager(DEFAULT_SUFFIX) supplier = topo.ms["supplier1"] consumer = topo.cs["consumer1"] hub = topo.hs["hub1"] for inst in topo: config_memberof(inst) inst.config.set('passwordlockout', 'on') inst.config.set('passwordlockoutduration', '60') inst.config.set('passwordmaxfailure', '3') inst.config.set('passwordIsGlobalPolicy', 'on') # Create user user1 = UserAccount(supplier, BIND_DN) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'sn': BIND_RDN, 'cn': BIND_RDN, 'uid': BIND_RDN, 'inetUserStatus': '1', 'objectclass': 'extensibleObject', 'userpassword': PASSWORD}) user1.create(properties=user_props, basedn=SUFFIX) # Create group groups = Groups(supplier, DEFAULT_SUFFIX) group = groups.create(properties={'cn': 'group'}) # Test replication repl_manager.test_replication(supplier, consumer) # Trigger memberOf plugin by adding user to group group.replace('member', user1.dn) # Test replication once more repl_manager.test_replication(supplier, consumer) # Issue bad password to update passwordRetryCount try: supplier.simple_bind_s(user1.dn, "badpassword") 
except: pass # Test replication one last time supplier.simple_bind_s(DN_DM, PASSWORD) repl_manager.test_replication(supplier, consumer) # Finally check if passwordRetyCount was replicated to the hub and consumer user1 = UserAccount(hub, BIND_DN) count = user1.get_attr_val_int('passwordRetryCount') if count is None: log.fatal('PasswordRetyCount was not replicated to hub') assert False if int(count) != 1: log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) assert False user1 = UserAccount(consumer, BIND_DN) count = user1.get_attr_val_int('passwordRetryCount') if count is None: log.fatal('PasswordRetyCount was not replicated to consumer') assert False if int(count) != 1: log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/changelog_encryption_test.py000066400000000000000000000045001421664411400324720ustar00rootroot00000000000000import logging import pytest import os import time from lib389._constants import DEFAULT_SUFFIX, DN_CHANGELOG, DN_USERROOT_LDBM from lib389.topologies import topology_m1c1 as topo from lib389.dseldif import DSEldif from lib389.utils import ds_supports_new_changelog from lib389.replica import Replicas pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_cl_encryption_setup_process(topo): """Take an already working replication deployment, and setup changelog encryption :id: 1a1b7d29-69f5-4f0e-91c4-e7f66140ff17 :setup: Supplier Instance, Consumer Instance :steps: 1. Enable TLS for the server 2. Export changelog 3. Enable changelog encryption 4. Import changelog 5. 
Verify replication is still working :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success """ supplier = topo.ms['supplier1'] consumer = topo.cs['consumer1'] # Enable TLS log.info('Enable TLS ...') supplier.enable_tls() consumer.enable_tls() # Export changelog log.info('Export changelog ...') replicas = Replicas(supplier) replica = replicas.get(DEFAULT_SUFFIX) replica.begin_task_cl2ldif() replica.task_finished() # Enable changelog encryption log.info('Enable changelog encryption ...') dse_ldif = DSEldif(supplier) supplier.stop() if ds_supports_new_changelog(): changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) else: changelog = DN_CHANGELOG dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', 'AES') if dse_ldif.get(changelog, 'nsSymmetricKey'): dse_ldif.delete(changelog, 'nsSymmetricKey') supplier.start() # Import changelog log.info('Import changelog ...') replica.begin_task_ldif2cl() replica.task_finished() # Verify replication is still working log.info('Test replication is still working ...') assert replica.test_replication([consumer]) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/changelog_test.py000066400000000000000000000777141421664411400302410ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import os import logging import ldap import ldif import pytest import time import subprocess import glob from lib389.properties import TASK_WAIT from lib389.replica import Replicas from lib389.idm.user import UserAccounts from lib389.topologies import topology_m2 as topo from lib389._constants import * from lib389.plugins import RetroChangelogPlugin from lib389.dseldif import DSEldif from lib389.tasks import * from lib389.utils import * from lib389.utils import ensure_bytes, ds_supports_new_changelog pytestmark = pytest.mark.tier1 TEST_ENTRY_NAME = 'replusr' NEW_RDN_NAME = 'cl5usr' if ds_supports_new_changelog(): CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) else: CHANGELOG = 'cn=changelog5,cn=config' RETROCHANGELOG = 'cn=Retro Changelog Plugin,cn=plugins,cn=config' MAXAGE = 'nsslapd-changelogmaxage' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval' FILTER = '(cn=*)' DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _perform_ldap_operations(topo): """Add a test user, modify description, modrdn user and delete it""" log.info('Adding user {}'.format(TEST_ENTRY_NAME)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) user_properties = { 'uid': TEST_ENTRY_NAME, 'cn': TEST_ENTRY_NAME, 'sn': TEST_ENTRY_NAME, 'uidNumber': '1001', 'gidNumber': '2001', 'userpassword': PASSWORD, 'description': 'userdesc', 'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)} tuser = users.create(properties=user_properties) tuser.replace('description', 'newdesc') log.info('Modify RDN of user {}'.format(tuser.dn)) try: topo.ms['supplier1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) except ldap.LDAPError as e: log.fatal('Failed to modrdn entry {}'.format(tuser.dn)) raise e tuser = users.get(NEW_RDN_NAME) log.info('Deleting 
user: {}'.format(tuser.dn)) tuser.delete() def _create_changelog_dump(topo): """Dump changelog using nss5task and check if ldap operations are logged""" log.info('Dump changelog using nss5task and check if ldap operations are logged') if ds_supports_new_changelog(): changelog_dir = topo.ms['supplier1'].get_ldif_dir() changelog_end = '_cl.ldif' else: changelog_dir = topo.ms['supplier1'].get_changelog_dir() changelog_end = '.ldif' replicas = Replicas(topo.ms["supplier1"]) replica = replicas.get(DEFAULT_SUFFIX) log.info('Remove ldif files, if present in: {}'.format(changelog_dir)) for files in os.listdir(changelog_dir): if files.endswith(changelog_end): changelog_file = os.path.join(changelog_dir, files) try: os.remove(changelog_file) except OSError as e: log.fatal('Failed to remove ldif file: {}'.format(changelog_file)) raise e log.info('Existing changelog ldif file: {} removed'.format(changelog_file)) else: log.info('No existing changelog ldif files present') log.info('Running nsds5task to dump changelog database to a file') replica.begin_task_cl2ldif() log.info('Check if changelog ldif file exist in: {}'.format(changelog_dir)) for files in os.listdir(changelog_dir): if files.endswith(changelog_end): changelog_ldif = os.path.join(changelog_dir, files) log.info('Changelog ldif file exist: {}'.format(changelog_ldif)) return changelog_ldif else: log.fatal('Changelog ldif file does not exist in: {}'.format(changelog_dir)) assert False def _check_changelog_ldif(topo, changelog_ldif): """Check changelog ldif file for required ldap operations""" log.info('Checking changelog ldif file for ldap operations') assert os.stat(changelog_ldif).st_size > 0, 'Changelog file has no contents' with open(changelog_ldif, 'r') as fh: content = fh.read() ldap_operations = set() log.info('Checking if all required changetype operations are present') for entry_ldif in content.split('\n\n'): for line in entry_ldif.split('\n'): if line.startswith('changetype: '): 
ldap_operations.add(line.split(': ')[1]) valid_operations = set(ldif.valid_changetype_dict.keys()) log.info('Valid ldap operations: {}'.format(valid_operations)) log.info('Ldap operations found: {}'.format(ldap_operations)) assert ldap_operations == valid_operations, 'Changelog ldif file does not contain all \ changetype operations' def get_ldap_error_msg(e, type): return e.args[0][type] @pytest.fixture(scope="module") def changelog_init(topo): """ changlog dir is not configuarable, just enable cn=Retro Changelog Plugin,cn=plugins,cn=config """ log.info('Testing Ticket 47669 - Test duration syntax in the changelogs') # bind as directory manager topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) if not ds_supports_new_changelog(): try: changelogdir = os.path.join(os.path.dirname(topo.ms["supplier1"].dbdir), 'changelog') topo.ms["supplier1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir', ensure_bytes(changelogdir))]) except ldap.LDAPError as e: log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) assert False try: topo.ms["supplier1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')]) except ldap.LDAPError as e: log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc'))) assert False # restart the server topo.ms["supplier1"].restart(timeout=10) def add_and_check(topo, plugin, attr, val, isvalid): """ Helper function to add/replace attr: val and check the added value """ if isvalid: log.info('Test %s: %s -- valid' % (attr, val)) try: topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) except ldap.LDAPError as e: log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) assert False else: log.info('Test %s: %s -- invalid' % (attr, val)) if plugin == CHANGELOG: try: 
topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) except ldap.LDAPError as e: log.error('Expectedly failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) else: try: topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) except ldap.LDAPError as e: log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) try: entries = topo.ms["supplier1"].search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr]) if isvalid: if not entries[0].hasValue(attr, val): log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) assert False else: if plugin == CHANGELOG: if entries[0].hasValue(attr, val): log.fatal('%s has unexpected (%s: %s)' % (plugin, attr, val)) assert False else: if not entries[0].hasValue(attr, val): log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (plugin, e.message['desc'])) assert False def remove_ldif_files_from_changelogdir(topo, extension): """ Remove existing ldif files from changelog dir """ if ds_supports_new_changelog(): changelog_dir = topo.ms['supplier1'].get_ldif_dir() else: changelog_dir = topo.ms['supplier1'].get_changelog_dir() log.info('Remove %s files, if present in: %s' % (extension, changelog_dir)) for files in os.listdir(changelog_dir): if files.endswith(extension): changelog_file = os.path.join(changelog_dir, files) try: os.remove(changelog_file) except OSError as e: log.fatal('Failed to remove %s file: %s' % (extension,changelog_file)) raise e else: log.info('Existing changelog %s file: %s removed' % (extension,changelog_file)) @pytest.mark.xfail(ds_is_older('1.3.10.1', '1.4.3'), reason="bug bz1685059") @pytest.mark.skip(reason="does not work for prefix builds") @pytest.mark.bz1685059 @pytest.mark.ds50498 @pytest.mark.bz1769296 def 
test_cldump_files_removed(topo): """Verify bz1685059 : cl-dump generated ldif files are removed at the end, -l option is the way to keep them :id: fbb2f2a3-167b-4bc6-b513-9e0318b09edc :setup: Replication with two supplier, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-supplier1/changelog' retrochangelog plugin disabled :steps: 1. Clean the changelog directory, removing .ldif files present, if any 2. Clean the changelog directory, removing .done files present, if any 3. Perform ldap operations to record replication changes 4. Try a cl-dump call with invalid arguments to secure the next steps and to check bz1769296 5. Launch cl-dump cli without -l option 6. Wait so that all cl-dump tasks be finished 7. Check that all .ldif.done generated files have been removed from the changelog dir 8. Launch cl-dump cli with -l option 9. Wait so that all cl-dump tasks be finished 10. Check that the generated .ldif.done files are present in the changelog dir :expectedresults: 1. No remaining .ldif file in the changelog directory 2. No remaining .ldif.done file in the changelog directory 3. ldap operations are replicated and recorded in changelog 4. A result code different from 0 is raised 5. cl-dump is successfully executed 6. cl-dump process has finished 7. No .ldif.done files in the changelog dir 8. cl-dump is successfully executed 9. cl-dump process has finished 10. .ldif.done generated files are present in the changelog dir """ changelog_dir = topo.ms['supplier1'].get_changelog_dir() # Remove existing .ldif files in changelog dir remove_ldif_files_from_changelogdir(topo, '.ldif') # Remove existing .ldif.done files in changelog dir remove_ldif_files_from_changelogdir(topo, '.done') _perform_ldap_operations(topo) # This part to make sure that an error in the cl-dump script execution will be detected, # primary condition before executing the core goal of this case : management of cl-dump generated files. 
# As of today the returned code by cl-dump.pl is incorrect when run with invalid arguments (bz1769296) # This piece of code will serve as reproducer and verification mean for bz1769296 log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', 'invalid port', '-D', DN_DM, '-w', PASSWORD] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) msg = proc.communicate() log.info('output message : %s' % msg[0]) assert proc.returncode != 0 # Now the core goal of the test case # Using cl-dump without -l option log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', str(PORT_SUPPLIER_1), '-D', DN_DM, '-w', PASSWORD] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) proc.communicate() assert proc.returncode == 0 log.info('Wait for all cl-dump files to be generated') time.sleep(1) log.info('Check if cl-dump generated .ldif.done files are present - should not') for files in os.listdir(changelog_dir): if files.endswith('.done'): log.fatal('cl-dump generated .ldif.done files are present in %s - they should not' % changelog_dir) assert False else: log.info('All cl-dump generated .ldif files have been successfully removed from %s ' % changelog_dir) # Using cl-dump with -l option log.info("Use cl-dump perl script with -l option : generated ldif files should be kept in %s " % changelog_dir) cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', str(PORT_SUPPLIER_1), '-D', DN_DM, '-w', PASSWORD, '-l'] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) msg = proc.communicate() assert proc.returncode == 0 log.info('Wait for all cl-dump files to be generated') time.sleep(1) log.info('Check if cl-dump generated 
.ldif.done files are present - should be') for files in os.listdir(changelog_dir): if files.endswith('.done'): cldump_file = os.path.join(changelog_dir, files) log.info('Success : ldif file %s is present' % cldump_file) break else: log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) assert False @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_dsconf_dump_changelog_files_removed(topo): """Verify that the python counterpart of cl-dump (using dsconf) has a correct management of generated files :id: e41dcf90-098a-4386-acb5-789384579bf7 :setup: Replication with two supplier, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-supplier1/changelog' retrochangelog plugin disabled :steps: 1. Clean the changelog directory, removing .ldif files present, if any 2. Clean the changelog directory, removing .ldif.done files present, if any 3. Perform ldap operations to record replication changes 4. Try a dsconf call with invalid arguments to secure the next steps 5. Launch dsconf export-changelog cli without -l option 6. Wait so that all dsconf tasks be finished 7. Check that all .ldif.done generated files have been removed from the changelog dir 8. Launch dsconf export-changelog cli with -l option 9. Wait so that all dsconf tasks be finished 10. Check that the generated .ldif.done files are present in the changelog dir :expectedresults: 1. No remaining .ldif file in the changelog directory 2. No remaining .ldif.done file in the changelog directory 3. ldap operations are replicated and recorded in changelog 4. A result code different from 0 is raised 5. dsconf export-changelog is successfully executed 6. dsconf process has finished 7. No .ldif.done files in the changelog dir 8. dsconf export-changelog is successfully executed 9. dsconf process has finished 10. 
.ldif.done generated files are present in the changelog dir """ if ds_supports_new_changelog(): changelog_dir = topo.ms['supplier1'].get_ldif_dir() else: changelog_dir = topo.ms['supplier1'].get_changelog_dir() instance = topo.ms['supplier1'] instance_url = 'ldap://%s:%s' % (HOST_SUPPLIER_1, PORT_SUPPLIER_1) # Remove existing .ldif files in changelog dir remove_ldif_files_from_changelogdir(topo, '.ldif') # Remove existing .ldif.done files from changelog dir remove_ldif_files_from_changelogdir(topo, '.done') _perform_ldap_operations(topo) # This part to make sure that an error in the python dsconf export-changelog execution will be detected, # primary condition before executing the core goal of this case : management of generated files. log.info("Use dsconf export-changelog with invalid parameters") cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'export-changelog'] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) msg = proc.communicate() log.info('output message : %s' % msg[0]) assert proc.returncode != 0 # Now the core goal of the test case # Using dsconf replication changelog without -l option log.info('Use dsconf replication changelog without -l option: no generated ldif files should be present in %s ' % changelog_dir) cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'export-changelog', 'default', '-r', DEFAULT_SUFFIX] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) proc.communicate() assert proc.returncode == 0 log.info('Wait for all dsconf export-changelog files to be generated') time.sleep(1) log.info('Check if dsconf export-changelog generated .ldif.done files are present - should not') for files in os.listdir(changelog_dir): if files.endswith('.done'): log.fatal('export-changelog generated .ldif.done files are present in %s - they should not' % changelog_dir) assert False else: 
log.info('All dsconf export-changelog generated .ldif files have been successfully removed from %s ' % changelog_dir) # Using dsconf replication changelog without -l option log.info('Use dsconf replication changelog with -l option: generated ldif files should be kept in %s ' % changelog_dir) cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'export-changelog', 'to-ldif', '-o', changelog_dir + '/test.ldif', '-r', DEFAULT_SUFFIX, '-l'] log.info('Command used : %s' % cmdline) proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) proc.communicate() assert proc.returncode == 0 log.info('Wait for all dsconf export-changelog files to be generated') time.sleep(1) log.info('Check if dsconf export-changelog generated .ldif.done files are present - should be') for files in os.listdir(changelog_dir): if files.endswith('.done'): cldump_file = os.path.join(changelog_dir, files) log.info('Success : ldif file %s is present' % cldump_file) break else: log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) assert False def test_verify_changelog(topo): """Check if changelog dump file contains required ldap operations :id: 15ead076-8c18-410b-90eb-c2fe9eab966b :setup: Replication with two suppliers. :steps: 1. Add user to server. 2. Perform ldap modify, modrdn and delete operations. 3. Dump the changelog to a file using nsds5task. 4. Check if changelog is updated with ldap operations. :expectedresults: 1. Add user should PASS. 2. Ldap operations should PASS. 3. Changelog should be dumped successfully. 4. 
Changelog dump file should contain ldap operations """ log.info('LDAP operations add, modify, modrdn and delete') _perform_ldap_operations(topo) changelog_ldif = _create_changelog_dump(topo) _check_changelog_ldif(topo, changelog_ldif) def test_verify_changelog_online_backup(topo): """Check ldap operations in changelog dump file after online backup :id: 4001c34f-35b4-439e-8c2d-fa7e30375219 :setup: Replication with two suppliers. :steps: 1. Add user to server. 2. Take online backup using db2bak task. 3. Restore the database using bak2db task. 4. Perform ldap modify, modrdn and delete operations. 5. Dump the changelog to a file using nsds5task. 6. Check if changelog is updated with ldap operations. :expectedresults: 1. Add user should PASS. 2. Backup of database should PASS. 3. Restore of database should PASS. 4. Ldap operations should PASS. 5. Changelog should be dumped successfully. 6. Changelog dump file should contain ldap operations """ backup_dir = os.path.join(topo.ms['supplier1'].get_bak_dir(), 'online_backup') log.info('Run db2bak script to take database backup') try: topo.ms['supplier1'].tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) except ValueError: log.fatal('test_changelog5: Online backup failed') assert False if ds_supports_new_changelog(): backup_checkdir = os.path.join(backup_dir, DEFAULT_BENAME, BDB_CL_FILENAME) else: backup_checkdir = os.path.join(backup_dir, '.repl_changelog_backup', DEFAULT_CHANGELOG_DB) if glob.glob(f'{backup_checkdir}*'): log.info('Database backup is created successfully') else: log.fatal('test_changelog5: backup directory does not exist : {}*'.format(backup_checkdir)) assert False log.info('Run bak2db to restore directory server') try: topo.ms['supplier1'].tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) except ValueError: log.fatal('test_changelog5: Online restore failed') assert False log.info('LDAP operations add, modify, modrdn and delete') _perform_ldap_operations(topo) changelog_ldif = 
_create_changelog_dump(topo) _check_changelog_ldif(topo, changelog_ldif) def test_verify_changelog_offline_backup(topo): """Check ldap operations in changelog dump file after offline backup :id: feed290d-57dd-46e4-9ab3-422c77589867 :setup: Replication with two suppliers. :steps: 1. Add user to server. 2. Stop server and take offline backup using db2bak. 3. Restore the database using bak2db. 4. Perform ldap modify, modrdn and delete operations. 5. Start the server and dump the changelog using nsds5task. 6. Check if changelog is updated with ldap operations. :expectedresults: 1. Add user should PASS. 2. Backup of database should PASS. 3. Restore of database should PASS. 4. Ldap operations should PASS. 5. Changelog should be dumped successfully. 6. Changelog dump file should contain ldap operations """ backup_dir = os.path.join(topo.ms['supplier1'].get_bak_dir(), 'offline_backup') topo.ms['supplier1'].stop() log.info('Run db2bak to take database backup') try: topo.ms['supplier1'].db2bak(backup_dir) except ValueError: log.fatal('test_changelog5: Offline backup failed') assert False log.info('Run bak2db to restore directory server') try: topo.ms['supplier1'].bak2db(backup_dir) except ValueError: log.fatal('test_changelog5: Offline restore failed') assert False topo.ms['supplier1'].start() if ds_supports_new_changelog(): backup_checkdir = os.path.join(backup_dir, DEFAULT_BENAME, BDB_CL_FILENAME) else: backup_checkdir = os.path.join(backup_dir, '.repl_changelog_backup', DEFAULT_CHANGELOG_DB) if glob.glob(f'{backup_checkdir}*'): log.info('Database backup is created successfully') else: log.fatal('test_changelog5: backup directory does not exist : {}*'.format(backup_checkdir)) assert False log.info('LDAP operations add, modify, modrdn and delete') _perform_ldap_operations(topo) changelog_ldif = _create_changelog_dump(topo) _check_changelog_ldif(topo, changelog_ldif) @pytest.mark.ds47669 def test_changelog_maxage(topo, changelog_init): """Check nsslapd-changelog max age 
values :id: d284ff27-03b2-412c-ac74-ac4f2d2fae3b :setup: Replication with two supplier, change nsslapd-changelogdir to '/var/lib/dirsrv/slapd-supplier1/changelog' and set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' :steps: 1. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w' 2. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '-123','xyz' :expectedresults: 1. Operation should be successful 2. Operation should be unsuccessful """ log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config') # bind as directory manager topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) add_and_check(topo, CHANGELOG, MAXAGE, '12345', True) add_and_check(topo, CHANGELOG, MAXAGE, '10s', True) add_and_check(topo, CHANGELOG, MAXAGE, '30M', True) add_and_check(topo, CHANGELOG, MAXAGE, '12h', True) add_and_check(topo, CHANGELOG, MAXAGE, '2D', True) add_and_check(topo, CHANGELOG, MAXAGE, '4w', True) add_and_check(topo, CHANGELOG, MAXAGE, '-123', False) add_and_check(topo, CHANGELOG, MAXAGE, 'xyz', False) @pytest.mark.ds47669 def test_ticket47669_changelog_triminterval(topo, changelog_init): """Check nsslapd-changelog triminterval values :id: 8f850c37-7e7c-49dd-a4e0-9344638616d6 :setup: Replication with two supplier, change nsslapd-changelogdir to '/var/lib/dirsrv/slapd-supplier1/changelog' and set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' :steps: 1. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w' 2. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - '-123','xyz' :expectedresults: 1. Operation should be successful 2. Operation should be unsuccessful """ log.info('2. 
Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config') # bind as directory manager topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12345', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '10s', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '30M', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12h', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '2D', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '4w', True) add_and_check(topo, CHANGELOG, TRIMINTERVAL, '-123', False) add_and_check(topo, CHANGELOG, TRIMINTERVAL, 'xyz', False) @pytest.mark.ds47669 @pytest.mark.skipif(ds_supports_new_changelog(), reason="changelog compaction is done by the backend itself, with id2entry as well, nsslapd-changelogcompactdb-interval is no longer supported") def test_changelog_compactdbinterval(topo, changelog_init): """Check nsslapd-changelog compactdbinterval values :id: 0f4b3118-9dfa-4c2a-945c-72847b42a48c :setup: Replication with two supplier, change nsslapd-changelogdir to '/var/lib/dirsrv/slapd-supplier1/changelog' and set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' :steps: 1. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w' 2. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - '-123','xyz' :expectedresults: 1. Operation should be successful 2. Operation should be unsuccessful """ log.info('3. 
Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config') # bind as directory manager topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12345', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '10s', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '30M', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12h', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '2D', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '4w', True) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '-123', False) add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False) @pytest.mark.ds47669 def test_retrochangelog_maxage(topo, changelog_init): """Check nsslapd-retrochangelog max age values :id: 0cb84d81-3e86-4dbf-84a2-66aefd8281db :setup: Replication with two supplier, change nsslapd-changelogdir to '/var/lib/dirsrv/slapd-supplier1/changelog' and set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' :steps: 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - '12345','10s','30M','12h','2D','4w' 2. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - '-123','xyz' :expectedresults: 1. Operation should be successful 2. Operation should be unsuccessful """ log.info('4. 
Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config') # bind as directory manager topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) add_and_check(topo, RETROCHANGELOG, MAXAGE, '12345', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '10s', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '30M', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '12h', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '2D', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '4w', True) add_and_check(topo, RETROCHANGELOG, MAXAGE, '-123', False) add_and_check(topo, RETROCHANGELOG, MAXAGE, 'xyz', False) topo.ms["supplier1"].log.info("ticket47669 was successfully verified.") @pytest.mark.ds50736 def test_retrochangelog_trimming_crash(topo, changelog_init): """Check that when retroCL nsslapd-retrocthangelog contains invalid value, then the instance does not crash at shutdown :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052 :customerscenario: True :setup: Replication with two supplier, change nsslapd-changelogdir to '/var/lib/dirsrv/slapd-supplier1/changelog' and set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' :steps: 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to value '-1' This value is invalid. To disable retroCL trimming it should be set to 0 2. Do several restart 3. check there is no 'Detected Disorderly Shutdown' message (crash) 4. restore valid value for nsslapd-changelogmaxage '1w' :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful """ log.info('1. 
Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config') # set the nsslapd-changelogmaxage directly on dse.ldif # because the set value is invalid topo.ms["supplier1"].log.info("ticket50736 start verification") topo.ms["supplier1"].stop() retroPlugin = RetroChangelogPlugin(topo.ms["supplier1"]) dse_ldif = DSEldif(topo.ms["supplier1"]) dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1') topo.ms["supplier1"].start() # The crash should be systematic, but just in case do several restart # with a delay to let all plugin init for i in range(5): time.sleep(1) topo.ms["supplier1"].stop() topo.ms["supplier1"].start() assert not topo.ms["supplier1"].detectDisorderlyShutdown() topo.ms["supplier1"].log.info("ticket 50736 was successfully verified.") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/changelog_trimming_test.py000066400000000000000000000120501421664411400321250ustar00rootroot00000000000000import logging import pytest import os import ldap import time from lib389._constants import * from lib389.properties import * from lib389.topologies import topology_m1 as topo from lib389.replica import Changelog5 from lib389.idm.domain import Domain from lib389.utils import ensure_bytes, ds_supports_new_changelog pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) MAXAGE = 'nsslapd-changelogmaxage' MAXENTRIES = 'nsslapd-changelogmaxentries' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' def do_mods(supplier, num): """Perform a num of mods on the default suffix """ domain = Domain(supplier, DEFAULT_SUFFIX) for i in range(num): 
domain.replace('description', 'change %s' % i) def set_value(supplier, attr, val): """ Helper function to add/replace attr: val and check the added value """ try: supplier.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) except ldap.LDAPError as e: log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) assert False @pytest.fixture(scope="module") def setup_max_entries(topo, request): """Configure logging and changelog max entries """ supplier = topo.ms["supplier1"] supplier.config.loglevel((ErrorLog.REPLICA,), 'error') if ds_supports_new_changelog(): set_value(supplier, MAXENTRIES, '2') set_value(supplier, TRIMINTERVAL, '300') else: cl = Changelog5(supplier) cl.set_trim_interval('300') @pytest.fixture(scope="module") def setup_max_age(topo, request): """Configure logging and changelog max age """ supplier = topo.ms["supplier1"] supplier.config.loglevel((ErrorLog.REPLICA,), 'error') if ds_supports_new_changelog(): set_value(supplier, MAXAGE, '5') set_value(supplier, TRIMINTERVAL, '300') else: cl = Changelog5(supplier) cl.set_max_age('5') cl.set_trim_interval('300') def test_max_age(topo, setup_max_age): """Test changing the trimming interval works with max age :id: b5de04a5-4d92-49ea-a725-1d278a1c647c :setup: single supplier :steps: 1. Perform modification to populate changelog 2. Adjust the changelog trimming interval 3. Check is trimming occurrs within the new interval :expectedresults: 1. Modifications are successful 2. The changelog trimming interval is correctly lowered 3. 
Trimming occurs """ log.info("Testing changelog trimming interval with max age...") supplier = topo.ms["supplier1"] if not ds_supports_new_changelog(): cl = Changelog5(supplier) # Do mods to build if cl entries do_mods(supplier, 10) time.sleep(1) # Trimming should not have occurred if supplier.searchErrorsLog("Trimmed") is True: log.fatal('Trimming event unexpectedly occurred') assert False if ds_supports_new_changelog(): set_value(supplier, TRIMINTERVAL, '5') else: cl.set_trim_interval('5') time.sleep(3) # Trimming should not have occurred if supplier.searchErrorsLog("Trimmed") is True: log.fatal('Trimming event unexpectedly occurred') assert False time.sleep(3) # Trimming should have occurred if supplier.searchErrorsLog("Trimmed") is False: log.fatal('Trimming event did not occur') assert False def test_max_entries(topo, setup_max_entries): """Test changing the trimming interval works with max entries :id: b5de04a5-4d92-49ea-a725-1d278a1c647d :setup: single supplier :steps: 1. Perform modification to populate changelog 2. Adjust the changelog trimming interval 3. Check is trimming occurrs within the new interval :expectedresults: 1. Modifications are successful 2. The changelog trimming interval is correctly lowered 3. 
Trimming occurs """ log.info("Testing changelog triming interval with max entries...") supplier = topo.ms["supplier1"] if not ds_supports_new_changelog(): cl = Changelog5(supplier) # reset errors log supplier.deleteErrorLogs() # Do mods to build if cl entries do_mods(supplier, 10) time.sleep(1) # Trimming should have occurred if supplier.searchErrorsLog("Trimmed") is True: log.fatal('Trimming event unexpectedly occurred') assert False if ds_supports_new_changelog(): set_value(supplier, TRIMINTERVAL, '5') else: cl.set_trim_interval('5') time.sleep(6) # Trimming should have occurred if supplier.searchErrorsLog("Trimmed") is False: log.fatal('Trimming event did not occur') assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py000066400000000000000000000042421421664411400326560ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import threading import pytest import random from lib389 import DirSrv from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m4, topology_m2 from lib389._constants import * pytestmark = pytest.mark.tier1 @pytest.mark.skipif(ds_is_older("1.4.1.6"), reason="Not implemented") def test_max_tasks(topology_m4): """Test we can not create more than 64 cleaning tasks This test needs to be a standalone test becuase there is no easy way to "restore" the instance after running this test :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f :setup: Replication setup with four suppliers :steps: 1. Stop suppliers 3 & 4 2. Create over 64 tasks between m1 and m2 3. Check logs to see if (>64) tasks were rejected :expectedresults: 1. Success 2. Success 3. 
Success """ # Stop suppliers 3 & 4 m1 = topology_m4.ms["supplier1"] m2 = topology_m4.ms["supplier2"] m3 = topology_m4.ms["supplier3"] m4 = topology_m4.ms["supplier4"] m3.stop() m4.stop() # Add over 64 tasks between supplier1 & 2 to try to exceed the 64 task limit for i in range(1, 64): cruv_task = CleanAllRUVTask(m1) cruv_task.create(properties={ 'replica-id': str(i), 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', # This forces these tasks to stick around }) cruv_task = CleanAllRUVTask(m2) cruv_task.create(properties={ 'replica-id': "10" + str(i), 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'yes', # This allows the tasks to propagate }) # Check the errors log for our error message in supplier 1 assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/cleanallruv_test.py000066400000000000000000000722731421664411400306150ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import threading import pytest import random from lib389 import DirSrv from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m4, topology_m2 from lib389._constants import * from lib389.idm.directorymanager import DirectoryManager from lib389.replica import ReplicationManager, Replicas from lib389.tasks import CleanAllRUVTask from lib389.idm.user import UserAccounts from lib389.config import LDBMConfig from lib389.config import CertmapLegacy from lib389.idm.services import ServiceAccounts pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) class AddUsers(threading.Thread): def __init__(self, inst, num_users): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.num_users = num_users def run(self): """Start adding users""" dm = DirectoryManager(self.inst) conn = dm.bind() users = UserAccounts(conn, DEFAULT_SUFFIX) u_range = list(range(self.num_users)) random.shuffle(u_range) for idx in u_range: try: users.create(properties={ 'uid': 'testuser%s' % idx, 'cn' : 'testuser%s' % idx, 'sn' : 'user%s' % idx, 'uidNumber' : '%s' % (1000 + idx), 'gidNumber' : '%s' % (1000 + idx), 'homeDirectory' : '/home/testuser%s' % idx }) # One of the suppliers was probably put into read only mode - just break out except ldap.UNWILLING_TO_PERFORM: break except ldap.ALREADY_EXISTS: pass conn.close() def remove_supplier4_agmts(msg, topology_m4): """Remove all the repl agmts to supplier4. """ log.info('%s: remove all the agreements to supplier 4...' % msg) repl = ReplicationManager(DEFAULT_SUFFIX) # This will delete m4 frm the topo *and* remove all incoming agreements # to m4. 
repl.remove_supplier(topology_m4.ms["supplier4"], [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) def check_ruvs(msg, topology_m4, m4rid): """Check suppliers 1- 3 for supplier 4's rid.""" for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): clean = False replicas = Replicas(inst) replica = replicas.get(DEFAULT_SUFFIX) log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) count = 0 while not clean and count < 20: ruv = replica.get_ruv() if m4rid in ruv._rids: time.sleep(5) count = count + 1 else: clean = True if not clean: raise Exception("Supplier %s was not cleaned in time." % inst.serverid) return True def task_done(topology_m4, task_dn, timeout=60): """Check if the task is complete""" attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', 'nsTaskCurrentItem', 'nsTaskTotalItems'] done = False count = 0 while not done and count < timeout: try: entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) if entry is not None: if entry.hasAttr('nsTaskExitCode'): done = True break else: done = True break except ldap.NO_SUCH_OBJECT: done = True break except ldap.LDAPError: break time.sleep(1) count += 1 return done def restore_supplier4(topology_m4): """In our tests will always be removing supplier 4, so we need a common way to restore it for another test """ # Restart the remaining suppliers to allow rid 4 to be reused. for inst in topology_m4.ms.values(): inst.restart() repl = ReplicationManager(DEFAULT_SUFFIX) repl.join_supplier(topology_m4.ms["supplier1"], topology_m4.ms["supplier4"]) # Add the 2,3 -> 4 agmt. repl.ensure_agreement(topology_m4.ms["supplier2"], topology_m4.ms["supplier4"]) repl.ensure_agreement(topology_m4.ms["supplier3"], topology_m4.ms["supplier4"]) # And in reverse ... 
repl.ensure_agreement(topology_m4.ms["supplier4"], topology_m4.ms["supplier2"]) repl.ensure_agreement(topology_m4.ms["supplier4"], topology_m4.ms["supplier3"]) log.info('Supplier 4 has been successfully restored.') @pytest.fixture() def m4rid(request, topology_m4): log.debug("Wait a bit before the reset - it is required for the slow machines") time.sleep(5) log.debug("-------------- BEGIN RESET of m4 -----------------") repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication_topology(topology_m4.ms.values()) # What is supplier4's rid? m4rid = repl.get_rid(topology_m4.ms["supplier4"]) def fin(): try: # Restart the suppliers and rerun cleanallruv for inst in topology_m4.ms.values(): inst.restart() cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', }) cruv_task.wait() except ldap.UNWILLING_TO_PERFORM: # In some casse we already cleaned rid4, so if we fail, it's okay pass restore_supplier4(topology_m4) # Make sure everything works. repl.test_replication_topology(topology_m4.ms.values()) request.addfinalizer(fin) log.debug("-------------- FINISH RESET of m4 -----------------") return m4rid def test_clean(topology_m4, m4rid): """Check that cleanallruv task works properly :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878 :setup: Replication setup with four suppliers :steps: 1. Check that replication works on all suppliers 2. Disable replication on supplier 4 3. Remove agreements to supplier 4 from other suppliers 4. Run a cleanallruv task on supplier 1 with a 'force' option 'on' 5. Check that everything was cleaned :expectedresults: 1. Replication should work properly on all suppliers 2. Operation should be successful 3. Agreements to supplier 4 should be removed 4. Cleanallruv task should be successfully executed 5. 
Everything should be cleaned """ log.info('Running test_clean...') # Disable supplier 4 # Remove the agreements from the other suppliers that point to supplier 4 log.info('test_clean: disable supplier 4...') remove_supplier4_agmts("test_clean", topology_m4) # Run the task log.info('test_clean: run the cleanAllRUV task...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no' }) cruv_task.wait() # Check the other supplier's RUV for 'replica 4' log.info('test_clean: check all the suppliers have been cleaned...') clean = check_ruvs("test_clean", topology_m4, m4rid) assert clean log.info('test_clean PASSED, restoring supplier 4...') @pytest.mark.flaky(max_runs=2, min_passes=1) def test_clean_restart(topology_m4, m4rid): """Check that cleanallruv task works properly after a restart :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02 :setup: Replication setup with four suppliers :steps: 1. Disable replication on supplier 4 2. Remove agreements to supplier 4 from other suppliers 3. Stop supplier 3 4. Run a cleanallruv task on supplier 1 5. Stop supplier 1 6. Start supplier 3 7. Make sure that no crash happened 8. Start supplier 1 9. Make sure that no crash happened 10. Check that everything was cleaned :expectedresults: 1. Operation should be successful 2. Agreements to supplier 4 should be removed 3. Supplier 3 should be stopped 4. Cleanallruv task should be successfully executed 5. Supplier 1 should be stopped 6. Supplier 3 should be started 7. No crash should happened 8. Supplier 1 should be started 9. No crash should happened 10. 
Everything should be cleaned """ log.info('Running test_clean_restart...') # Disable supplier 4 log.info('test_clean: disable supplier 4...') # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_clean", topology_m4) # Stop supplier 3 to keep the task running, so we can stop supplier 1... topology_m4.ms["supplier3"].stop() # Run the task log.info('test_clean: run the cleanAllRUV task...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', 'replica-certify-all': 'yes' }) # Sleep a bit, then stop supplier 1 time.sleep(5) topology_m4.ms["supplier1"].stop() # Now start supplier 3 & 1, and make sure we didn't crash topology_m4.ms["supplier3"].start() if topology_m4.ms["supplier3"].detectDisorderlyShutdown(): log.fatal('test_clean_restart: Supplier 3 previously crashed!') assert False topology_m4.ms["supplier1"].start(timeout=30) if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): log.fatal('test_clean_restart: Supplier 1 previously crashed!') assert False # Check the other supplier's RUV for 'replica 4' log.info('test_clean_restart: check all the suppliers have been cleaned...') clean = check_ruvs("test_clean_restart", topology_m4, m4rid) assert clean log.info('test_clean_restart PASSED, restoring supplier 4...') @pytest.mark.flaky(max_runs=2, min_passes=1) def test_clean_force(topology_m4, m4rid): """Check that multiple tasks with a 'force' option work properly :id: f8810dfe-d2d2-4dd9-ba03-5fc14896fabe :setup: Replication setup with four suppliers :steps: 1. Stop supplier 3 2. Add a bunch of updates to supplier 4 3. Disable replication on supplier 4 4. Start supplier 3 5. Remove agreements to supplier 4 from other suppliers 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' 7. Check that everything was cleaned :expectedresults: 1. Supplier 3 should be stopped 2. 
Operation should be successful 3. Replication on supplier 4 should be disabled 4. Supplier 3 should be started 5. Agreements to supplier 4 should be removed 6. Operation should be successful 7. Everything should be cleaned """ log.info('Running test_clean_force...') # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers topology_m4.ms["supplier3"].stop() # Add a bunch of updates to supplier 4 m4_add_users = AddUsers(topology_m4.ms["supplier4"], 1500) m4_add_users.start() m4_add_users.join() # Start supplier 3, it should be out of sync with the other replicas... topology_m4.ms["supplier3"].start() # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_clean_force", topology_m4) # Run the task, use "force" because supplier 3 is not in sync with the other replicas # in regards to the replica 4 RUV log.info('test_clean: run the cleanAllRUV task...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'yes' }) cruv_task.wait() # Check the other supplier's RUV for 'replica 4' log.info('test_clean_force: check all the suppliers have been cleaned...') clean = check_ruvs("test_clean_force", topology_m4, m4rid) assert clean log.info('test_clean_force PASSED, restoring supplier 4...') @pytest.mark.flaky(max_runs=2, min_passes=1) def test_abort(topology_m4, m4rid): """Test the abort task basic functionality :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08 :setup: Replication setup with four suppliers :steps: 1. Disable replication on supplier 4 2. Remove agreements to supplier 4 from other suppliers 3. Stop supplier 2 4. Run a cleanallruv task on supplier 1 5. Run a cleanallruv abort task on supplier 1 :expectedresults: No hanging tasks left 1. Replication on supplier 4 should be disabled 2. Agreements to supplier 4 should be removed 3. Supplier 2 should be stopped 4. 
Operation should be successful 5. Operation should be successful """ log.info('Running test_abort...') # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_abort", topology_m4) # Stop supplier 2 log.info('test_abort: stop supplier 2 to freeze the cleanAllRUV task...') topology_m4.ms["supplier2"].stop() # Run the task log.info('test_abort: add the cleanAllRUV task...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', 'replica-certify-all': 'yes' }) # Wait a bit time.sleep(2) # Abort the task cruv_task.abort() # Check supplier 1 does not have the clean task running log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') if not task_done(topology_m4, cruv_task.dn): log.fatal('test_abort: CleanAllRUV task was not aborted') assert False # Start supplier 2 log.info('test_abort: start supplier 2 to begin the restore process...') topology_m4.ms["supplier2"].start() log.info('test_abort PASSED, restoring supplier 4...') @pytest.mark.flaky(max_runs=2, min_passes=1) def test_abort_restart(topology_m4, m4rid): """Test the abort task can handle a restart, and then resume :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3 :setup: Replication setup with four suppliers :steps: 1. Disable replication on supplier 4 2. Remove agreements to supplier 4 from other suppliers 3. Stop supplier 3 4. Run a cleanallruv task on supplier 1 5. Run a cleanallruv abort task on supplier 1 6. Restart supplier 1 7. Make sure that no crash happened 8. Start supplier 3 9. Check supplier 1 does not have the clean task running 10. Check that errors log doesn't have 'Aborting abort task' message :expectedresults: 1. Replication on supplier 4 should be disabled 2. Agreements to supplier 4 should be removed 3. Supplier 3 should be stopped 4. Operation should be successful 5. Operation should be successful 6. 
Supplier 1 should be restarted 7. No crash should happened 8. Supplier 3 should be started 9. Check supplier 1 shouldn't have the clean task running 10. Errors log shouldn't have 'Aborting abort task' message """ log.info('Running test_abort_restart...') # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_abort", topology_m4) # Stop supplier 3 log.info('test_abort_restart: stop supplier 3 to freeze the cleanAllRUV task...') topology_m4.ms["supplier3"].stop() # Run the task log.info('test_abort_restart: add the cleanAllRUV task...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', 'replica-certify-all': 'yes' }) # Wait a bit time.sleep(2) # Abort the task cruv_task.abort(certify=True) # Check supplier 1 does not have the clean task running log.info('test_abort_abort: check supplier 1 no longer has a cleanAllRUV task...') if not task_done(topology_m4, cruv_task.dn): log.fatal('test_abort_restart: CleanAllRUV task was not aborted') assert False # Now restart supplier 1, and make sure the abort process completes topology_m4.ms["supplier1"].restart() if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): log.fatal('test_abort_restart: Supplier 1 previously crashed!') assert False # Start supplier 3 topology_m4.ms["supplier3"].start() # Need to wait 5 seconds before server processes any leftover tasks time.sleep(6) # Check supplier 1 tried to run abort task. We expect the abort task to be aborted. 
if not topology_m4.ms["supplier1"].searchErrorsLog('Aborting abort task'):
        log.fatal('test_abort_restart: Abort task did not restart')
        assert False

    log.info('test_abort_restart PASSED, restoring supplier 4...')


@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_abort_certify(topology_m4, m4rid):
    """Test the abort task with a replica-certify-all option

    :id: 78959966-d644-44a8-b98c-1fcf21b45eb0
    :setup: Replication setup with four suppliers
    :steps:
        1. Disable replication on supplier 4
        2. Remove agreements to supplier 4 from other suppliers
        3. Stop supplier 2
        4. Run a cleanallruv task on supplier 1
        5. Run a cleanallruv abort task on supplier 1 with a replica-certify-all option
    :expectedresults: No hanging tasks left
        1. Replication on supplier 4 should be disabled
        2. Agreements to supplier 4 should be removed
        3. Supplier 2 should be stopped
        4. Operation should be successful
        5. Operation should be successful
        """
    log.info('Running test_abort_certify...')

    # Remove the agreements from the other suppliers that point to supplier 4
    remove_supplier4_agmts("test_abort_certify", topology_m4)

    # Stop supplier 2 so the clean task cannot complete and stays "frozen"
    log.info('test_abort_certify: stop supplier 2 to freeze the cleanAllRUV task...')
    topology_m4.ms["supplier2"].stop()

    # Run the task.  m4rid is the replica id of supplier 4, captured by the
    # fixture before replication to supplier 4 was disabled.
    log.info('test_abort_certify: add the cleanAllRUV task...')
    cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"])
    cruv_task.create(properties={
        'replica-id': m4rid,
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no',
        'replica-certify-all': 'yes'
        })
    # Wait a bit so the task is actually running before we abort it
    time.sleep(2)

    # Abort the task; certify=True means the abort must be confirmed by all
    # replicas, so it cannot finish while supplier 2 is down
    log.info('test_abort_certify: abort the cleanAllRUV task...')
    abort_task = cruv_task.abort(certify=True)

    # Wait a while and make sure the abort task is still running
    log.info('test_abort_certify...')
    if task_done(topology_m4, abort_task.dn, 10):
        log.fatal('test_abort_certify: abort task incorrectly finished')
        assert False

    # Now start supplier 2 so it can be aborted
    log.info('test_abort_certify: start supplier 2 to allow the abort task to finish...')
    topology_m4.ms["supplier2"].start()

    # Wait for the abort task to stop
    if not task_done(topology_m4, abort_task.dn, 90):
        log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted')
        assert False

    # Check supplier 1 does not have the clean task running
    log.info('test_abort_certify: check supplier 1 no longer has a cleanAllRUV task...')
    if not task_done(topology_m4, cruv_task.dn):
        log.fatal('test_abort_certify: CleanAllRUV task was not aborted')
        assert False

    log.info('test_abort_certify PASSED, restoring supplier 4...')


@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_stress_clean(topology_m4, m4rid):
    """Put each server(m1 - m4) under a stress, and perform the entire clean process

    :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
    :setup: Replication setup with four suppliers
    :steps:
        1. Add a bunch of updates to all suppliers
        2. Put supplier 4 to read-only mode
        3. Disable replication on supplier 4
        4. Remove agreements to supplier 4 from other suppliers
        5. Run a cleanallruv task on supplier 1
        6. Check that everything was cleaned
    :expectedresults:
        1. Operation should be successful
        2. Supplier 4 should be put to read-only mode
        3. Replication on supplier 4 should be disabled
        4. Agreements to supplier 4 should be removed
        5. Operation should be successful
        6.
Everything should be cleaned """ log.info('Running test_stress_clean...') log.info('test_stress_clean: put all the suppliers under load...') ldbm_config = LDBMConfig(topology_m4.ms["supplier4"]) # Put all the suppliers under load # not too high load else it takes a long time to converge and # the test result becomes instable m1_add_users = AddUsers(topology_m4.ms["supplier1"], 500) m1_add_users.start() m2_add_users = AddUsers(topology_m4.ms["supplier2"], 500) m2_add_users.start() m3_add_users = AddUsers(topology_m4.ms["supplier3"], 500) m3_add_users.start() m4_add_users = AddUsers(topology_m4.ms["supplier4"], 500) m4_add_users.start() # Allow sometime to get replication flowing in all directions log.info('test_stress_clean: allow some time for replication to get flowing...') time.sleep(5) # Put supplier 4 into read only mode ldbm_config.set('nsslapd-readonly', 'on') # We need to wait for supplier 4 to push its changes out log.info('test_stress_clean: allow some time for supplier 4 to push changes out (60 seconds)...') time.sleep(30) # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_stress_clean", topology_m4) # Run the task cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no' }) cruv_task.wait() # Wait for the update to finish log.info('test_stress_clean: wait for all the updates to finish...') m1_add_users.join() m2_add_users.join() m3_add_users.join() m4_add_users.join() # Check the other supplier's RUV for 'replica 4' log.info('test_stress_clean: check if all the replicas have been cleaned...') clean = check_ruvs("test_stress_clean", topology_m4, m4rid) assert clean log.info('test_stress_clean: PASSED, restoring supplier 4...') # Sleep for a bit to replication complete log.info("Sleep for 120 seconds to allow replication to complete...") repl = ReplicationManager(DEFAULT_SUFFIX) 
repl.test_replication_topology([ topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"], ], timeout=120) # Turn off readonly mode ldbm_config.set('nsslapd-readonly', 'off') @pytest.mark.flaky(max_runs=2, min_passes=1) def test_multiple_tasks_with_force(topology_m4, m4rid): """Check that multiple tasks with a 'force' option work properly :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098 :setup: Replication setup with four suppliers :steps: 1. Stop supplier 3 2. Add a bunch of updates to supplier 4 3. Disable replication on supplier 4 4. Start supplier 3 5. Remove agreements to supplier 4 from other suppliers 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' 7. Run one more cleanallruv task on supplier 1 with a 'force' option 'off' 8. Check that everything was cleaned :expectedresults: 1. Supplier 3 should be stopped 2. Operation should be successful 3. Replication on supplier 4 should be disabled 4. Supplier 3 should be started 5. Agreements to supplier 4 should be removed 6. Operation should be successful 7. Operation should be successful 8. Everything should be cleaned """ log.info('Running test_multiple_tasks_with_force...') # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers topology_m4.ms["supplier3"].stop() # Add a bunch of updates to supplier 4 m4_add_users = AddUsers(topology_m4.ms["supplier4"], 1500) m4_add_users.start() m4_add_users.join() # Start supplier 3, it should be out of sync with the other replicas... 
topology_m4.ms["supplier3"].start() # Disable supplier 4 # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_multiple_tasks_with_force", topology_m4) # Run the task, use "force" because supplier 3 is not in sync with the other replicas # in regards to the replica 4 RUV log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...') cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'yes', 'replica-certify-all': 'no' }) log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...') # NOTE: This must be try not py.test raises, because the above may or may # not have completed yet .... try: cruv_task_fail = CleanAllRUVTask(topology_m4.ms["supplier1"]) cruv_task_fail.create(properties={ 'replica-id': m4rid, 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', 'replica-certify-all': 'no' }) cruv_task_fail.wait() except ldap.UNWILLING_TO_PERFORM: pass # Wait for the force task .... cruv_task.wait() # Check the other supplier's RUV for 'replica 4' log.info('test_multiple_tasks_with_force: check all the suppliers have been cleaned...') clean = check_ruvs("test_clean_force", topology_m4, m4rid) assert clean # Check supplier 1 does not have the clean task running log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') if not task_done(topology_m4, cruv_task.dn): log.fatal('test_abort: CleanAllRUV task was not aborted') assert False @pytest.mark.bz1466441 @pytest.mark.ds50370 def test_clean_shutdown_crash(topology_m2): """Check that server didn't crash after shutdown when running CleanAllRUV task :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf :setup: Replication setup with two suppliers :steps: 1. Enable TLS on both suppliers 2. Reconfigure both agreements to use TLS Client auth 3. Stop supplier2 4. Run the CleanAllRUV task 5. 
Restart supplier1 6. Check if supplier1 didn't crash 7. Restart supplier1 again 8. Check if supplier1 didn't crash :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success """ m1 = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] repl = ReplicationManager(DEFAULT_SUFFIX) cm_m1 = CertmapLegacy(m1) cm_m2 = CertmapLegacy(m2) certmaps = cm_m1.list() certmaps['default']['DNComps'] = None certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' cm_m1.set(certmaps) cm_m2.set(certmaps) log.info('Enabling TLS') [i.enable_tls() for i in topology_m2] log.info('Creating replication dns') services = ServiceAccounts(m1, DEFAULT_SUFFIX) repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) log.info('Changing auth type') replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) agmt_m1 = replica_m1.get_agreements().list()[0] agmt_m1.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', '%s' % m2.sslport), ) agmt_m1.remove_all('nsDS5ReplicaBindDN') replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) agmt_m2 = replica_m2.get_agreements().list()[0] agmt_m2.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', '%s' % m1.sslport), ) agmt_m2.remove_all('nsDS5ReplicaBindDN') log.info('Stopping supplier2') m2.stop() log.info('Run the cleanAllRUV task') cruv_task = CleanAllRUVTask(m1) cruv_task.create(properties={ 'replica-id': repl.get_rid(m1), 'replica-base-dn': DEFAULT_SUFFIX, 'replica-force-cleaning': 'no', 'replica-certify-all': 'yes' }) m1.restart() log.info('Check if supplier1 crashed') assert not m1.detectDisorderlyShutdown() log.info('Repeat') m1.restart() assert not m1.detectDisorderlyShutdown() if __name__ == '__main__': # Run isolated # 
-s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/conflict_resolve_test.py000066400000000000000000001223341421664411400316370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import logging import ldap import pytest import re from itertools import permutations from lib389._constants import * from lib389.idm.nscontainer import nsContainers from lib389.idm.user import UserAccounts, UserAccount from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits from lib389.replica import ReplicationManager from lib389.agreement import Agreements from lib389.plugins import MemberOfPlugin pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _create_user(users, user_num, group_num=2000, sleep=False): """Creates user entry""" user = users.create_test_user(user_num, group_num) if sleep: time.sleep(1) return user def _rename_user(users, user_num, new_num, sleep=False): """Rename user entry""" assert user_num != new_num, "New user number should not be the same as the old one" user = users.get('test_user_{}'.format(user_num)) user.rename('uid=test_user_{}'.format(new_num)) if sleep: time.sleep(1) def _modify_user(users, user_num, sleep=False): """Modify user entry""" user = users.get('test_user_{}'.format(user_num)) user.replace("homeDirectory", "/home/test_user0{}".format(user_num)) if sleep: time.sleep(1) time.sleep(1) def _delete_user(users, user_num, sleep=False): """Delete user entry""" user = users.get('test_user_{}'.format(user_num)) user.delete() if 
sleep: time.sleep(1) time.sleep(1) def _create_group(groups, num, member, sleep=False): """Creates group entry""" group_props = {'cn': 'test_group_{}'.format(num), 'member': member} group = groups.create(properties=group_props) if sleep: time.sleep(1) return group def _delete_group(groups, num, sleep=False): """Delete group entry""" group = groups.get('test_group_{}'.format(num)) group.delete() if sleep: time.sleep(1) def _create_container(inst, dn, name, sleep=False): """Creates container entry""" conts = nsContainers(inst, dn) cont = conts.create(properties={'cn': name}) if sleep: time.sleep(1) return cont def _delete_container(cont, sleep=False): """Deletes container entry""" cont.delete() if sleep: time.sleep(1) def _test_base(topology): """Add test container for entries, enable plugin logging, audit log, error log for replica and access log for internal """ M1 = topology.ms["supplier1"] conts = nsContainers(M1, SUFFIX) base_m2 = conts.ensure_state(properties={'cn': 'test_container'}) for inst in topology: inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') inst.config.set('nsslapd-plugin-logging', 'on') inst.config.enable_log('audit') inst.restart() return base_m2 def _delete_test_base(inst, base_m2_dn): """Delete test container with entries and entry conflicts""" ents = inst.search_s(base_m2_dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))") for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): log.debug("Delete entry children {}".format(ent.dn)) try: inst.delete_ext_s(ent.dn) except ldap.NO_SUCH_OBJECT: # For the case with objectclass: glue entries pass @pytest.fixture def base_m2(topology_m2, request): tb = _test_base(topology_m2) def fin(): if not DEBUGGING: _delete_test_base(topology_m2.ms["supplier1"], tb.dn) request.addfinalizer(fin) return tb @pytest.fixture def base_m3(topology_m3, request): tb = 
_test_base(topology_m3)

    def fin():
        if not DEBUGGING:
            _delete_test_base(topology_m3.ms["supplier1"], tb.dn)
    request.addfinalizer(fin)

    return tb


class TestTwoSuppliers:
    def test_add_modrdn(self, topology_m2, base_m2):
        """Check that conflict properly resolved for create - modrdn operations

        :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add five users to m1 and wait for replication to happen
            2. Pause replication
            3. Create an entry on m1 and m2
            4. Create an entry on m1 and rename on m2
            5. Rename an entry on m1 and create on m2
            6. Rename an entry on m1 and rename on m2
            7. Rename an entry on m1 and rename on m2. Use different entries
               but rename them to the same entry
            8. Resume replication
            9. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8.
It should pass
        """
        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        # Seed users on M1 and make sure they replicate before pausing
        for user_num in range(1000, 1005):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test create - modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _rename_user(test_users_m2, 1000, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1001, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1002, user_num, sleep=True)
        _rename_user(test_users_m2, 1002, user_num, sleep=True)

        user_num += 1
        _rename_user(test_users_m1, 1003, user_num, sleep=True)
        _rename_user(test_users_m2, 1004, user_num)

        topology_m2.resume_all_replicas()
        repl.test_replication_topology(topology_m2)

        # After conflict resolution both suppliers must converge to the same DNs
        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)

    def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
        """Check that conflict properly resolved for complex operations
        which involve add, modify, modrdn and delete

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
        :customerscenario: True
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add ten users to m1 and wait for replication to happen
            2. Pause replication
            3. Test add-del on m1 and add on m2
            4. Test add-mod on m1 and add on m2
            5. Test add-modrdn on m1 and add on m2
            6. Test multiple add, modrdn
            7. Test Add-del on both suppliers
            8. Test modrdn-modrdn
            9. Test modrdn-del
            10. Resume replication
            11.
Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
        """
        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        # Seed users on M1 and make sure they replicate before pausing
        for user_num in range(1100, 1110):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test add-del on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _delete_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num)

        log.info("Test add-mod on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _modify_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num)

        log.info("Test add-modrdn on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num+20, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
_rename_user(test_users_m1, user_num, user_num+20, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num+20)

        log.info("Test multiple add, modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num+20)
        _create_user(test_users_m1, user_num, sleep=True)
        _modify_user(test_users_m2, user_num, sleep=True)

        log.info("Add - del on both suppliers")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num)
        _delete_user(test_users_m2, user_num, sleep=True)

        log.info("Test modrdn - modrdn")
        user_num += 1
        _rename_user(test_users_m1, 1109, 1129, sleep=True)
        _rename_user(test_users_m2, 1109, 1129, sleep=True)

        log.info("Test modrdn - del")
        user_num += 1
        _rename_user(test_users_m1, 1100, 1120, sleep=True)
        _delete_user(test_users_m2, 1100)

        user_num += 1
        _delete_user(test_users_m2, 1101, sleep=True)
        _rename_user(test_users_m1, 1101, 1121)

        topology_m2.resume_all_replicas()
        repl.test_replication_topology(topology_m2)
        # Give conflict resolution extra time to settle before comparing
        time.sleep(30)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)

    def test_memberof_groups(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with memberOf and groups

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Enable memberOf plugin
            2. Add 30 users to m1 and wait for replication to happen
            3. Pause replication
            4. Create a group on m1 and m2
            5. Create a group on m1 and m2, delete from m1
            6. Create a group on m1, delete from m1, and create on m2,
            7.
Create a group on m2 and m1, delete from m1
            8. Create two different groups on m2
            9. Resume replication
            10. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
        """
        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
        test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

        repl = ReplicationManager(SUFFIX)

        # Enable memberOf and exclude memberOf from replication so each side
        # computes it locally
        for inst in topology_m2.ms.values():
            memberof = MemberOfPlugin(inst)
            memberof.enable()
            agmt = Agreements(inst).list()[0]
            agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                               '(objectclass=*) $ EXCLUDE '),
                              ('nsDS5ReplicatedAttributeList',
                               '(objectclass=*) $ EXCLUDE memberOf'))
            inst.restart()

        # Build 10 trios of member DNs (users 1200-1229)
        user_dns = []
        for user_num in range(10):
            user_trio = []
            for num in range(0, 30, 10):
                user = _create_user(test_users_m1, 1200 + user_num + num)
                user_trio.append(user.dn)
            user_dns.append(user_trio)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Check a simple conflict")
        group_num = 0
        _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

        log.info("Check a add - del")
        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
        _delete_group(test_groups_m1, group_num)

        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m1,
group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        topology_m2.resume_all_replicas()
        repl.test_replication_topology(topology_m2)

        # After conflict resolution both suppliers must hold the same groups
        group_dns_m1 = [group.dn for group in test_groups_m1.list()]
        group_dns_m2 = [group.dn for group in test_groups_m2.list()]
        assert set(group_dns_m1) == set(group_dns_m2)

    def test_managed_entries(self, topology_m2):
        """Check that conflict properly resolved for operations
        with managed entries

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Create ou=managed_users and ou=managed_groups under test container
            2. Configure managed entries plugin and add a template to test container
            3. Add a user to m1 and wait for replication to happen
            4. Pause replication
            5. Create a user on m1 and m2 with a same group ID on both supplier
            6. Create a user on m1 and m2 with a different group ID on both supplier
            7. Resume replication
            8. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8.
It should pass
        """
        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        repl = ReplicationManager(SUFFIX)

        ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
        ou_people = ous.create(properties={'ou': 'managed_people'})
        ou_groups = ous.create(properties={'ou': 'managed_groups'})

        test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))
        test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))

        # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
        conts = nsContainers(M1, SUFFIX)
        template = conts.create(properties={
            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
            'cn': 'MEP Template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
        })
        repl.test_replication(M1, M2)

        # Point the managed entries plugin at the template on both suppliers
        for inst in topology_m2.ms.values():
            conts = nsContainers(inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
            conts.create(properties={'objectclass': 'top extensibleObject'.split(),
                                     'cn': 'config',
                                     'originScope': ou_people.dn,
                                     'originFilter': 'objectclass=posixAccount',
                                     'managedBase': ou_groups.dn,
                                     'managedTemplate': template.dn})
            inst.restart()

        _create_user(test_users_m1, 1, 1)

        topology_m2.pause_all_replicas()

        _create_user(test_users_m1, 2, 2, sleep=True)
        _create_user(test_users_m2, 2, 2, sleep=True)

        _create_user(test_users_m1, 3, 3, sleep=True)
        _create_user(test_users_m2, 3, 33)

        topology_m2.resume_all_replicas()
        repl.test_replication_topology(topology_m2)

        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)

    def test_nested_entries_with_children(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
        :setup: Two supplier
replication, test container for entries, enable plugin logging, audit log, error log for replica and access log for internal :steps: 1. Add 15 containers to m1 and wait for replication to happen 2. Pause replication 3. Create parent-child on supplier2 and supplier1 4. Create parent-child on supplier1 and supplier2 5. Create parent-child on supplier1 and supplier2 different child rdn 6. Create parent-child on supplier1 and delete parent on supplier2 7. Create parent on supplier1, delete it and parent-child on supplier2, delete them 8. Create parent on supplier1, delete it and parent-two children on supplier2 9. Create parent-two children on supplier1 and parent-child on supplier2, delete them 10. Create three subsets inside existing container entry, applying only part of changes on m2 11. Create more combinations of the subset with parent-child on m1 and parent on m2 12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2 13. Resume replication 14. Check that the entries on both suppliers are the same and replication is working :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. It should pass 5. It should pass 6. It should pass 7. It should pass 8. It should pass 9. It should pass 10. It should pass 11. It should pass 12. It should pass 13. It should pass 14. 
It should pass """ pytest.xfail("Issue 49591 - work in progress") M1 = topology_m2.ms["supplier1"] M2 = topology_m2.ms["supplier2"] repl = ReplicationManager(SUFFIX) test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) _create_user(test_users_m1, 4000) _create_user(test_users_m1, 4001) cont_list = [] for num in range(15): cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num)) cont_list.append(cont) repl.test_replication(M1, M2) topology_m2.pause_all_replicas() log.info("Create parent-child on supplier2 and supplier1") _create_container(M2, base_m2.dn, 'p0', sleep=True) cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True) _create_container(M1, cont_p.dn, 'c0', sleep=True) _create_container(M2, cont_p.dn, 'c0', sleep=True) log.info("Create parent-child on supplier1 and supplier2") cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True) _create_container(M2, base_m2.dn, 'p1', sleep=True) _create_container(M1, cont_p.dn, 'c1', sleep=True) _create_container(M2, cont_p.dn, 'c1', sleep=True) log.info("Create parent-child on supplier1 and supplier2 different child rdn") cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True) _create_container(M2, base_m2.dn, 'p2', sleep=True) _create_container(M1, cont_p.dn, 'c2', sleep=True) _create_container(M2, cont_p.dn, 'c3', sleep=True) log.info("Create parent-child on supplier1 and delete parent on supplier2") cont_num = 0 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) _delete_container(cont_p_m2) cont_num += 1 cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) _delete_container(cont_p_m2, sleep=True) log.info("Create parent on supplier1, delete it and 
parent-child on supplier2, delete them") cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _delete_container(cont_p_m1, sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') _delete_container(cont_c_m2) _delete_container(cont_p_m2) cont_num += 1 cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') _delete_container(cont_c_m2) _delete_container(cont_p_m2, sleep=True) cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _delete_container(cont_p_m1) log.info("Create parent on supplier1, delete it and parent-two children on supplier2") cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _delete_container(cont_p_m1, sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') _create_container(M2, cont_p_m2.dn, 'c0') _create_container(M2, cont_p_m2.dn, 'c1') cont_num += 1 cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') _create_container(M2, cont_p_m2.dn, 'c0') _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _delete_container(cont_p_m1, sleep=True) log.info("Create parent-two children on supplier1 and parent-child on supplier2, delete them") cont_num += 1 cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') _delete_container(cont_c_m2) _delete_container(cont_p_m2, sleep=True) cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0') _create_container(M1, cont_p_m1.dn, 'c1') cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0') _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') cont_c_m2 = _create_container(M2, cont_p_m2.dn, 
'c0') _delete_container(cont_c_m2) _delete_container(cont_p_m2, sleep=True) log.info("Create three subsets inside existing container entry, applying only part of changes on m2") cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0') _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0') _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') _create_container(M1, cont_p_m1.dn, 'c0') _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') _delete_container(cont_c_m2, sleep=True) log.info("Create more combinations of the subset with parent-child on m1 and parent on m2") cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) _delete_container(cont_p_m1, sleep=True) cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') _delete_container(cont_c_m2) _delete_container(cont_p_m2, sleep=True) cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) _delete_container(cont_p_m1, sleep=True) _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) cont_num += 1 cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) _create_container(M2, cont_p_m2.dn, 'c0', 
sleep=True) _delete_container(cont_c_m1, sleep=True) _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) _delete_container(cont_p_m1, sleep=True) log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2") cont_num += 1 _delete_container(cont_list[cont_num]) _modify_user(test_users_m1, 4000, sleep=True) _create_container(M2, cont_list[cont_num].dn, 'p0') _modify_user(test_users_m2, 4001) topology_m2.resume_all_replicas() repl.test_replication_topology(topology_m2, timeout=60) conts_dns = {} for num in range(1, 3): inst = topology_m2.ms["supplier{}".format(num)] conts_dns[inst.serverid] = [] conts = nsContainers(inst, base_m2.dn) for cont in conts.list(): conts_p = nsContainers(inst, cont.dn) for cont_p in conts_p.list(): conts_c = nsContainers(inst, cont_p.dn) conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid]) user_dns_m1 = [user.dn for user in test_users_m1.list()] user_dns_m2 = [user.dn for user in test_users_m2.list()] assert set(user_dns_m1) == set(user_dns_m2) def test_conflict_attribute_multi_valued(self, topology_m2, base_m2): """A RDN attribute being multi-valued, checks that after several operations MODRDN and MOD_REPL its RDN values are the same on both servers :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5 :setup: Two supplier replication, audit log, error log for replica and access log for internal :steps: 1. Create a test entry uid=user_test_1000,... 2. Pause all replication agreements 3. On M1 rename it into uid=foo1,... 4. On M2 rename it into uid=foo2,... 5. On M1 MOD_REPL uid:foo1 6. Resume all replication agreements 7. Check that entry on M1 has uid=foo1, foo2 8. Check that entry on M2 has uid=foo1, foo2 9. Check that entry on M1 and M2 has the same uid values :expectedresults: 1. 
It should pass 2. It should pass 3. It should pass 4. It should pass 5. It should pass 6. It should pass 7. It should pass 8. It should pass 9. It should pass """ M1 = topology_m2.ms["supplier1"] M2 = topology_m2.ms["supplier2"] # add a test user test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) user_1 = test_users_m1.create_test_user(uid=1000) test_users_m2 = UserAccount(M2, user_1.dn) # Waiting fo the user to be replicated for i in range(0,4): time.sleep(1) if test_users_m2.exists(): break assert(test_users_m2.exists()) # Stop replication agreements topology_m2.pause_all_replicas() # On M1 rename test entry in uid=foo1 original_dn = user_1.dn user_1.rename('uid=foo1') time.sleep(1) # On M2 rename test entry in uid=foo2 M2.rename_s(original_dn, 'uid=foo2') time.sleep(2) # on M1 MOD_REPL uid into foo1 user_1.replace('uid', 'foo1') # resume replication agreements topology_m2.resume_all_replicas() time.sleep(5) # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2' final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn) final_user_m1 = UserAccount(M1, final_dn) for val in final_user_m1.get_attr_vals_utf8('uid'): log.info("Check %s is on M1" % val) assert(val in ['foo1', 'foo2']) # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2' final_user_m2 = UserAccount(M2, final_dn) for val in final_user_m2.get_attr_vals_utf8('uid'): log.info("Check %s is on M1" % val) assert(val in ['foo1', 'foo2']) # check that the entry have the same uid values for val in final_user_m1.get_attr_vals_utf8('uid'): log.info("Check M1.uid %s is also on M2" % val) assert(val in final_user_m2.get_attr_vals_utf8('uid')) for val in final_user_m2.get_attr_vals_utf8('uid'): log.info("Check M2.uid %s is also on M1" % val) assert(val in final_user_m1.get_attr_vals_utf8('uid')) def test_conflict_attribute_single_valued(self, topology_m2, base_m2): """A RDN attribute being signle-valued, checks that after several operations MODRDN and MOD_REPL its RDN values are the same 
on both servers :id: c38ae613-5d1e-47cf-b051-c7284e64b817 :setup: Two supplier replication, test container for entries, enable plugin logging, audit log, error log for replica and access log for internal :steps: 1. Create a test entry uid=user_test_1000,... 2. Pause all replication agreements 3. On M1 rename it into employeenumber=foo1,... 4. On M2 rename it into employeenumber=foo2,... 5. On M1 MOD_REPL employeenumber:foo1 6. Resume all replication agreements 7. Check that entry on M1 has employeenumber=foo1 8. Check that entry on M2 has employeenumber=foo1 9. Check that entry on M1 and M2 has the same employeenumber values :expectedresults: 1. It should pass 2. It should pass 3. It should pass 4. It should pass 5. It should pass 6. It should pass 7. It should pass 8. It should pass 9. It should pass """ M1 = topology_m2.ms["supplier1"] M2 = topology_m2.ms["supplier2"] # add a test user with a dummy 'uid' extra value because modrdn removes # uid that conflict with 'account' objectclass test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) user_1 = test_users_m1.create_test_user(uid=1000) user_1.add('objectclass', 'extensibleobject') user_1.add('uid', 'dummy') test_users_m2 = UserAccount(M2, user_1.dn) # Waiting fo the user to be replicated for i in range(0,4): time.sleep(1) if test_users_m2.exists(): break assert(test_users_m2.exists()) # Stop replication agreements topology_m2.pause_all_replicas() # On M1 rename test entry in employeenumber=foo1 original_dn = user_1.dn user_1.rename('employeenumber=foo1') time.sleep(1) # On M2 rename test entry in employeenumber=foo2 M2.rename_s(original_dn, 'employeenumber=foo2') time.sleep(2) # on M1 MOD_REPL uid into foo1 user_1.replace('employeenumber', 'foo1') # resume replication agreements topology_m2.resume_all_replicas() time.sleep(5) # check that on M1, the entry 'employeenumber' has value 'foo1' final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn) final_user_m1 = UserAccount(M1, final_dn) for val in 
class TestThreeSuppliers:
    # Conflict-resolution scenarios that need a third supplier.
    # NOTE(review): helpers _create_container/_delete_container and the
    # module-level log/SUFFIX are defined earlier in this file; their
    # sleep=True argument presumably spaces out CSNs — confirm in helpers.

    def test_nested_entries(self, topology_m3, base_m3):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
        :setup: Three supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add 15 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create two child entries under each of two entries
            4. Create three child entries under each of three entries
            5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
               on m2 - delete one parent and create a child
            6. Test a few more parent-child combinations with three instances
            7. Resume replication
            8. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """
        # Known-broken scenario: expected to fail until issue 49591 is done.
        pytest.xfail("Issue 49591 - work in progress")
        M1 = topology_m3.ms["supplier1"]
        M2 = topology_m3.ms["supplier2"]
        M3 = topology_m3.ms["supplier3"]
        repl = ReplicationManager(SUFFIX)

        # One container per scenario below; created before pausing so they
        # exist everywhere.
        cont_list = []
        for num in range(11):
            cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)
        repl.test_replication(M1, M3)

        # All changes below are made while agreements are paused, so each
        # supplier diverges; conflicts are resolved on resume.
        topology_m3.pause_all_replicas()

        log.info("Create two child entries under each of two entries")
        cont_num = -1
        for num in range(2):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

        log.info("Create three child entries under each of three entries")
        for num in range(3):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

        log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"
                 "on m2 - delete one parent and create a child")
        for inst1, inst2 in ((M1, M2), (M2, M1)):
            cont_num += 1
            cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True)
            cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
            _delete_container(cont_p_m2_1, sleep=True)
            _delete_container(cont_p_m1_2, sleep=True)
            _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

        log.info("Test a few more parent-child combinations on three instances")
        for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
            cont_num += 1
            # inst1: create then delete a parent
            cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            _delete_container(cont_p_m1, sleep=True)
            # inst2: create parent+child, then delete both
            cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
            _delete_container(cont_c_m2)
            _delete_container(cont_p_m2, sleep=True)
            # inst3: create parent with two children and keep them
            cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
            _create_container(inst3, cont_p_m3.dn, 'c0')
            _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

        topology_m3.resume_all_replicas()
        repl.test_replication_topology(topology_m3)

        # Collect all container DNs (three levels deep) per supplier and
        # verify every supplier converged to the same set.
        conts_dns = {}
        for num in range(1, 4):
            inst = topology_m3.ms["supplier{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m3.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        for conts1, conts2 in permutations(conts_dns.values(), 2):
            assert set(conts1) == set(conts2)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
# --- END COPYRIGHT BLOCK --- # import os import logging import pytest from lib389.topologies import create_topology from lib389._constants import ReplicaRole DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) # Redefine some fixtures so we can use them with class scope @pytest.fixture(scope="class") def topology_m2(request): """Create Replication Deployment with two suppliers""" topology = create_topology({ReplicaRole.SUPPLIER: 2}) def fin(): if DEBUGGING: [inst.stop() for inst in topology] else: [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @pytest.fixture(scope="class") def topology_m3(request): """Create Replication Deployment with three suppliers""" topology = create_topology({ReplicaRole.SUPPLIER: 3}) def fin(): if DEBUGGING: [inst.stop() for inst in topology] else: [inst.delete() for inst in topology] request.addfinalizer(fin) return topology 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/encryption_cl5_test.py000066400000000000000000000134471421664411400312400ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- #
#
# Fixed: 'os' and 'glob' were used below but never imported explicitly;
# they only resolved through names leaking from the star import of
# lib389._constants. Import them explicitly.
import glob
import logging
import os
import pytest
import pdb
from lib389.utils import ensure_bytes, ds_supports_new_changelog
from lib389.replica import ReplicationManager
from lib389.dseldif import DSEldif
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.topologies import topology_m2
from lib389._constants import *

pytestmark = pytest.mark.tier1

# Changelog attribute holding the cleartext password copy
ATTRIBUTE = 'unhashed#user#password'

DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


@pytest.fixture(scope="module")
def topology_with_tls(topology_m2):
    """Enable TLS on all suppliers"""
    [i.enable_tls() for i in topology_m2]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(topology_m2.ms['supplier1'], topology_m2.ms['supplier2'])

    return topology_m2


def _enable_changelog_encryption(inst, encrypt_algorithm):
    """Configure changelog encryption for supplier.

    Edits dse.ldif offline (server stopped) because the encryption
    algorithm cannot be changed over LDAP while the server runs.
    """
    dse_ldif = DSEldif(inst)
    log.info('Configuring changelog encryption:{} for: {}'.format(inst.serverid, encrypt_algorithm))
    inst.stop()
    if ds_supports_new_changelog():
        # Per-backend changelog entry (newer servers)
        changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
    else:
        changelog = DN_CHANGELOG
    dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', encrypt_algorithm)
    if dse_ldif.get(changelog, 'nsSymmetricKey'):
        # Drop any stale key so a new one is generated on startup
        dse_ldif.delete(changelog, 'nsSymmetricKey')
    inst.start()


def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_encrypted):
    """Check if unhashed#user#password attribute value is encrypted or not.

    Dumps the changelog database with dbscan and inspects the record for
    the given operation/DN; asserts the cleartext password is absent
    (is_encrypted=True) or present (is_encrypted=False).
    """
    if ds_supports_new_changelog():
        log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
        dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog')
    else:
        changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB)
        for changelog_dbfile in glob.glob(f'{changelog_dbdir}*/*.db*'):
            log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile))
            log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
            dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile)

    count = 0
    for entry in dbscanOut.split(b'dbid: '):
        if ensure_bytes('operation: {}'.format(change_type)) in entry and\
           ensure_bytes(ATTRIBUTE) in entry and ensure_bytes(user_dn.lower()) in entry.lower():
            count += 1
            user_pw_attr = ensure_bytes('{}: {}'.format(ATTRIBUTE, user_pw))
            if is_encrypted:
                assert user_pw_attr not in entry, 'Changelog entry contains clear text password'
            else:
                assert user_pw_attr in entry, 'Changelog entry does not contain clear text password'
    assert count, 'Operation type and DN of the entry not matched in changelog'


#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_algorithm_unhashed(topology_with_tls):
    """Check encryption algorithm AES
    And check unhashed#user#password attribute for encryption.

    :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c
    :parametrized: yes
    :setup: Replication with two suppliers and SSL configured.
    :steps:
        1. Enable changelog encrytion on supplier1
        2. Add a user to supplier1/supplier2
        3. Run dbscan -f on m1 to check unhashed#user#password
           attribute is encrypted.
        4. Run dbscan -f on m2 to check unhashed#user#password
           attribute is in cleartext.
        5. Modify password in supplier2/supplier1
        6. Run dbscan -f on m1 to check unhashed#user#password
           attribute is encrypted.
        7. Run dbscan -f on m2 to check unhashed#user#password
           attribute is in cleartext.
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
    """
    encryption = 'AES'
    m1 = topology_with_tls.ms['supplier1']
    m2 = topology_with_tls.ms['supplier2']
    m1.config.set('nsslapd-unhashed-pw-switch', 'on')
    m2.config.set('nsslapd-unhashed-pw-switch', 'on')
    test_passw = 'm2Test199'

    _enable_changelog_encryption(m1, encryption)

    for inst1, inst2 in ((m1, m2), (m2, m1)):
        # need to create a user specific to the encryption
        # else the two runs will hit the same user
        user_props={
            'uid': 'testuser_%s' % encryption,
            'cn' : 'testuser_%s' % encryption,
            'sn' : 'user',
            'uidNumber' : '1000',
            'gidNumber' : '1000',
            'homeDirectory' : '/home/testuser_%s' % encryption
        }
        user_props["userPassword"] = PASSWORD
        users = UserAccounts(inst1, DEFAULT_SUFFIX)
        tuser = users.create(properties=user_props)

        # Only m1 has an encrypted changelog; m2 keeps cleartext
        _check_unhashed_userpw_encrypted(m1, 'add', tuser.dn, PASSWORD, True)
        _check_unhashed_userpw_encrypted(m2, 'add', tuser.dn, PASSWORD, False)

        users = UserAccounts(inst2, DEFAULT_SUFFIX)
        tuser = users.get(tuser.rdn)
        tuser.set('userPassword', test_passw)
        _check_unhashed_userpw_encrypted(m1, 'modify', tuser.dn, test_passw, True)
        _check_unhashed_userpw_encrypted(m2, 'modify', tuser.dn, test_passw, False)
        tuser.delete()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s {}".format(CURRENT_FILE))
class DoMods(threading.Thread):
    """Background thread that repeatedly modifies the suffix entry."""

    def __init__(self, inst, task):
        """Initialize the thread.

        :param inst: DirSrv instance to bind to
        :param task: "import" (failures tolerated) or "export"
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.inst = inst
        self.name = inst.serverid
        self.task = task

    def run(self):
        """Apply MOD_COUNT replaces of 'description' on the suffix entry."""
        idx = 0
        conn = DirectoryManager(self.inst).bind()
        domain = Domain(conn, DEFAULT_SUFFIX)
        while idx < MOD_COUNT:
            try:
                domain.replace('description', str(idx))
            except ldap.LDAPError:
                # Fixed: was a bare "except:" which also swallowed
                # KeyboardInterrupt/SystemExit.
                if self.task == "import":
                    # Failures are expected during an import
                    pass
                else:
                    # export, should not fail
                    log.fatal('Updates should not fail during an export')
                    assert False
            idx += 1


def test_multiple_changelogs(topo):
    """Test the multiple suffixes can be replicated with the new
    per backend changelog.

    :id: eafcdb57-4ea2-4887-a0a8-9e4d295f4f4d
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create s second suffix
        2. Enable replication for second backend
        3. Perform some updates on both backends and make sure replication
           is working for both backends
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']

    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot',
                 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        be.create(properties=props)
        be.create_sample_entries('001004002')

    # Setup replication for second suffix
    repl = ReplicationManager(SECOND_SUFFIX)
    repl.create_first_supplier(supplier)
    repl.join_consumer(supplier, consumer)

    # Test replication works for each backend
    for suffix in [DEFAULT_SUFFIX, SECOND_SUFFIX]:
        replicas = Replicas(supplier)
        replica = replicas.get(suffix)
        log.info("Testing replication for: " + suffix)
        assert replica.test_replication([consumer])


def test_multiple_changelogs_export_import(topo):
    """Test that we can export and import the replication changelog

    :id: b74fcaaf-a13f-4ee0-98f9-248b281f8700
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create s second suffix
        2. Enable replication for second backend
        3. Perform some updates on a backend, and export the changelog
        4. Do an export and import while the server is idle
        5. Do an import while the server is under load
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    # Fixed: removed a local SECOND_SUFFIX assignment that shadowed the
    # identical module-level constant.
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']
    supplier.config.set('nsslapd-errorlog-level', '0')

    # Create second suffix dc=second_backend on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot',
                 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        try:
            be.create(properties=props)
            be.create_sample_entries('001004002')
        except ldap.UNWILLING_TO_PERFORM:
            # Backend already exists (fixture reuse across tests)
            pass

    # Setup replication for second suffix
    try:
        repl = ReplicationManager(SECOND_SUFFIX)
        repl.create_first_supplier(supplier)
        repl.join_consumer(supplier, consumer)
    except ldap.ALREADY_EXISTS:
        pass

    # Put the replica under load, and export the changelog
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    doMods1 = DoMods(supplier, task="export")
    doMods1.start()
    replica.begin_task_cl2ldif()
    doMods1.join()
    replica.task_finished()

    # allow some time to pass, and test replication
    time.sleep(1)
    assert replica.test_replication([consumer])

    # While idle, go an export and import, and make sure replication still works
    log.info("Testing idle server with CL export and import...")
    replica.begin_task_cl2ldif()
    replica.task_finished()
    replica.begin_task_ldif2cl()
    replica.task_finished()
    assert replica.test_replication([consumer])

    # stability test, put the replica under load, import the changelog, and make
    # sure server did not crash.
    log.info("Testing busy server with CL import...")
    doMods2 = DoMods(supplier, task="import")
    doMods2.start()
    replica.begin_task_ldif2cl()
    doMods2.join()
    replica.task_finished()

    # Replication will be broken so no need to test it.  This is just make sure
    # the import works, and the server is stable
    assert supplier.status()
    assert consumer.status()
import logging
import pytest
import os
from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
from lib389.topologies import topology_m1h1c1 as topo
from lib389.replica import Replicas, ReplicationManager, Agreements

pytestmark = pytest.mark.tier1

log = logging.getLogger(__name__)


def test_promote_demote(topo):
    """Test promoting and demoting a replica

    :id: 75edff64-f987-4ed5-a03d-9bee73c0fbf0
    :setup: 2 Supplier Instances
    :steps:
        1. Promote Hub to a Supplier
        2. Test replication works
        3. Demote the supplier to a consumer
        4. Test replication works
        5. Promote consumer to supplier
        6. Test replication works
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    supplier = topo.ms["supplier1"]
    supplier_replica = Replicas(supplier).get(DEFAULT_SUFFIX)
    # Reuse the existing replication bind DN for the promoted replica
    bind_dn = supplier_replica.get_attr_val_utf8('nsDS5ReplicaBindDN')
    hub = topo.hs["hub1"]
    hub_replica = Replicas(hub).get(DEFAULT_SUFFIX)
    consumer = topo.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # promote replica
    hub_replica.promote(ReplicaRole.SUPPLIER, binddn=bind_dn, rid='55')
    repl.test_replication(supplier, consumer)

    # Demote the replica
    hub_replica.demote(ReplicaRole.CONSUMER)
    repl.test_replication(supplier, hub)

    # promote replica and init it
    # NOTE(review): a different rid ('56') is used for the second promotion,
    # presumably so the re-promoted replica gets a fresh replica ID — confirm
    hub_replica.promote(ReplicaRole.SUPPLIER, binddn=bind_dn, rid='56')
    agmt = Agreements(supplier).list()[0]
    agmt.begin_reinit()
    agmt.wait_reinit()

    # init consumer
    agmt = Agreements(hub).list()[0]
    agmt.begin_reinit()
    agmt.wait_reinit()
    repl.test_replication(supplier, consumer)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
# --- END COPYRIGHT BLOCK --- # import os import logging import pytest from lib389.utils import * from lib389._constants import * from lib389.replica import Replicas, ReplicationManager from lib389.dseldif import * from lib389.topologies import topology_i2 as topo_i2 pytestmark = pytest.mark.tier1 NEW_SUFFIX_NAME = 'test_repl' NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) NEW_BACKEND = 'repl_base' CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) MAXAGE_ATTR = 'nsslapd-changelogmaxage' MAXAGE_STR = '30' TRIMINTERVAL_STR = '5' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_special_symbol_replica_agreement(topo_i2): """ Check if agreement starts with "cn=->..." then after upgrade does it get removed. :id: 68aa0072-4dd4-4e33-b107-cb383a439125 :setup: two standalone instance :steps: 1. Create and Enable Replication on standalone2 and role as consumer 2. Create and Enable Replication on standalone1 and role as supplier 3. Create a Replication agreement starts with "cn=->..." 4. Perform an upgrade operation over the supplier 5. Check if the agreement is still present or not. :expectedresults: 1. It should be successful 2. It should be successful 3. It should be successful 4. It should be successful 5. 
It should be successful """ supplier = topo_i2.ins["standalone1"] consumer = topo_i2.ins["standalone2"] consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) repl = ReplicationManager(DEFAULT_SUFFIX) repl.create_first_supplier(supplier) properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} supplier.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) supplier.agreement.init(SUFFIX, consumer.host, consumer.port) replica_server = Replicas(supplier).get(DEFAULT_SUFFIX) supplier.upgrade('online') agmt = replica_server.get_agreements().list()[0] assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/regression_m2_test.py000066400000000000000000000772351421664411400310660ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import os import re import time import logging import ldif import ldap import pytest import subprocess from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts from lib389.pwpolicy import PwPolicyManager from lib389.utils import * from lib389._constants import * from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.user import UserAccount from lib389.idm.group import Groups, Group from lib389.idm.domain import Domain from lib389.idm.directorymanager import DirectoryManager from lib389.replica import Replicas, ReplicationManager, ReplicaRole from lib389.agreement import Agreements from lib389 import pid_from_file from lib389.dseldif import * from lib389.topologies import topology_m2 as topo_m2, TopologyMain, create_topology, _remove_ssca_db pytestmark = pytest.mark.tier1 NEW_SUFFIX_NAME = 'test_repl' NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) NEW_BACKEND = 'repl_base' CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) MAXAGE_ATTR = 'nsslapd-changelogmaxage' MAXAGE_STR = '30' TRIMINTERVAL_STR = '5' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def find_start_location(file, no): log_pattern = re.compile("slapd_daemon - slapd started.") count = 0 while True: line = file.readline() log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) found = log_pattern.search(line) if (found): count = count + 1 if (count == no): return file.tell() if (line == ''): break return -1 def pattern_errorlog(file, log_pattern, start_location=0): count = 0 log.debug("_pattern_errorlog: start from the beginning") file.seek(start_location) # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: line = file.readline() log.debug("_pattern_errorlog: [%d] %s" % 
(file.tell(), line)) found = log_pattern.search(line) if (found): count = count + 1 if (line == ''): break log.debug("_pattern_errorlog: complete (count=%d)" % count) return count def _move_ruv(ldif_file): """ Move RUV entry in an ldif file to the top""" with open(ldif_file) as f: parser = ldif.LDIFRecordList(f) parser.parse() ldif_list = parser.all_records for dn in ldif_list: if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): ruv_index = ldif_list.index(dn) ldif_list.insert(0, ldif_list.pop(ruv_index)) break with open(ldif_file, 'w') as f: ldif_writer = ldif.LDIFWriter(f) for dn, entry in ldif_list: ldif_writer.unparse(dn, entry) def _remove_replication_data(ldif_file): """ Remove the replication data from ldif file: db2lif without -r includes some of the replica data like - nsUniqueId - keepalive entries This function filters the ldif fil to remove these data """ with open(ldif_file) as f: parser = ldif.LDIFRecordList(f) parser.parse() ldif_list = parser.all_records # Iterate on a copy of the ldif entry list for dn, entry in ldif_list[:]: if dn.startswith('cn=repl keep alive'): ldif_list.remove((dn, entry)) else: entry.pop('nsUniqueId') with open(ldif_file, 'w') as f: ldif_writer = ldif.LDIFWriter(f) for dn, entry in ldif_list: ldif_writer.unparse(dn, entry) @pytest.fixture(scope="function") def topo_with_sigkill(request): """Create Replication Deployment with two suppliers""" topology = create_topology({ReplicaRole.SUPPLIER: 2}) def _kill_ns_slapd(inst): pid = str(pid_from_file(inst.ds_paths.pid_file)) cmd = ['kill', '-9', pid] subprocess.Popen(cmd, stdout=subprocess.PIPE) def fin(): # Kill the hanging process at the end of test to prevent failures in the following tests if DEBUGGING: [_kill_ns_slapd(inst) for inst in topology] else: [_kill_ns_slapd(inst) for inst in topology] assert _remove_ssca_db(topology) [inst.stop() for inst in topology if inst.exists()] [inst.delete() for inst in topology if inst.exists()] request.addfinalizer(fin) 
return topology @pytest.fixture() def create_entry(topo_m2, request): """Add test entry using UserAccounts""" log.info('Adding a test entry user') users = UserAccounts(topo_m2.ms["supplier1"], DEFAULT_SUFFIX) tuser = users.ensure_state(properties=TEST_USER_PROPERTIES) return tuser def add_ou_entry(server, idx, parent): ous = OrganizationalUnits(server, parent) name = 'OU%d' % idx ous.create(properties={'ou': '%s' % name}) def add_user_entry(server, idx, parent): users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) user_properties = { 'uid': 'tuser%d' % idx, 'givenname': 'test', 'cn': 'Test User%d' % idx, 'sn': 'user%d' % idx, 'userpassword': PW_DM, 'uidNumber': '1000%d' % idx, 'gidNumber': '2000%d' % idx, 'homeDirectory': '/home/{}'.format('tuser%d' % idx) } users.create(properties=user_properties) def del_user_entry(server, idx, parent): users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) test_user = users.get('tuser%d' % idx) test_user.delete() def rename_entry(server, idx, ou_name, new_parent): users = UserAccounts(server, DEFAULT_SUFFIX, rdn=ou_name) name = 'tuser%d' % idx rdn = 'uid=%s' % name test_user = users.get(name) test_user.rename(new_rdn=rdn, newsuperior=new_parent) def add_ldapsubentry(server, parent): pwp = PwPolicyManager(server) policy_props = {'passwordStorageScheme': 'ssha', 'passwordCheckSyntax': 'on', 'passwordInHistory': '6', 'passwordChange': 'on', 'passwordMinAge': '0', 'passwordExp': 'off', 'passwordMustChange': 'off',} log.info('Create password policy for subtree {}'.format(parent)) pwp.create_subtree_policy(parent, policy_props) def test_double_delete(topo_m2, create_entry): """Check that double delete of the entry doesn't crash server :id: 3496c82d-636a-48c9-973c-2455b12164cc :setup: Two suppliers replication setup, a test entry :steps: 1. Delete the entry on the first supplier 2. Delete the entry on the second supplier 3. Check that server is alive :expectedresults: 1. Entry should be successfully deleted from first supplier 2. 
Entry should be successfully deleted from second aster 3. Server should me alive """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] repl = ReplicationManager(DEFAULT_SUFFIX) repl.disable_to_supplier(m1, [m2]) repl.disable_to_supplier(m2, [m1]) log.info('Deleting entry {} from supplier1'.format(create_entry.dn)) topo_m2.ms["supplier1"].delete_s(create_entry.dn) log.info('Deleting entry {} from supplier2'.format(create_entry.dn)) topo_m2.ms["supplier2"].delete_s(create_entry.dn) repl.enable_to_supplier(m2, [m1]) repl.enable_to_supplier(m1, [m2]) repl.test_replication(m1, m2) repl.test_replication(m2, m1) @pytest.mark.bz1506831 def test_repl_modrdn(topo_m2): """Test that replicated MODRDN does not break replication :id: a3e17698-9eb4-41e0-b537-8724b9915fa6 :setup: Two suppliers replication setup :steps: 1. Add 3 test OrganizationalUnits A, B and C 2. Add 1 test user under OU=A 3. Add same test user under OU=B 4. Stop Replication 5. Apply modrdn to M1 - move test user from OU A -> C 6. Apply modrdn on M2 - move test user from OU B -> C 7. Start Replication 8. Check that there should be only one test entry under ou=C on both suppliers 9. Check that the replication is working fine both ways M1 <-> M2 :expectedresults: 1. This should pass 2. This should pass 3. This should pass 4. This should pass 5. This should pass 6. This should pass 7. This should pass 8. This should pass 9. 
This should pass """ supplier1 = topo_m2.ms["supplier1"] supplier2 = topo_m2.ms["supplier2"] repl = ReplicationManager(DEFAULT_SUFFIX) log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs") OUs = OrganizationalUnits(supplier1, DEFAULT_SUFFIX) OU_A = OUs.create(properties={ 'ou': 'A', 'description': 'A', }) OU_B = OUs.create(properties={ 'ou': 'B', 'description': 'B', }) OU_C = OUs.create(properties={ 'ou': 'C', 'description': 'C', }) users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn)) tuser_A = users.create(properties=TEST_USER_PROPERTIES) users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn)) tuser_B = users.create(properties=TEST_USER_PROPERTIES) repl.test_replication(supplier1, supplier2) repl.test_replication(supplier2, supplier1) log.info("Stop Replication") topo_m2.pause_all_replicas() log.info("Apply modrdn to M1 - move test user from OU A -> C") supplier1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) log.info("Apply modrdn on M2 - move test user from OU B -> C") supplier2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) log.info("Start Replication") topo_m2.resume_all_replicas() log.info("Wait for sometime for repl to resume") repl.test_replication(supplier1, supplier2) repl.test_replication(supplier2, supplier1) log.info("Check that there should be only one test entry under ou=C on both suppliers") users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) assert len(users.list()) == 1 users = UserAccounts(supplier2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) assert len(users.list()) == 1 log.info("Check that the replication is working fine both ways, M1 <-> M2") repl.test_replication(supplier1, supplier2) repl.test_replication(supplier2, supplier1) def test_password_repl_error(topo_m2, create_entry): """Check that error about userpassword replication is properly logged :id: 714130ff-e4f0-4633-9def-c1f4b24abfef :setup: 
Four suppliers replication setup, a test entry :steps: 1. Change userpassword on the first supplier 2. Restart the servers to flush the logs 3. Check the error log for an replication error :expectedresults: 1. Password should be successfully changed 2. Server should be successfully restarted 3. There should be no replication errors in the error log """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] TEST_ENTRY_NEW_PASS = 'new_pass' log.info('Clean the error log') m2.deleteErrorLogs() log.info('Set replication loglevel') m2.config.loglevel((ErrorLog.REPLICA,)) log.info('Modifying entry {} - change userpassword on supplier 1'.format(create_entry.dn)) create_entry.set('userpassword', TEST_ENTRY_NEW_PASS) repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(m1, m2) log.info('Restart the servers to flush the logs') for num in range(1, 3): topo_m2.ms["supplier{}".format(num)].restart() try: log.info('Check that password works on supplier 2') create_entry_m2 = UserAccount(m2, create_entry.dn) create_entry_m2.bind(TEST_ENTRY_NEW_PASS) log.info('Check the error log for the error with {}'.format(create_entry.dn)) assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn)) finally: log.info('Set the default loglevel') m2.config.loglevel((ErrorLog.DEFAULT,)) def test_invalid_agmt(topo_m2): """Test adding that an invalid agreement is properly rejected and does not crash the server :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b :setup: Four suppliers replication setup :steps: 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) 2. Verify the server is still running :expectedresults: 1. Invalid repl agreement should be rejected 2. 
Server should be still running """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] repl = ReplicationManager(DEFAULT_SUFFIX) replicas = Replicas(m1) replica = replicas.get(DEFAULT_SUFFIX) agmts = replica.get_agreements() # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) with pytest.raises(ldap.UNWILLING_TO_PERFORM): agmts.create(properties={ 'cn': 'whatever', 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', 'nsDS5ReplicaBindMethod': 'simple', 'nsDS5ReplicaTransportInfo': 'LDAP', 'nsds5replicaTimeout': '5', 'description': "test agreement", 'nsDS5ReplicaHost': m2.host, 'nsDS5ReplicaPort': str(m2.port), 'nsDS5ReplicaCredentials': 'whatever', 'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE' }) # Verify the server is still running repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication(m1, m2) repl.test_replication(m2, m1) def test_fetch_bindDnGroup(topo_m2): """Check the bindDNGroup is fetched on first replication session :id: 5f1b1f59-6744-4260-b091-c82d22130025 :setup: 2 Supplier Instances :steps: 1. Create a replication bound user and group, but the user *not* member of the group 2. Check that replication is working 3. Some preparation is required because of lib389 magic that already define a replication via group - define the group as groupDN for replication and 60sec as fetch interval - pause RA in both direction - Define the user as bindDn of the RAs 4. restart servers. It sets the fetch time to 0, so next session will refetch the group 5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time) 6. trigger an update and check replication is working and there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica' :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) M1 = topo_m2.ms['supplier1'] M2 = topo_m2.ms['supplier2'] # Enable replication log level. Not really necessary M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) # Create a group and a user PEOPLE = "ou=People,%s" % SUFFIX PASSWD = 'password' REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn' uid = REPL_MGR_BOUND_DN.encode() users = UserAccounts(M1, PEOPLE, rdn=None) user_props = TEST_USER_PROPERTIES.copy() user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'}) create_user = users.create(properties=user_props) groups_M1 = Groups(M1, DEFAULT_SUFFIX) group_properties = { 'cn': 'group1', 'description': 'testgroup'} group_M1 = groups_M1.create(properties=group_properties) group_M2 = Group(M2, group_M1.dn) assert(not group_M1.is_member(create_user.dn)) # Check that M1 and M2 are in sync repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(M1, M2, timeout=20) # Define the group as the replication manager and fetch interval as 60sec replicas = Replicas(M1) replica = replicas.list()[0] replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) replicas = Replicas(M2) replica = replicas.list()[0] replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) # Then pause the replication agreement to prevent them trying to acquire # while the user is not member of the group 
topo_m2.pause_all_replicas() # Define the user as the bindDN of the RAs for inst in (M1, M2): agmts = Agreements(inst) agmt = agmts.list()[0] agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode()) agmt.replace('nsds5ReplicaCredentials', PASSWD.encode()) # Key step # The restart will fetch the group/members define in the replica # # The user NOT member of the group replication will not work until bindDNcheckInterval # # With the fix, the first fetch is not taken into account (fetch time=0) # so on the first session, the group will be fetched M1.restart() M2.restart() # Replication being broken here we need to directly do the same update. # Sorry not found another solution except total update group_M1.add_member(create_user.dn) group_M2.add_member(create_user.dn) topo_m2.resume_all_replicas() # trigger updates to be sure to have a replication session, giving some time M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')]) M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')]) time.sleep(10) # Check replication is working ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') for ent in ents: assert (ent.hasAttr('description')) found = 0 for val in ent.getValues('description'): if (val == b'value_1_1'): found = found + 1 elif (val == b'value_2_2'): found = found + 1 assert (found == 2) ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') for ent in ents: assert (ent.hasAttr('description')) found = 0 for val in ent.getValues('description'): if (val == b'value_1_1'): found = found + 1 elif (val == b'value_2_2'): found = found + 1 assert (found == 2) # Check in the logs that the member was detected in the group although # at startup it was not member of the group regex = re.compile("does not have permission to supply replication updates to the replica.") errorlog_M1 = open(M1.errlog, "r") errorlog_M2 = open(M1.errlog, "r") # Find the last restart position restart_location_M1 = 
find_start_location(errorlog_M1, 2) assert (restart_location_M1 != -1) restart_location_M2 = find_start_location(errorlog_M2, 2) assert (restart_location_M2 != -1) # Then check there is no failure to authenticate count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1) assert(count <= 1) count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2) assert(count <= 1) def test_plugin_bind_dn_tracking_and_replication(topo_m2): """Testing nsslapd-plugin-binddn-tracking does not cause issues around access control and reconfiguring replication/repl agmt. :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c9 :setup: 2 supplier topology :steps: 1. Turn on plugin binddn tracking 2. Add some users 3. Make an update as a user 4. Make an update to the replica config 5. Make an update to the repliocation agreement :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success """ m1 = topo_m2.ms["supplier1"] # Turn on bind dn tracking m1.config.set('nsslapd-plugin-binddn-tracking', 'on') # Add two users users = UserAccounts(m1, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1011) user1.set('userpassword', PASSWORD) user2 = users.create_test_user(uid=1012) # Add an aci acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ ';allow (all) (userdn = "ldap:///{}");)'.format(user1.dn) Domain(m1, DEFAULT_SUFFIX).add('aci', acival) # Bind as user and make an update user1.rebind(PASSWORD) user2.set('cn', 'new value') dm = DirectoryManager(m1) dm.rebind() # modify replica replica = Replicas(m1).get(DEFAULT_SUFFIX) replica.set(REPL_PROTOCOL_TIMEOUT, "30") # modify repl agmt agmt = replica.get_agreements().list()[0] agmt.set(REPL_PROTOCOL_TIMEOUT, "20") @pytest.mark.bz1314956 @pytest.mark.ds48755 def test_moving_entry_make_online_init_fail(topo_m2): """ Moving an entry could make the online init fail :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e :setup: Two suppliers replication setup :steps: 1. Generate DIT_0 2. 
Generate password policy for DIT_0 3. Create users for DIT_0 4. Turn idx % 2 == 0 users into tombstones 5. Generate DIT_1 6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1 7. Move 'ou=OU0,dc=example,dc=com' to DIT_1 8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com' 9. Init replicas 10. Number of entries should match on both suppliers :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. Success 9. Success 10. Success """ M1 = topo_m2.ms["supplier1"] M2 = topo_m2.ms["supplier2"] log.info("Generating DIT_0") idx = 0 add_ou_entry(M1, idx, DEFAULT_SUFFIX) log.info("Created entry: ou=OU0, dc=example, dc=com") ou0 = 'ou=OU%d' % idx first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX) add_ou_entry(M1, idx, first_parent) log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com") add_ldapsubentry(M1, first_parent) ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx) second_parent = 'ou=OU%d,%s' % (idx, first_parent) for idx in range(0, 9): add_user_entry(M1, idx, ou_name) if idx % 2 == 0: log.info("Turning tuser%d into a tombstone entry" % idx) del_user_entry(M1, idx, ou_name) log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, first_parent, second_parent)) log.info("Generating DIT_1") idx = 1 add_ou_entry(M1, idx, DEFAULT_SUFFIX) log.info("Created entry: ou=OU1,dc=example,dc=com") third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX) add_ou_entry(M1, idx, third_parent) log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com") add_ldapsubentry(M1, third_parent) log.info("Moving %s to DIT_1" % second_parent) OrganizationalUnits(M1, second_parent).get('OU0').rename(ou0, newsuperior=third_parent) log.info("Moving %s to DIT_1" % first_parent) fourth_parent = '%s,%s' % (ou0, third_parent) OrganizationalUnits(M1, first_parent).get('OU0').rename(ou0, newsuperior=fourth_parent) fifth_parent = '%s,%s' % (ou0, fourth_parent) ou_name = 'ou=OU0,ou=OU1' log.info("Moving USERS to %s" % fifth_parent) for idx in range(0, 9): if idx 
% 2 == 1: rename_entry(M1, idx, ou_name, fifth_parent) log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent)) log.info("Run Initialization.") repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(M1, M2, timeout=5) m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') log.info("m1entry count - %d", len(m1entries)) log.info("m2entry count - %d", len(m2entries)) assert len(m1entries) == len(m2entries) def get_keepalive_entries(instance, replica): # Returns the keep alive entries that exists with the suffix of the server instance try: entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", ['cn', 'nsUniqueId', 'modifierTimestamp']) except ldap.LDAPError as e: log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e))) assert False # No error, so lets log the keepalive entries if log.isEnabledFor(logging.DEBUG): for ret in entries: log.debug("Found keepalive entry:\n"+str(ret)); return entries def verify_keepalive_entries(topo, expected): # Check that keep alive entries exists (or not exists) for every suppliers on every suppliers # Note: The testing method is quite basic: counting that there is one keepalive entry per supplier. # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but # not for the general case as keep alive associated with no more existing supplier may exists # (for example after: db2ldif / demote a supplier / ldif2db / init other suppliers) # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries # should be done. 
for supplierId in topo.ms: supplier = topo.ms[supplierId] for replica in Replicas(supplier).list(): if (replica.get_role() != ReplicaRole.SUPPLIER): continue replica_info = f'supplier: {supplierId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}' log.debug(f'Checking keepAliveEntries on {replica_info}') keepaliveEntries = get_keepalive_entries(supplier, replica); expectedCount = len(topo.ms) if expected else 0 foundCount = len(keepaliveEntries) if (foundCount == expectedCount): log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') else: log.error(f'{foundCount} Keepalive entries are found ' f'while {expectedCount} were expected on {replica_info}.') assert False def test_online_init_should_create_keepalive_entries(topo_m2): """Check that keep alive entries are created when initializinf a supplier from another one :id: d5940e71-d18a-4b71-aaf7-b9185361fffe :setup: Two suppliers replication setup :steps: 1. Generate ldif without replication data 2 Init both suppliers from that ldif 3 Check that keep alive entries does not exists 4 Perform on line init of supplier2 from supplier1 5 Check that keep alive entries exists :expectedresults: 1. No error while generating ldif 2. No error while importing the ldif file 3. No keepalive entrie should exists on any suppliers 4. No error while initializing supplier2 5. 
All keepalive entries should exist on every suppliers """ repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Step 1: Generate ldif without replication data m1.stop() m2.stop() ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=None, repl_data=False, outputfile=ldif_file, encrypt=False) # Remove replication metadata that are still in the ldif _remove_replication_data(ldif_file) # Step 2: Init both suppliers from that ldif m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m1.start() m2.start() """ Replica state is now as if CLI setup has been done using: dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" dsconf supplier1 repl-agmt create --suffix "${SUFFIX}" dsconf supplier2 repl-agmt create --suffix "${SUFFIX}" """ # Step 3: No keepalive entrie should exists on any suppliers verify_keepalive_entries(topo_m2, False) # Step 4: Perform on line init of supplier2 from supplier1 agmt = Agreements(m1).list()[0] agmt.begin_reinit() (done, error) = agmt.wait_reinit() assert done is True assert error is False # Step 5: All keepalive entries should exists on every suppliers # Verify the keep alive entry once replication is in sync # (that is the step that fails when bug is not fixed) repl.wait_for_ruv(m2,m1) verify_keepalive_entries(topo_m2, True); @pytest.mark.ds49915 @pytest.mark.bz1626375 def test_online_reinit_may_hang(topo_with_sigkill): """Online reinitialization may hang when the first entry of the DB is RUV entry instead of the suffix :id: 
cded6afa-66c0-4c65-9651-993ba3f7a49c :setup: 2 Supplier Instances :steps: 1. Export the database 2. Move RUV entry to the top in the ldif file 3. Import the ldif file 4. Check that replication is still working 5. Online replica initializaton :expectedresults: 1. Ldif file should be created successfully 2. RUV entry should be on top in the ldif file 3. Import should be successful 4. Replication should work 5. Server should not hang and consume 100% CPU """ M1 = topo_with_sigkill.ms["supplier1"] M2 = topo_with_sigkill.ms["supplier2"] M1.stop() ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir() M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=None, repl_data=True, outputfile=ldif_file, encrypt=False) _move_ruv(ldif_file) M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) M1.start() # After this server may hang # Exporting idle server with replication data and reimporting # should not break replication (Unless we hit issue 5098) # So let check that replication is still working. repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication_topology(topo_with_sigkill) agmt = Agreements(M1).list()[0] agmt.begin_reinit() (done, error) = agmt.wait_reinit() assert done is True assert error is False repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication_topology(topo_with_sigkill) if DEBUGGING: # Add debugging steps(if any)... pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/regression_m2c2_test.py000066400000000000000000000265731421664411400313120ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import os import logging import pytest from lib389.utils import * from lib389._constants import * from lib389.replica import Replicas, ReplicationManager from lib389.agreement import Agreements from lib389.dseldif import * from lib389.topologies import topology_m2c2 as topo_m2c2 pytestmark = pytest.mark.tier1 NEW_SUFFIX_NAME = 'test_repl' NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) NEW_BACKEND = 'repl_base' CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) MAXAGE_ATTR = 'nsslapd-changelogmaxage' MAXAGE_STR = '30' TRIMINTERVAL_STR = '5' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def get_agreement(agmts, consumer): # Get agreement towards consumer among the agremment list for agmt in agmts.list(): if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): return agmt return None def test_ruv_url_not_added_if_different_uuid(topo_m2c2): """Check that RUV url is not updated if RUV generation uuid are different :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 :setup: Two suppliers + two consumers replication setup :steps: 1. Generate ldif without replication data 2. Init both suppliers from that ldif (to clear the ruvs and generates different generation uuid) 3. Perform on line init from supplier1 to consumer1 and from supplier2 to consumer2 4. Perform update on both suppliers 5. Check that c1 RUV does not contains URL towards m2 6. Check that c2 RUV does contains URL towards m2 7. Perform on line init from supplier1 to supplier2 8. Perform update on supplier2 9. Check that c1 RUV does contains URL towards m2 :expectedresults: 1. No error while generating ldif 2. No error while importing the ldif file 3. No error and Initialization done. 4. No error 5. 
supplier2 replicaid should not be in the consumer1 RUV 6. supplier2 replicaid should be in the consumer2 RUV 7. No error and Initialization done. 8. No error 9. supplier2 replicaid should be in the consumer1 RUV """ # Variables initialization repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2c2.ms["supplier1"] m2 = topo_m2c2.ms["supplier2"] c1 = topo_m2c2.cs["consumer1"] c2 = topo_m2c2.cs["consumer2"] replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) replicid_m2 = replica_m2.get_rid() agmts_m1 = Agreements(m1, replica_m1.dn) agmts_m2 = Agreements(m2, replica_m2.dn) m1_m2 = get_agreement(agmts_m1, m2) m1_c1 = get_agreement(agmts_m1, c1) m1_c2 = get_agreement(agmts_m1, c2) m2_m1 = get_agreement(agmts_m2, m1) m2_c1 = get_agreement(agmts_m2, c1) m2_c2 = get_agreement(agmts_m2, c2) # Step 1: Generate ldif without replication data m1.stop() m2.stop() ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=None, repl_data=False, outputfile=ldif_file, encrypt=False) # Remove replication metadata that are still in the ldif # _remove_replication_data(ldif_file) # Step 2: Init both suppliers from that ldif m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m1.start() m2.start() # Step 3: Perform on line init from supplier1 to consumer1 # and from supplier2 to consumer2 m1_c1.begin_reinit() m2_c2.begin_reinit() (done, error) = m1_c1.wait_reinit() assert done is True assert error is False (done, error) = m2_c2.wait_reinit() assert done is True assert error is False # Step 4: Perform update on both suppliers repl.test_replication(m1, c1) repl.test_replication(m2, c2) # Step 5: Check that c1 RUV does not contains URL towards m2 ruv = replica_c1.get_ruv() log.debug(f"c1 RUV: {ruv}") url = 
ruv._rid_url.get(replica_m2.get_rid()) if url is None: log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV") else: log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") # Note: this assertion fails if issue 2054 is not fixed. assert False # Step 6: Check that c2 RUV does contains URL towards m2 ruv = replica_c2.get_ruv() log.debug(f"c1 RUV: {ruv} {ruv._rids} ") url = ruv._rid_url.get(replica_m2.get_rid()) if url is None: log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") assert False else: log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") # Step 7: Perform on line init from supplier1 to supplier2 m1_m2.begin_reinit() (done, error) = m1_m2.wait_reinit() assert done is True assert error is False # Step 8: Perform update on supplier2 repl.test_replication(m2, c1) # Step 9: Check that c1 RUV does contains URL towards m2 ruv = replica_c1.get_ruv() log.debug(f"c1 RUV: {ruv} {ruv._rids} ") url = ruv._rid_url.get(replica_m2.get_rid()) if url is None: log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") assert False else: log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): """Check that csngen remote offset is not updated if RUV generation uuid are different :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 :setup: Two suppliers + two consumers replication setup :steps: 1. Disable m1<->m2 agreement to avoid propagate timeSkew 2. Generate ldif without replication data 3. Increase time skew on supplier2 4. Init both suppliers from that ldif (to clear the ruvs and generates different generation uuid) 5. Perform on line init from supplier1 to consumer1 and supplier2 to consumer2 6. Perform update on both suppliers 7: Check that c1 has no time skew 8: Check that c2 has time skew 9. Init supplier2 from supplier1 10. Perform update on supplier2 11. Check that c1 has time skew :expectedresults: 1. No error 2. 
No error while generating ldif 3. No error 4. No error while importing the ldif file 5. No error and Initialization done. 6. No error 7. c1 time skew should be lesser than threshold 8. c2 time skew should be higher than threshold 9. No error and Initialization done. 10. No error 11. c1 time skew should be higher than threshold """ # Variables initialization repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2c2.ms["supplier1"] m2 = topo_m2c2.ms["supplier2"] c1 = topo_m2c2.cs["consumer1"] c2 = topo_m2c2.cs["consumer2"] replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) replicid_m2 = replica_m2.get_rid() agmts_m1 = Agreements(m1, replica_m1.dn) agmts_m2 = Agreements(m2, replica_m2.dn) m1_m2 = get_agreement(agmts_m1, m2) m1_c1 = get_agreement(agmts_m1, c1) m1_c2 = get_agreement(agmts_m1, c2) m2_m1 = get_agreement(agmts_m2, m1) m2_c1 = get_agreement(agmts_m2, c1) m2_c2 = get_agreement(agmts_m2, c2) # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew m1_m2.pause() m2_m1.pause() # Step 2: Generate ldif without replication data m1.stop() m2.stop() ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=None, repl_data=False, outputfile=ldif_file, encrypt=False) # Remove replication metadata that are still in the ldif # _remove_replication_data(ldif_file) # Step 3: Increase time skew on supplier2 timeSkew = 6*3600 # We can modify supplier2 time skew # But the time skew on the consumer may be smaller # depending on when the cnsgen generation time is updated # and when first csn get replicated. 
# Since we use timeSkew has threshold value to detect # whether there are time skew or not, # lets add a significative margin (longer than the test duration) # to avoid any risk of erroneous failure timeSkewMargin = 300 DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) # Step 4: Init both suppliers from that ldif m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) m1.start() m2.start() # Step 5: Perform on line init from supplier1 to consumer1 # and from supplier2 to consumer2 m1_c1.begin_reinit() m2_c2.begin_reinit() (done, error) = m1_c1.wait_reinit() assert done is True assert error is False (done, error) = m2_c2.wait_reinit() assert done is True assert error is False # Step 6: Perform update on both suppliers repl.test_replication(m1, c1) repl.test_replication(m2, c2) # Step 7: Check that c1 has no time skew # Stop server to insure that dse.ldif is uptodate c1.stop() c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] c1_timeSkew = int(c1_nsState['time_skew']) log.debug(f"c1 time skew: {c1_timeSkew}") if (c1_timeSkew >= timeSkew): log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") assert False c1.start() # Step 8: Check that c2 has time skew # Stop server to insure that dse.ldif is uptodate c2.stop() c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] c2_timeSkew = int(c2_nsState['time_skew']) log.debug(f"c2 time skew: {c2_timeSkew}") if (c2_timeSkew < timeSkew): log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}") assert False c2.start() # Step 9: Perform on line init from supplier1 to supplier2 m1_c1.pause() m1_m2.resume() m1_m2.begin_reinit() (done, error) = m1_m2.wait_reinit() assert done is True assert error is False # Step 10: Perform update on supplier2 repl.test_replication(m2, c1) # Step 11: Check that c1 has time skew # Stop server to insure that dse.ldif is uptodate c1.stop() 
def test_cleanallruv_repl(topo_m3):
    """Test that cleanallruv could not break replication if anchor csn in ruv originated in deleted replica

    :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a
    :setup: 3 Suppliers
    :steps:
        1. Configure error log level to 8192 in all suppliers
        2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2
        3. Add test users to 3 suppliers
        4. Launch ClearRuv but withForce
        5. Check the users after CleanRUV, because of changelog trimming, it will effect the CLs
    :expectedresults:
        1. Error logs should be configured successfully
        2. Modify should be successful
        3. Test users should be added successfully
        4. ClearRuv should be launched successfully
        5. Users should be present according to the changelog trimming effect
    """
    M1 = topo_m3.ms["supplier1"]
    M2 = topo_m3.ms["supplier2"]
    M3 = topo_m3.ms["supplier3"]

    log.info("Change the error log levels for all suppliers")
    for s in (M1, M2, M3):
        s.config.replace('nsslapd-errorlog-level', "8192")

    log.info("Get the replication agreements for all 3 suppliers")
    m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port)

    log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2")
    if ds_supports_new_changelog():
        CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)

        # set_value(M1, MAXAGE_ATTR, MAXAGE_STR)
        try:
            M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, ensure_bytes(MAXAGE_STR))])
        except ldap.LDAPError as e:
            # BUGFIX: the original had a stray ", +" inside this call which
            # applied a unary plus to a str and raised TypeError instead of
            # logging the failure.  Build the message as one string.
            log.error('Failed to add ' + MAXAGE_ATTR + ': ' + MAXAGE_STR +
                      ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
            assert False

        # set_value(M2, TRIMINTERVAL, TRIMINTERVAL_STR)
        try:
            M2.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, TRIMINTERVAL, ensure_bytes(TRIMINTERVAL_STR))])
        except ldap.LDAPError as e:
            # BUGFIX: same stray ", +" defect as above.
            log.error('Failed to add ' + TRIMINTERVAL + ': ' + TRIMINTERVAL_STR +
                      ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
            assert False
    else:
        log.info("Get the changelog enteries for M1 and M2")
        changelog_m1 = Changelog5(M1)
        changelog_m1.set_max_age(MAXAGE_STR)
        changelog_m1.set_trim_interval(TRIMINTERVAL_STR)

    log.info("Add test users to 3 suppliers")
    users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
    users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
    user_props = TEST_USER_PROPERTIES.copy()

    user_props.update({'uid': "testuser10"})
    user10 = users_m1.create(properties=user_props)

    user_props.update({'uid': "testuser20"})
    user20 = users_m2.create(properties=user_props)

    user_props.update({'uid': "testuser30"})
    user30 = users_m3.create(properties=user_props)

    # ::important:: the testuser31 is the oldest csn in M2,
    # because it will be cleared by changelog trimming
    user_props.update({'uid': "testuser31"})
    user31 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser11"})
    user11 = users_m1.create(properties=user_props)

    user_props.update({'uid': "testuser21"})
    user21 = users_m2.create(properties=user_props)
    # this is to trigger changelog trim and interval values
    time.sleep(40)

    # Here M1, M2, M3 should have 11,21,31 and 10,20,30 are CL cleared
    M2.stop()
    M1.agreement.pause(m1_m2[0].dn)
    user_props.update({'uid': "testuser32"})
    user32 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser33"})
    user33 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser12"})
    user12 = users_m1.create(properties=user_props)

    M3.agreement.pause(m3_m1[0].dn)
    M3.agreement.resume(m3_m1[0].dn)
    time.sleep(40)

    # Here because of changelog trimming testusers 31 and 32 are CL cleared
    # ClearRuv is launched but with Force
    M3.stop()
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
                         force=True, args={TASK_WAIT: False})

    # here M1 should clear 31
    M2.start()
    M1.agreement.pause(m1_m2[0].dn)
    M1.agreement.resume(m1_m2[0].dn)
    time.sleep(10)

    # Check the users after CleanRUV
    expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn]
    expected_m1_users = [x.lower() for x in expected_m1_users]
    expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn]
    expected_m2_users = [x.lower() for x in expected_m2_users]
    current_m1_users = [user.dn for user in users_m1.list()]
    current_m1_users = [x.lower() for x in current_m1_users]
    current_m2_users = [user.dn for user in users_m2.list()]
    current_m2_users = [x.lower() for x in current_m2_users]

    assert set(expected_m1_users).issubset(current_m1_users)
    assert set(expected_m2_users).issubset(current_m2_users)
Success """ # Gather all of our objects for the test m1 = topo.ms["supplier1"] m2 = topo.ms["supplier2"] supplier1_replica = Replicas(m1).get(DEFAULT_SUFFIX) supplier2_replica = Replicas(m2).get(DEFAULT_SUFFIX) supplier2_users = UserAccounts(m2, DEFAULT_SUFFIX) m1_agmt = supplier1_replica.get_agreements().list()[0] num_of_original_users = len(supplier2_users.list()) # Change the member's passwords which should break replication bind_group = Group(m2, dn=BIND_GROUP_DN) members = bind_group.list_members() for member_dn in members: member = UserAccount(m2, dn=member_dn) member.replace('userPassword', 'not_right') time.sleep(3) m1_agmt.pause() m1_agmt.resume() # Verify replication is not working, a new user should not be replicated users = UserAccounts(m1, DEFAULT_SUFFIX) test_user = users.ensure_state(properties=TEST_USER_PROPERTIES) time.sleep(3) assert len(supplier2_users.list()) == num_of_original_users # Create a repl manager on replica repl_mgr = BootstrapReplicationManager(m2, dn=BOOTSTRAP_MGR_DN) mgr_properties = { 'uid': 'replication manager', 'cn': 'replication manager', 'userPassword': BOOTSTRAP_MGR_PWD, } repl_mgr.create(properties=mgr_properties) # Update supplier 2 config supplier2_replica.remove_all('nsDS5ReplicaBindDNGroup') supplier2_replica.remove_all('nsDS5ReplicaBindDnGroupCheckInterval') supplier2_replica.replace('nsDS5ReplicaBindDN', BOOTSTRAP_MGR_DN) # Add bootstrap credentials to supplier1 agmt, and restart agmt m1_agmt.replace('nsds5ReplicaBootstrapTransportInfo', 'LDAP') m1_agmt.replace('nsds5ReplicaBootstrapBindMethod', 'SIMPLE') m1_agmt.replace('nsds5ReplicaBootstrapCredentials', BOOTSTRAP_MGR_PWD) m1_agmt.replace('nsds5ReplicaBootstrapBindDN', BOOTSTRAP_MGR_DN) m1_agmt.pause() m1_agmt.resume() # Verify replication is working. The user should have been replicated time.sleep(3) assert len(supplier2_users.list()) > num_of_original_users # Finally check if the default credentials are used on the next repl # session. 
Clear out the logs, and disable log buffering. Then # trigger a replication update/session. m1_agmt.pause() m2.stop() m2.deleteLog(m2.accesslog) # Clear out the logs m2.start() m2.config.set('nsslapd-accesslog-logbuffering', 'off') m1_agmt.resume() test_user.delete() time.sleep(3) # We know if the default credentials are used it will fail (err=49) results = m2.ds_access_log.match('.* err=49 .*') assert len(results) > 0 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/replica_config_test.py000066400000000000000000000244511421664411400312440ustar00rootroot00000000000000import logging import pytest import copy import os import ldap from lib389._constants import * from lib389.topologies import topology_st as topo from lib389.replica import Replicas from lib389.agreement import Agreements from lib389.utils import ds_is_older pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) notnum = 'invalid' too_big = '9223372036854775807' overflow = '9999999999999999999999999999999999999999999999999999999999999999999' replica_dict = {'nsDS5ReplicaRoot': 'dc=example,dc=com', 'nsDS5ReplicaType': '3', 'nsDS5Flags': '1', 'nsDS5ReplicaId': '65534', 'nsds5ReplicaPurgeDelay': '604800', 'nsDS5ReplicaBindDN': 'cn=u', 'cn': 'replica'} agmt_dict = {'cn': 'test_agreement', 'nsDS5ReplicaRoot': 'dc=example,dc=com', 'nsDS5ReplicaHost': 'localhost.localdomain', 'nsDS5ReplicaPort': '5555', 'nsDS5ReplicaBindDN': 'uid=tester', 'nsds5ReplicaCredentials': 'password', 'nsDS5ReplicaTransportInfo': 'LDAP', 'nsDS5ReplicaBindMethod': 'SIMPLE'} repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'), ('nsDS5Flags', '-1', '2', overflow, notnum, '1'), 
('nsDS5ReplicaId', '0', '65536', overflow, notnum, '1'), ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'), ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] agmt_attrs = [ ('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'), ('nsds5ReplicaTimeout', '-1', too_big, overflow, notnum, '6'), ('nsds5ReplicaBusyWaitTime', '-1', too_big, overflow, notnum, '6'), ('nsds5ReplicaSessionPauseTime', '-1', too_big, overflow, notnum, '6'), ('nsds5ReplicaFlowControlWindow', '-1', too_big, overflow, notnum, '6'), ('nsds5ReplicaFlowControlPause', '-1', too_big, overflow, notnum, '6'), ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '6') ] def replica_reset(topo): """Purge all existing replica details""" replicas = Replicas(topo.standalone) for r in replicas.list(): r.delete() def replica_setup(topo): """Add a valid replica config entry to modify """ replicas = Replicas(topo.standalone) for r in replicas.list(): r.delete() return replicas.create(properties=replica_dict) def agmt_reset(topo): """Purge all existing 
agreements for testing""" agmts = Agreements(topo.standalone) for a in agmts.list(): a.delete() def agmt_setup(topo): """Add a valid replica config entry to modify """ # Reset the agreements too. replica = replica_setup(topo) agmts = Agreements(topo.standalone, basedn=replica.dn) for a in agmts.list(): a.delete() return agmts.create(properties=agmt_dict) def perform_invalid_create(many, properties, attr, value): my_properties = copy.deepcopy(properties) my_properties[attr] = value with pytest.raises(ldap.LDAPError) as ei: many.create(properties=my_properties) return ei.value def perform_invalid_modify(o, attr, value): with pytest.raises(ldap.LDAPError) as ei: o.replace(attr, value) return ei.value @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_add_attrs) def test_replica_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry :id: a8b47d4a-a089-4d70-8070-e6181209bf92 :parametrized: yes :setup: standalone instance :steps: 1. Use a value that is too small 2. Use a value that is too big 3. Use a value that overflows the int 4. Use a value with character value (not a number) 5. Use a valid value :expectedresults: 1. Add is rejected 2. Add is rejected 3. Add is rejected 4. Add is rejected 5. 
Add is allowed """ replica_reset(topo) replicas = Replicas(topo.standalone) # Test too small perform_invalid_create(replicas, replica_dict, attr, too_small) # Test too big perform_invalid_create(replicas, replica_dict, attr, too_big) # Test overflow perform_invalid_create(replicas, replica_dict, attr, overflow) # test not a number perform_invalid_create(replicas, replica_dict, attr, notnum) # Test valid value my_replica = copy.deepcopy(replica_dict) my_replica[attr] = valid replicas.create(properties=my_replica) @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_mod_attrs) def test_replica_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry :id: a8b47d4a-a089-4d70-8070-e6181209bf93 :parametrized: yes :setup: standalone instance :steps: 1. Replace a value that is too small 2. Repalce a value that is too big 3. Replace a value that overflows the int 4. Replace a value with character value (not a number) 5. Replace a vlue with a valid value :expectedresults: 1. Value is rejected 2. Value is rejected 3. Value is rejected 4. Value is rejected 5. Value is allowed """ replica = replica_setup(topo) # Value too small perform_invalid_modify(replica, attr, too_small) # Value too big perform_invalid_modify(replica, attr, too_big) # Value overflow perform_invalid_modify(replica, attr, overflow) # Value not a number perform_invalid_modify(replica, attr, notnum) # Value is valid replica.replace(attr, valid) @pytest.mark.xfail(reason="Agreement validation current does not work.") @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry :id: a8b47d4a-a089-4d70-8070-e6181209bf94 :parametrized: yes :setup: standalone instance :steps: 1. Use a value that is too small 2. 
Use a value that is too big 3. Use a value that overflows the int 4. Use a value with character value (not a number) 5. Use a valid value :expectedresults: 1. Add is rejected 2. Add is rejected 3. Add is rejected 4. Add is rejected 5. Add is allowed """ agmt_reset(topo) replica = replica_setup(topo) agmts = Agreements(topo.standalone, basedn=replica.dn) # Test too small perform_invalid_create(agmts, agmt_dict, attr, too_small) # Test too big perform_invalid_create(agmts, agmt_dict, attr, too_big) # Test overflow perform_invalid_create(agmts, agmt_dict, attr, overflow) # test not a number perform_invalid_create(agmts, agmt_dict, attr, notnum) # Test valid value my_agmt = copy.deepcopy(agmt_dict) my_agmt[attr] = valid agmts.create(properties=my_agmt) @pytest.mark.xfail(reason="Agreement validation current does not work.") @pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): """Test all the number values you can set for a replica config entry :id: a8b47d4a-a089-4d70-8070-e6181209bf95 :parametrized: yes :setup: standalone instance :steps: 1. Replace a value that is too small 2. Replace a value that is too big 3. Replace a value that overflows the int 4. Replace a value with character value (not a number) 5. Replace a vlue with a valid value :expectedresults: 1. Value is rejected 2. Value is rejected 3. Value is rejected 4. Value is rejected 5. 
Value is allowed """ agmt = agmt_setup(topo) # Value too small perform_invalid_modify(agmt, attr, too_small) # Value too big perform_invalid_modify(agmt, attr, too_big) # Value overflow perform_invalid_modify(agmt, attr, overflow) # Value not a number perform_invalid_modify(agmt, attr, notnum) # Value is valid agmt.replace(attr, valid) @pytest.mark.skipif(ds_is_older('1.4.1.4'), reason="Not implemented") @pytest.mark.bz1546739 def test_same_attr_yields_same_return_code(topo): """Test that various operations with same incorrect attribute value yield same return code """ attr = 'nsDS5ReplicaId' replica_reset(topo) replicas = Replicas(topo.standalone) e = perform_invalid_create(replicas, replica_dict, attr, too_big) assert type(e) is ldap.UNWILLING_TO_PERFORM replica = replica_setup(topo) e = perform_invalid_modify(replica, attr, too_big) assert type(e) is ldap.UNWILLING_TO_PERFORM if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/ruvstore_test.py000066400000000000000000000160341421664411400301670ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import os import logging import ldap import pytest from ldif import LDIFParser from lib389.replica import Replicas from lib389.backend import Backends from lib389.idm.domain import Domain from lib389.idm.user import UserAccounts from lib389.topologies import topology_m2 as topo from lib389._constants import * pytestmark = pytest.mark.tier1 TEST_ENTRY_NAME = 'rep2lusr' NEW_RDN_NAME = 'ruvusr' ATTRIBUTES = ['objectClass', 'nsUniqueId', 'nsds50ruv', 'nsruvReplicaLastModified'] USER_PROPERTIES = { 'uid': TEST_ENTRY_NAME, 'cn': TEST_ENTRY_NAME, 'sn': TEST_ENTRY_NAME, 'uidNumber': '1001', 'gidNumber': '2001', 'userpassword': PASSWORD, 'description': 'userdesc', 'homeDirectory': '/home/testuser' } DEBUGGING = os.getenv('DEBUGGING', default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) class MyLDIF(LDIFParser): def __init__(self, input): LDIFParser.__init__(self, input) def handle(self, dn, entry): if 'nsuniqueid=' + REPLICA_RUV_UUID in dn: for attr in ATTRIBUTES: assert entry.get(attr), 'Failed to find attribute: {}'.format(attr) log.info('Attribute found in RUV: {}'.format(attr)) def _perform_ldap_operations(topo): """Add a test user, modify description, modrdn user and delete it""" users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) log.info('Adding user to supplier1') tuser = users.create(properties=USER_PROPERTIES) tuser.replace('description', 'newdesc') log.info('Modify RDN of user: {}'.format(tuser.dn)) try: topo.ms['supplier1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) except ldap.LDAPError as e: log.fatal('Failed to modrdn entry: {}'.format(tuser.dn)) raise e tuser = users.get(NEW_RDN_NAME) log.info('Deleting user: {}'.format(tuser.dn)) tuser.delete() def _compare_memoryruv_and_databaseruv(topo, operation_type): """Compare the memoryruv and databaseruv for ldap operations""" log.info('Checking 
memory ruv for ldap: {} operation'.format(operation_type)) replicas = Replicas(topo.ms['supplier1']) replica = replicas.list()[0] memory_ruv = replica.get_attr_val_utf8('nsds50ruv') log.info('Checking database ruv for ldap: {} operation'.format(operation_type)) entry = replicas.get_ruv_entry(DEFAULT_SUFFIX) database_ruv = entry.getValues('nsds50ruv')[0] assert memory_ruv == database_ruv def test_ruv_entry_backup(topo): """Check if db2ldif stores the RUV details in the backup file :id: cbe2c473-8578-4caf-ac0a-841140e41e66 :setup: Replication with two suppliers. :steps: 1. Add user to server. 2. Perform ldap modify, modrdn and delete operations. 3. Stop the server and backup the database using db2ldif task. 4. Start the server and check if correct RUV is stored in the backup file. :expectedresults: 1. Add user should PASS. 2. Ldap operations should PASS. 3. Database backup using db2ldif task should PASS. 4. Backup file should contain the correct RUV details. """ log.info('LDAP operations add, modify, modrdn and delete') _perform_ldap_operations(topo) output_file = os.path.join(topo.ms['supplier1'].get_ldif_dir(), 'supplier1.ldif') log.info('Stopping the server instance to run db2ldif task to create backup file') topo.ms['supplier1'].stop() topo.ms['supplier1'].db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, repl_data=True, outputfile=output_file) log.info('Starting the server after backup') topo.ms['supplier1'].start() log.info('Checking if backup file contains RUV and required attributes') with open(output_file, 'r') as ldif_file: parser = MyLDIF(ldif_file) parser.parse() @pytest.mark.xfail(reason="No method to safety access DB ruv currently exists online.") def test_memoryruv_sync_with_databaseruv(topo): """Check if memory ruv and database ruv are synced :id: 5f38ac5f-6353-460d-bf60-49cafffda5b3 :setup: Replication with two suppliers. :steps: 1. Add user to server and compare memory ruv and database ruv. 2. 
def test_ruv_after_reindex(topo):
    """Test that the tombstone RUV entry is not corrupted after a reindex task

    :id: 988c0fab-1905-4dc5-a45d-fbf195843a33
    :setup: 2 suppliers
    :steps:
        1. Reindex database
        2. Perform some updates
        3. Check error log does not have "_entryrdn_insert_key" errors
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    supplier = topo.ms['supplier1']
    people_ou = Domain(supplier, "ou=people," + DEFAULT_SUFFIX)
    userroot_be = Backends(supplier).get(DEFAULT_BENAME)

    # Rebuild the nsuniqueid index
    userroot_be.reindex(attrs=['nsuniqueid'], wait=True)

    # Generate a handful of updates so the RUV changes
    for counter in range(5):
        people_ou.replace('description', str(counter))

    # Stopping the instance forces the RUV to be written out, which quickly
    # exposes any entryrdn corruption in the error log
    supplier.stop()
    assert not supplier.searchErrorsLog("entryrdn_insert_key")
Should succeeds 6. Should succeeds 7. Should succeeds 8. Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Enable USN plugin on both servers usn1 = USNPlugin(m1) usn2 = USNPlugin(m2) for usn_usn in [usn1, usn2]: usn_usn.enable() for instance in [m1, m2]: instance.restart() # Add user user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) repl_manager = ReplicationManager(DEFAULT_SUFFIX) repl_manager.wait_for_replication(m1, m2, timeout=100) # Check that user propagated to Supplier 2 assert user.dn in [i.dn for i in UserAccounts(m2, DEFAULT_SUFFIX, rdn=None).list()] user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}') # Check user`s USN on Supplier 1 assert user.get_attr_val_utf8('entryusn') # Check user`s USN on Supplier 2 assert user2.get_attr_val_utf8('entryusn') # Delete user user2.delete() repl_manager.wait_for_replication(m1, m2, timeout=100) # Check that deletion of user propagated to Supplier 1 with pytest.raises(ldap.NO_SUCH_OBJECT): user.status() @pytest.mark.bz891866 def test_error_20(topo_m2, _delete_after): """DS returns error 20 when replacing values of a multi-valued attribute (only when replication is enabled) :id: a55bccc6-a64c-11ea-bac8-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Add user 2. Change multivalue attribute :expected results: 1. Should succeeds 2. Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Add user user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) repl_manager = ReplicationManager(DEFAULT_SUFFIX) repl_manager.wait_for_replication(m1, m2, timeout=100) # Change multivalue attribute assert user.replace_many(('cn', 'BUG 891866'), ('cn', 'Test')) @pytest.mark.bz914305 def test_segfaults(topo_m2, _delete_after): """ns-slapd segfaults while trying to delete a tombstone entry :id: 9f8f7388-a64c-11ea-b5f7-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Add new user 2. 
Delete user - should leave tombstone entry 3. Search for tombstone entry 4. Try to delete tombstone entry 5. Check if server is still alive :expected results: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. Should succeeds 5. Should succeeds """ m1 = topo_m2.ms["supplier1"] # Add user user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=10, gid=1) # Delete user - should leave tombstone entry user.delete() tombstones = Tombstones(m1, DEFAULT_SUFFIX) # Search for tombstone entry fil = tombstones.filter("(&(objectClass=nstombstone)(uid=test_user_10))") assert fil # Try to delete tombstone entry for user in fil: user.delete() # Check if server is still alive assert m1.status() def test_adding_deleting(topo_m2, _delete_after): """Adding attribute with 11 values to entry :id: 99842b1e-a64c-11ea-b8e3-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Adding entry 2. Adding attribute with 11 values to entry 3. Removing 4 values from the attribute in the entry :expected results: 1. Should succeeds 2. Should succeeds 3. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] # Adding entry user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) # Adding attribute with 11 values to entry for val1, val2 in [('description', 'first description'), ('description', 'second description'), ('description', 'third description'), ('description', 'fourth description'), ('description', 'fifth description'), ('description', 'sixth description'), ('description', 'seventh description'), ('description', 'eighth description'), ('description', 'nineth description'), ('description', 'tenth description'), ('description', 'eleventh description')]: user.add(val1, val2) # Removing 4 values from the attribute in the entry for val1, val2 in [('description', 'first description'), ('description', 'second description'), ('description', 'third description'), ('description', 'fourth description')]: user.remove(val1, val2) def test_deleting_twice(topo_m2): """Deleting entry twice crashed a server :id: 94045560-a64c-11ea-93d6-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Adding entry 2. Deleting the same entry from s1 3. Deleting the same entry from s2 after some seconds :expected results: 1. Should succeeds 2. Should succeeds 3. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Adding entry user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) repl_manager = ReplicationManager(DEFAULT_SUFFIX) repl_manager.wait_for_replication(m1, m2, timeout=100) user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}') assert user2.status() # Deleting the same entry from s1 user1.delete() repl_manager.wait_for_replication(m1, m2, timeout=100) # Deleting the same entry from s2 after some seconds with pytest.raises(ldap.NO_SUCH_OBJECT): user2.delete() assert m1.status() assert m2.status() def test_rename_entry(topo_m2, _delete_after): """Rename entry crashed a server :id: 3866f9d6-a946-11ea-a3f8-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Adding entry 2. Stop Agreement for both 3. Change description 4. Change will not reflect on other supplier 5. Turn on agreement on both 6. Change will reflect on other supplier :expected results: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. Should succeeds 5. Should succeeds 6. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Adding entry user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) repl_manager = ReplicationManager(DEFAULT_SUFFIX) repl_manager.wait_for_replication(m1, m2, timeout=100) user2 = UserAccount(m2, user1.dn) assert user2.status() # Stop Agreement for both agree1 = Agreements(m1).list()[0] agree2 = Agreements(m2).list()[0] for agree in [agree1, agree2]: agree.pause() # change description user1.replace('description', 'New Des') assert user1.get_attr_val_utf8('description') # Change will not reflect on other supplier with pytest.raises(AssertionError): assert user2.get_attr_val_utf8('description') # Turn on agreement on both for agree in [agree1, agree2]: agree.resume() repl_manager.wait_for_replication(m1, m2, timeout=100) for instance in [user1, user2]: assert instance.get_attr_val_utf8('description') def test_userpassword_attribute(topo_m2, _delete_after): """Modifications of userpassword attribute in an MMR environment were successful however a error message was displayed in the error logs which was curious. :id: bdcf0464-a947-11ea-9f0d-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Add the test user to S1 2. Check that user's has been propogated to Supplier 2 3. modify user's userpassword attribute on supplier 2 4. check the error logs on suppler 1 to make sure the error message is not there :expected results: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. 
Should succeeds """ m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # Add the test user to S1 user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) repl_manager = ReplicationManager(DEFAULT_SUFFIX) repl_manager.wait_for_replication(m1, m2, timeout=100) # Check that user's has been propogated to Supplier 2 user2 = UserAccount(m2, user1.dn) assert user2.status() # modify user's userpassword attribute on supplier 2 user2.replace('userpassword', 'fred1') repl_manager.wait_for_replication(m1, m2, timeout=100) assert user1.get_attr_val_utf8('userpassword') # check the error logs on suppler 1 to make sure the error message is not there assert not m1.searchErrorsLog("can\'t add a change for uid=") def _create_and_delete_tombstone(topo_m2, id): m1 = topo_m2.ms["supplier1"] # Add new user user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=id, gid=id) # Delete user - should leave tombstone entry user1.delete() tombstones = Tombstones(m1, DEFAULT_SUFFIX) # Search for tombstone entry fil = tombstones.filter("(&(objectClass=nstombstone)(uid=test_user_{}*))".format(id))[0] assert fil fil.rename("uid=engineer") assert m1 def test_tombstone_modrdn(topo_m2): """rhds90 crash on tombstone modrdn :id: 846f5042-a948-11ea-ade2-8c16451d917b :setup: MMR with 2 suppliers :steps: 1. Add new user 2. Delete user - should leave tombstone entry 3. Search for tombstone entry 4. Try to modrdn with deleteoldrdn :expected results: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. 
Should succeeds """ for id_id in [11, 12, 13, 14]: _create_and_delete_tombstone(topo_m2, id_id) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/single_master_test.py000066400000000000000000000133451421664411400311340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.replica import ReplicationManager, Replicas from lib389.backend import Backends from lib389.topologies import topology_m1c1 as topo_r # Replication from lib389.topologies import topology_i2 as topo_nr # No replication from lib389._constants import (ReplicaRole, DEFAULT_SUFFIX, REPLICAID_SUPPLIER_1, REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, DEFAULT_BACKUPDIR, RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, defaultProperties) import json pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_mail_attr_repl(topo_r): """Check that no crash happens during mail attribute replication :id: 959edc84-05be-4bf9-a541-53afae482052 :customerscenario: True :setup: Replication setup with supplier and consumer instances, test user on supplier :steps: 1. Check that user was replicated to consumer 2. Back up mail database file 3. Remove mail attribute from the user entry 4. Restore mail database 5. Search for the entry with a substring 'mail=user*' 6. 
Search for the entry once again to make sure that server is alive :expectedresults: 1. The user should be replicated to consumer 2. Operation should be successful 3. The mail attribute should be removed 4. Operation should be successful 5. Search should be successful 6. No crash should happen """ supplier = topo_r.ms["supplier1"] consumer = topo_r.cs["consumer1"] repl = ReplicationManager(DEFAULT_SUFFIX) m_users = UserAccounts(topo_r.ms["supplier1"], DEFAULT_SUFFIX) m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES) m_user.ensure_present('mail', 'testuser@redhat.com') log.info("Check that replication is working") repl.wait_for_replication(supplier, consumer) c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX) c_user = c_users.get('testuser') c_bes = Backends(consumer) c_be = c_bes.get(DEFAULT_SUFFIX) db_dir = c_be.get_attr_val_utf8('nsslapd-directory') mail_db = list(filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir))) assert mail_db, "mail.* wasn't found in {}" mail_db_path = os.path.join(db_dir, mail_db[0]) backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0]) consumer.stop() log.info("Back up {} to {}".format(mail_db_path, backup_path)) shutil.copyfile(mail_db_path, backup_path) consumer.start() log.info("Remove 'mail' attr from supplier") m_user.remove_all('mail') log.info("Wait for the replication to happen") repl.wait_for_replication(supplier, consumer) consumer.stop() log.info("Restore {} to {}".format(backup_path, mail_db_path)) shutil.copyfile(backup_path, mail_db_path) consumer.start() log.info("Make a search for mail attribute in attempt to crash server") c_user.get_attr_val("mail") log.info("Make sure that server hasn't crashed") repl.test_replication(supplier, consumer) def test_lastupdate_attr_before_init(topo_nr): """Check that LastUpdate replica attributes show right values :id: bc8ce431-ff65-41f5-9331-605cbcaaa887 :customerscenario: True :setup: Replication setup with supplier and consumer instances without 
initialization :steps: 1. Check nsds5replicaLastUpdateStart value 2. Check nsds5replicaLastUpdateEnd value 3. Check nsds5replicaLastUpdateStatus value 4. Check nsds5replicaLastUpdateStatusJSON is parsable :expectedresults: 1. nsds5replicaLastUpdateStart should be equal to 0 2. nsds5replicaLastUpdateEnd should be equal to 0 3. nsds5replicaLastUpdateStatus should not be equal to "Replica acquired successfully: Incremental update started" 4. Success """ supplier = topo_nr.ins["standalone1"] consumer = topo_nr.ins["standalone2"] repl = ReplicationManager(DEFAULT_SUFFIX) repl.create_first_supplier(supplier) # Manually create an un-synced consumer. consumer_replicas = Replicas(consumer) consumer_replicas.create(properties={ 'cn': 'replica', 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, 'nsDS5ReplicaId': '65535', 'nsDS5Flags': '0', 'nsDS5ReplicaType': '2', }) agmt = repl.ensure_agreement(supplier, consumer) with pytest.raises(Exception): repl.wait_for_replication(supplier, consumer, timeout=5) assert agmt.get_attr_val_utf8('nsds5replicaLastUpdateStart') == "19700101000000Z" assert agmt.get_attr_val_utf8("nsds5replicaLastUpdateEnd") == "19700101000000Z" assert "replica acquired successfully" not in agmt.get_attr_val_utf8_l("nsds5replicaLastUpdateStatus") # make sure the JSON attribute is parsable json_status = agmt.get_attr_val_utf8("nsds5replicaLastUpdateStatusJSON") if json_status is not None: json_obj = json.loads(json_status) log.debug("JSON status message: {}".format(json_obj)) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py000066400000000000000000000137261421664411400325060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import os import pytest from lib389.utils import ds_is_older from lib389.idm.services import ServiceAccounts from lib389.config import CertmapLegacy from lib389._constants import DEFAULT_SUFFIX from lib389.replica import ReplicationManager, Replicas from lib389.topologies import topology_m2 as topo_m2 pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.fixture(scope="module") def tls_client_auth(topo_m2): """Enable TLS on both suppliers and reconfigure both agreements to use TLS Client auth """ m1 = topo_m2.ms['supplier1'] m2 = topo_m2.ms['supplier2'] if ds_is_older('1.4.0.6'): transport = 'SSL' else: transport = 'LDAPS' # Create the certmap before we restart for enable_tls cm_m1 = CertmapLegacy(m1) cm_m2 = CertmapLegacy(m2) # We need to configure the same maps for both .... certmaps = cm_m1.list() certmaps['default']['DNComps'] = None certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' cm_m1.set(certmaps) cm_m2.set(certmaps) [i.enable_tls() for i in topo_m2] # Create the replication dns services = ServiceAccounts(m1, DEFAULT_SUFFIX) repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) # Check the replication is "done". 
repl = ReplicationManager(DEFAULT_SUFFIX) repl.wait_for_replication(m1, m2) # Now change the auth type replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) agmt_m1 = replica_m1.get_agreements().list()[0] agmt_m1.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', transport), ('nsDS5ReplicaPort', str(m2.sslport)), ) agmt_m1.remove_all('nsDS5ReplicaBindDN') replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) agmt_m2 = replica_m2.get_agreements().list()[0] agmt_m2.replace_many( ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), ('nsDS5ReplicaTransportInfo', transport), ('nsDS5ReplicaPort', str(m1.sslport)), ) agmt_m2.remove_all('nsDS5ReplicaBindDN') repl.test_replication_topology(topo_m2) return topo_m2 def test_ssl_transport(tls_client_auth): """Test different combinations for nsDS5ReplicaTransportInfo values :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2 :setup: Two supplier replication, enabled TLS client auth :steps: 1. Set nsDS5ReplicaTransportInfoCheck: SSL or StartTLS or TLS 2. Restart the instance 3. Check that replication works 4. Set nsDS5ReplicaTransportInfoCheck: LDAPS back :expectedresults: 1. Success 2. Success 3. Replication works 4. 
Success """ m1 = tls_client_auth.ms['supplier1'] m2 = tls_client_auth.ms['supplier2'] repl = ReplicationManager(DEFAULT_SUFFIX) replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) agmt_m1 = replica_m1.get_agreements().list()[0] agmt_m2 = replica_m2.get_agreements().list()[0] if ds_is_older('1.4.0.6'): check_list = (('TLS', False),) else: check_list = (('SSL', True), ('StartTLS', False), ('TLS', False)) for transport, secure_port in check_list: agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', transport), ('nsDS5ReplicaPort', '{}'.format(m2.port if not secure_port else m2.sslport))) agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', transport), ('nsDS5ReplicaPort', '{}'.format(m1.port if not secure_port else m1.sslport))) repl.test_replication_topology(tls_client_auth) if ds_is_older('1.4.0.6'): agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', str(m2.sslport))) agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'), ('nsDS5ReplicaPort', str(m1.sslport))) else: agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'), ('nsDS5ReplicaPort', str(m2.sslport))) agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'), ('nsDS5ReplicaPort', str(m1.sslport))) repl.test_replication_topology(tls_client_auth) def test_extract_pemfiles(tls_client_auth): """Test TLS client authentication between two suppliers operates as expected with 'on' and 'off' options of nsslapd-extract-pemfiles :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e1 :setup: Two supplier replication, enabled TLS client auth :steps: 1. Check that nsslapd-extract-pemfiles default value is right 2. Check that replication works with both 'on' and 'off' values :expectedresults: 1. Success 2. 
Replication works """ m1 = tls_client_auth.ms['supplier1'] m2 = tls_client_auth.ms['supplier2'] repl = ReplicationManager(DEFAULT_SUFFIX) if ds_is_older('1.3.7'): default_val = 'off' else: default_val = 'on' attr_val = m1.config.get_attr_val_utf8('nsslapd-extract-pemfiles') log.info("Check that nsslapd-extract-pemfiles is {}".format(default_val)) assert attr_val == default_val for extract_pemfiles in ('on', 'off'): log.info("Set nsslapd-extract-pemfiles = '{}' and check replication works)") m1.config.set('nsslapd-extract-pemfiles', extract_pemfiles) m2.config.set('nsslapd-extract-pemfiles', extract_pemfiles) repl.test_replication_topology(tls_client_auth) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py000066400000000000000000000075741421664411400315340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m1 from lib389.tombstone import Tombstones from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES from lib389.replica import ReplicationManager from lib389._constants import (defaultProperties, DEFAULT_SUFFIX, ReplicaRole, REPLICAID_SUPPLIER_1, REPLICA_PRECISE_PURGING, REPLICA_PURGE_DELAY, REPLICA_PURGE_INTERVAL) pytestmark = pytest.mark.tier2 def test_precise_tombstone_purging(topology_m1): """ Test precise tombstone purging :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab79 :setup: supplier1 instance :steps: 1. Create and Delete entry to create a tombstone 2. export ldif, edit, and import ldif 3. Check tombstones do not contain nsTombstoneCSN 4. Run fixup task, and verify tombstones now have nsTombstone CSN 5. Configure tombstone purging 6. Verify tombstones are purged :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. 
Success """ m1 = topology_m1.ms['supplier1'] m1_tasks = Tasks(m1) # Create tombstone entry users = UserAccounts(m1, DEFAULT_SUFFIX) user = users.create_test_user(uid=1001) user.delete() # Verify tombstone was created tombstones = Tombstones(m1, DEFAULT_SUFFIX) assert len(tombstones.list()) == 1 # Export db, strip nsTombstoneCSN, and import it ldif_file = "{}/export.ldif".format(m1.get_ldif_dir()) args = {EXPORT_REPL_INFO: True, TASK_WAIT: True} m1_tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) time.sleep(.5) # Strip LDIF of nsTombstoneCSN, getthe LDIF lines, the n create new ldif ldif = open(ldif_file, "r") lines = ldif.readlines() ldif.close() time.sleep(.5) ldif = open(ldif_file, "w") for line in lines: if not line.lower().startswith('nstombstonecsn'): ldif.write(line) ldif.close() time.sleep(.5) # import the new ldif file log.info('Import replication LDIF file...') args = {TASK_WAIT: True} m1_tasks.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) time.sleep(.5) # Search for the tombstone again tombstones = Tombstones(m1, DEFAULT_SUFFIX) assert len(tombstones.list()) == 1 # # Part 3 - test fixup task using the strip option. 
# args = {TASK_WAIT: True, TASK_TOMB_STRIP: True} m1_tasks.fixupTombstones(DEFAULT_BENAME, args) time.sleep(.5) # Search for tombstones with nsTombstoneCSN - better not find any for ts in tombstones.list(): assert not ts.present("nsTombstoneCSN") # Now run the fixup task args = {TASK_WAIT: True} m1_tasks.fixupTombstones(DEFAULT_BENAME, args) time.sleep(.5) # Search for tombstones with nsTombstoneCSN - better find some tombstones = Tombstones(m1, DEFAULT_SUFFIX) assert len(tombstones.list()) == 1 # # Part 4 - Test tombstone purging # args = {REPLICA_PRECISE_PURGING: b'on', REPLICA_PURGE_DELAY: b'5', REPLICA_PURGE_INTERVAL: b'5'} m1.replica.setProperties(DEFAULT_SUFFIX, None, None, args) # Wait for the interval to pass log.info('Wait for tombstone purge interval to pass...') time.sleep(6) # Add an entry to trigger replication users.create_test_user(uid=1002) # Wait for the interval to pass again log.info('Wait for tombstone purge interval to pass again...') time.sleep(6) # search for tombstones, there should be none tombstones = Tombstones(m1, DEFAULT_SUFFIX) assert len(tombstones.list()) == 0 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/tombstone_test.py000066400000000000000000000033311421664411400303040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m1 from lib389.tombstone import Tombstones from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES pytestmark = pytest.mark.tier1 def test_purge_success(topology_m1): """Verify that tombstones are created successfully :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab78 :setup: Standalone instance :steps: 1. Enable replication to unexisting instance 2. Add an entry to the replicated suffix 3. Delete the entry 4. 
Check that tombstone entry exists (objectclass=nsTombstone) :expectedresults: Tombstone entry exist 1. Operation should be successful 2. The entry should be successfully added 3. The entry should be successfully deleted 4. Tombstone entry should exist """ m1 = topology_m1.ms['supplier1'] users = UserAccounts(m1, DEFAULT_SUFFIX) user = users.create(properties=TEST_USER_PROPERTIES) tombstones = Tombstones(m1, DEFAULT_SUFFIX) assert len(tombstones.list()) == 0 user.delete() assert len(tombstones.list()) == 1 assert len(users.list()) == 0 ts = tombstones.get('testuser') assert ts.exists() if not ds_is_older('1.4.0'): ts.revive() assert len(users.list()) == 1 user_revived = users.get('testuser') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py000066400000000000000000000156711421664411400330260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # from collections import Counter import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389._constants import SUFFIX, DEFAULT_SUFFIX, ErrorLog from lib389.agreement import Agreements from lib389.idm.organizationalunit import OrganizationalUnits pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) installation1_prefix = None @pytest.fixture(params=[(None, (4, 11)), ('2000', (0, 2)), ('0', (4, 11)), ('-5', (4, 11))]) def waitfor_async_attr(topology_m2, request): """Sets attribute on all replicas""" attr_value = request.param[0] expected_result = request.param[1] # Run through all suppliers for supplier in topology_m2.ms.values(): agmt = Agreements(supplier).list()[0] if attr_value: agmt.set_wait_for_async_results(attr_value) else: try: # Sometimes we can double remove this. agmt.remove_wait_for_async_results() except ldap.NO_SUCH_ATTRIBUTE: pass return (attr_value, expected_result) @pytest.fixture def entries(topology_m2, request): """Adds entries to the supplier1""" supplier1 = topology_m2.ms["supplier1"] test_list = [] log.info("Add 100 nested entries under replicated suffix on %s" % supplier1.serverid) ous = OrganizationalUnits(supplier1, DEFAULT_SUFFIX) for i in range(100): ou = ous.create(properties={ 'ou' : 'test_ou_%s' % i, }) test_list.append(ou) log.info("Delete created entries") for test_ou in test_list: test_ou.delete() def fin(): log.info("Clear the errors log in the end of the test case") with open(supplier1.errlog, 'w') as errlog: errlog.writelines("") request.addfinalizer(fin) def test_not_int_value(topology_m2): """Tests not integer value :id: 67c9994f-9251-425a-8197-8d12ad9beafc :setup: Replication with two suppliers :steps: 1. Try to set some string value to nsDS5ReplicaWaitForAsyncResults :expectedresults: 1. 
Invalid syntax error should be raised """ supplier1 = topology_m2.ms["supplier1"] agmt = Agreements(supplier1).list()[0] with pytest.raises(ldap.INVALID_SYNTAX): agmt.set_wait_for_async_results("ws2") def test_multi_value(topology_m2): """Tests multi value :id: 1932301a-db29-407e-b27e-4466a876d1d3 :setup: Replication with two suppliers :steps: 1. Set nsDS5ReplicaWaitForAsyncResults to some int 2. Try to add one more int value to nsDS5ReplicaWaitForAsyncResults :expectedresults: 1. nsDS5ReplicaWaitForAsyncResults should be set 2. Object class violation error should be raised """ supplier1 = topology_m2.ms["supplier1"] agmt = Agreements(supplier1).list()[0] agmt.set_wait_for_async_results('100') with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): agmt.add('nsDS5ReplicaWaitForAsyncResults', '101') def test_value_check(topology_m2, waitfor_async_attr): """Checks that value has been set correctly :id: 3e81afe9-5130-410d-a1bb-d798d8ab8519 :parametrized: yes :setup: Replication with two suppliers, wait for async set on all suppliers, try: None, '2000', '0', '-5' :steps: 1. Search for nsDS5ReplicaWaitForAsyncResults on supplier 1 2. Search for nsDS5ReplicaWaitForAsyncResults on supplier 2 :expectedresults: 1. nsDS5ReplicaWaitForAsyncResults should be set correctly 2. nsDS5ReplicaWaitForAsyncResults should be set correctly """ attr_value = waitfor_async_attr[0] for supplier in topology_m2.ms.values(): agmt = Agreements(supplier).list()[0] server_value = agmt.get_wait_for_async_results_utf8() assert server_value == attr_value def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): """Tests replication behavior with valid nsDS5ReplicaWaitForAsyncResults attribute values :id: 117b6be2-cdab-422e-b0c7-3b88bbeec036 :parametrized: yes :setup: Replication with two suppliers, wait for async set on all suppliers, try: None, '2000', '0', '-5' :steps: 1. Set Replication Debugging loglevel for the errorlog 2. 
Set nsslapd-logging-hr-timestamps-enabled to 'off' on both suppliers 3. Gather all sync attempts, group by timestamp 4. Take the most common timestamp and assert it has appeared in the set range :expectedresults: 1. Replication Debugging loglevel should be set 2. nsslapd-logging-hr-timestamps-enabled should be set 3. Operation should be successful 4. Errors log should have all timestamp appear """ supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] log.info("Set Replication Debugging loglevel for the errorlog") supplier1.config.loglevel((ErrorLog.REPLICA,)) supplier2.config.loglevel((ErrorLog.REPLICA,)) sync_dict = Counter() min_ap = waitfor_async_attr[1][0] max_ap = waitfor_async_attr[1][1] time.sleep(20) log.info("Gather all sync attempts within Counter dict, group by timestamp") with open(supplier1.errlog, 'r') as errlog: errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog) # Watch only over unsuccessful sync attempts for line in errlog_filtered: if line.split()[3] != line.split()[4]: # A timestamp looks like: # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE # We want to assert a range of "seconds", so we need to reduce # this to a reasonable amount. IE: # [03/Jan/2018:14:35:15 # So to achieve this we split on ] and . IE. 
# [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE # ^ split here first # ^ now split here # [03/Jan/2018:14:35:15 # ^ final result timestamp = line.split(']')[0].split('.')[0] sync_dict[timestamp] += 1 log.info("Take the most common timestamp and assert it has appeared " \ "in the range from %s to %s times" % (min_ap, max_ap)) most_common_val = sync_dict.most_common(1)[0][1] log.debug("%s <= %s <= %s" % (min_ap, most_common_val, max_ap)) assert min_ap <= most_common_val <= max_ap if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/resource_limits/000077500000000000000000000000001421664411400255605ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/resource_limits/__init__.py000066400000000000000000000000661421664411400276730ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Resource Limits """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py000066400000000000000000000050471421664411400310120ustar00rootroot00000000000000import logging import pytest import os import ldap import resource from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import ds_is_older, ensure_str from subprocess import check_output pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) FD_ATTR = "nsslapd-maxdescriptors" GLOBAL_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[1] SYSTEMD_LIMIT = ensure_str(check_output("systemctl show -p LimitNOFILE dirsrv@standalone1".split(" ")).strip()).split('=')[1] CUSTOM_VAL = str(int(SYSTEMD_LIMIT) - 10) TOO_HIGH_VAL = str(GLOBAL_LIMIT * 2) TOO_HIGH_VAL2 = str(int(SYSTEMD_LIMIT) * 2) TOO_LOW_VAL = "0" @pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") def test_fd_limits(topology_st): """Test the default 
limits, and custom limits :id: fa0a5106-612f-428f-84c0-9c85c34d0433 :setup: Standalone Instance :steps: 1. Check default limit 2. Change default limit 3. Check invalid/too high limits are rejected 4. Check invalid/too low limit is rejected :expectedresults: 1. Success 2. Success 3. Success 4. Success """ # Check systemd default max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) assert max_fd == SYSTEMD_LIMIT # Check custom value is applied topology_st.standalone.config.set(FD_ATTR, CUSTOM_VAL) max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) assert max_fd == CUSTOM_VAL # # Attempt to use value that is higher than the global system limit with pytest.raises(ldap.UNWILLING_TO_PERFORM): topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL) max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) assert max_fd == CUSTOM_VAL # Attempt to use value that is higher than the value defined in the systemd service with pytest.raises(ldap.UNWILLING_TO_PERFORM): topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL2) max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) assert max_fd == CUSTOM_VAL # Attempt to use val that is too low with pytest.raises(ldap.OPERATIONS_ERROR): topology_st.standalone.config.set(FD_ATTR, TOO_LOW_VAL) max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) assert max_fd == CUSTOM_VAL log.info("Test PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/retrocl/000077500000000000000000000000001421664411400240225ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/retrocl/__init__.py000066400000000000000000000000751421664411400261350ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Retro Changelog plugin """ 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/retrocl/basic_test.py000066400000000000000000000244551421664411400265260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import ldap import pytest from lib389.topologies import topology_st from lib389.plugins import RetroChangelogPlugin from lib389._constants import * from lib389.utils import * from lib389.tasks import * from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance from lib389.cli_base.dsrc import dsrc_arg_concat from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.domain import Domain from lib389._mapped_object import DSLdapObjects pytestmark = pytest.mark.tier1 USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX USER_PW = 'password' ATTR_HOMEPHONE = 'homePhone' ATTR_CARLICENSE = 'carLicense' log = logging.getLogger(__name__) #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_retrocl_exclude_attr_add(topology_st): """ Test exclude attribute feature of the retrocl plugin for add operation :id: 3481650f-2070-45ef-9600-2500cfc51559 :setup: Standalone instance :steps: 1. Enable dynamic plugins 2. Confige retro changelog plugin 3. Add an entry 4. Ensure entry attrs are in the changelog 5. Exclude an attr 6. Add another entry 7. Ensure excluded attr is not in the changelog :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ st = topology_st.standalone log.info('Configure retrocl plugin') rcl = RetroChangelogPlugin(st) rcl.disable() rcl.enable() rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') log.info('Restarting instance') try: st.restart() except ldap.LDAPError as e: ldap.error('Failed to restart instance ' + e.args[0]['desc']) assert False users = UserAccounts(st, DEFAULT_SUFFIX) log.info('Adding user1') try: users.create(properties={ 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'uidNumber': '11', 'gidNumber': '111', 'givenname': 'user1', 'homePhone': '0861234567', 'carLicense': '131D16674', 'mail': 'user1@whereever.com', 'homeDirectory': '/home/user1', 'userpassword': USER_PW}) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error("Failed to add user1: " + str(e)) log.info('Verify homePhone and carLicense attrs are in the changelog changestring') try: retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') except ldap.LDAPError as e: log.fatal("Changelog search failed, error: " + str(e)) assert False assert len(cllist) > 0 if cllist[0].present('changes'): clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert ATTR_HOMEPHONE in clstr assert ATTR_CARLICENSE in clstr log.info('Excluding attribute ' + ATTR_HOMEPHONE) args = FakeArgs() args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] args.instance = 'standalone1' args.basedn = None args.binddn = None args.starttls = False args.pwdfile = None args.bindpw = None args.prompt = False args.exclude_attrs = ATTR_HOMEPHONE args.func = retrochangelog_add dsrc_inst = dsrc_arg_concat(args, None) inst = connect_instance(dsrc_inst, False, args) result = args.func(inst, None, log, args) disconnect_instance(inst) assert result is None log.info('Restarting instance') try: st.restart() except ldap.LDAPError as e: ldap.error('Failed to restart instance ' + e.args[0]['desc']) assert False log.info('Adding 
user2') try: users.create(properties={ 'sn': '2', 'cn': 'user 2', 'uid': 'user2', 'uidNumber': '22', 'gidNumber': '222', 'givenname': 'user2', 'homePhone': '0879088363', 'carLicense': '04WX11038', 'mail': 'user2@whereever.com', 'homeDirectory': '/home/user2', 'userpassword': USER_PW}) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error("Failed to add user2: " + str(e)) log.info('Verify homePhone attr is not in the changelog changestring') try: cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})') assert len(cllist) > 0 if cllist[0].present('changes'): clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert ATTR_HOMEPHONE not in clstr assert ATTR_CARLICENSE in clstr except ldap.LDAPError as e: log.fatal("Changelog search failed, error: " + str(e)) assert False #unstable or unstatus tests, skipped for now @pytest.mark.flaky(max_runs=2, min_passes=1) def test_retrocl_exclude_attr_mod(topology_st): """ Test exclude attribute feature of the retrocl plugin for mod operation :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 :setup: Standalone instance :steps: 1. Enable dynamic plugins 2. Confige retro changelog plugin 3. Add user1 entry 4. Ensure entry attrs are in the changelog 5. Exclude an attr 6. Modify user1 entry 7. Ensure excluded attr is not in the changelog :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. 
Success """ st = topology_st.standalone log.info('Configure retrocl plugin') rcl = RetroChangelogPlugin(st) rcl.disable() rcl.enable() rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') log.info('Restarting instance') try: st.restart() except ldap.LDAPError as e: ldap.error('Failed to restart instance ' + e.args[0]['desc']) assert False users = UserAccounts(st, DEFAULT_SUFFIX) log.info('Adding user1') try: user1 = users.create(properties={ 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'uidNumber': '11', 'gidNumber': '111', 'givenname': 'user1', 'homePhone': '0861234567', 'carLicense': '131D16674', 'mail': 'user1@whereever.com', 'homeDirectory': '/home/user1', 'userpassword': USER_PW}) except ldap.ALREADY_EXISTS: user1 = UserAccount(st, dn=USER1_DN) except ldap.LDAPError as e: log.error("Failed to add user1: " + str(e)) log.info('Verify homePhone and carLicense attrs are in the changelog changestring') try: retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') except ldap.LDAPError as e: log.fatal("Changelog search failed, error: " + str(e)) assert False assert len(cllist) > 0 if cllist[0].present('changes'): clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert ATTR_HOMEPHONE in clstr assert ATTR_CARLICENSE in clstr log.info('Excluding attribute ' + ATTR_CARLICENSE) args = FakeArgs() args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] args.instance = 'standalone1' args.basedn = None args.binddn = None args.starttls = False args.pwdfile = None args.bindpw = None args.prompt = False args.exclude_attrs = ATTR_CARLICENSE args.func = retrochangelog_add dsrc_inst = dsrc_arg_concat(args, None) inst = connect_instance(dsrc_inst, False, args) result = args.func(inst, None, log, args) disconnect_instance(inst) assert result is None log.info('Restarting instance') try: st.restart() except ldap.LDAPError as e: ldap.error('Failed to restart instance ' + 
e.args[0]['desc']) assert False log.info('Modify user1 carLicense attribute') try: user1.replace(ATTR_CARLICENSE, "123WX321") except ldap.LDAPError as e: log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) assert False log.info('Verify carLicense attr is not in the changelog changestring') try: cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') assert len(cllist) > 0 # There will be 2 entries in the changelog for this user, we are only #interested in the second one, the modify operation. if cllist[1].present('changes'): clstr = str(cllist[1].get_attr_vals_utf8('changes')) assert ATTR_CARLICENSE not in clstr except ldap.LDAPError as e: log.fatal("Changelog search failed, error: " + str(e)) assert False def test_retrocl_trimming(topology_st): """Test retrocl trimming works :id: 54c6747f-6772-43b7-8b03-09e13fa0c205 :setup: Standalone Instance :steps: 1. Enable Retro changelog 2. Add a bunch of entries 3. Configure trimming 4. Verify trimming occurred :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ inst = topology_st.standalone # Configure plugin log.info('Configure retrocl plugin') rcl = RetroChangelogPlugin(inst) rcl.enable() inst.restart() # Do some updates suffix = Domain(inst, DEFAULT_SUFFIX) for idx in range(0, 10): suffix.replace('description', str(idx)) # Setup trimming rcl.replace('nsslapd-changelog-trim-interval', '2') rcl.replace('nsslapd-changelogmaxage', '5s') inst.config.set('nsslapd-errorlog-level', '65536') # plugin logging inst.restart() # Verify trimming occurs time.sleep(5) assert inst.searchErrorsLog("trim_changelog: removed ") # Clean up inst.config.set('nsslapd-errorlog-level', '0') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py000066400000000000000000000043741421664411400310020ustar00rootroot00000000000000import logging import pytest import os from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX from lib389.topologies import topology_st as topo from lib389.plugins import RetroChangelogPlugin from lib389.idm.user import UserAccounts from lib389._mapped_object import DSLdapObjects log = logging.getLogger(__name__) def test_indexing_is_online(topo): """Test that the changenmumber index is online right after enabling the plugin :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f :setup: Standalone Instance :steps: 1. Enable retro cl 2. Perform some updates 3. Search for "(changenumber>=-1)", and it is not partially unindexed 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ # Enable plugin topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') plugin = RetroChangelogPlugin(topo.standalone) plugin.enable() topo.standalone.restart() # Do a bunch of updates users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user_entry = users.create(properties={ 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'uidNumber': '11', 'gidNumber': '111', 'givenname': 'user1', 'homePhone': '0861234567', 'carLicense': '131D16674', 'mail': 'user1@whereever.com', 'homeDirectory': '/home' }) for count in range(0, 10): user_entry.replace('mail', f'test{count}@test.com') # Search the retro cl, and check for error messages filter_simple = '(changenumber>=-1)' filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))' retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX) retro_changelog_suffix.filter(filter_simple) assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') # Search the retro cl again with compound filter retro_changelog_suffix.filter(filter_compound) assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/rewriters/000077500000000000000000000000001421664411400243765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/rewriters/__init__.py000066400000000000000000000000601421664411400265030ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Rewriters """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/rewriters/adfilter_test.py000066400000000000000000000161171421664411400276070ustar00rootroot00000000000000import pytest import glob import base64 import re from lib389.tasks import * from lib389.rewriters import * from lib389.idm.user import UserAccounts from lib389.utils import * from lib389.topologies import topology_st from 
lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE samba_missing = False try: from samba.dcerpc import security from samba.ndr import ndr_pack, ndr_unpack except: samba_missing = True pass log = logging.getLogger(__name__) # Skip on versions 1.4.2 and before. Rewriters are expected in 1.4.3 pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented")] PW = 'password' # # Necessary because objectcategory relies on cn=xxx RDN # while userAccount creates uid=xxx RDN # def _create_user(inst, schema_container, name, salt): dn = 'cn=%s,%s' % (name, schema_container) inst.add_s(Entry(( dn, { 'objectClass': 'top person extensibleobject'.split(), 'cn': name, 'sn': name, 'objectcategory': dn, "description" : salt, 'userpassword': PW }))) def test_adfilter_objectCategory(topology_st): """ Test adfilter objectCategory rewriter function """ librewriters = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/librewriters.so') assert librewriters rewriters = AdRewriters(topology_st.standalone) ad_rewriter = rewriters.ensure_state(properties={"cn": "adfilter", "nsslapd-libpath": librewriters}) ad_rewriter.add('nsslapd-filterrewriter', "adfilter_rewrite_objectCategory") ad_rewriter.create_containers(DEFAULT_SUFFIX) schema_container = ad_rewriter.get_schema_dn() objectcategory_attr = '( NAME \'objectCategory\' DESC \'test of objectCategory\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) topology_st.standalone.restart(60) # Add a user for i in range(0, 20): _create_user(topology_st.standalone, schema_container, "user_%d" % i, str(i)) # Check EQUALITY filter rewrite => it should match only one entry for i in range(0, 20): ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=user_%d)' % i) assert len(ents) == 1 # Check SUBSTRING search is not replaced ents = 
topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=user_*)') assert len(ents) == 0 # Check PRESENCE search is not replaced so it selects all entries having objectCategory ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=*)') assert len(ents) == 20 log.info('Test PASSED') def sid_to_objectsid(sid): return base64.b64encode(ndr_pack(security.dom_sid(sid))).decode('utf-8') def objectsid_to_sid(objectsid): sid = ndr_unpack(security.dom_sid, base64.b64decode(objectsid)) return str(sid) @pytest.mark.skipif(samba_missing, reason="It is missing samba python bindings") def test_adfilter_objectSid(topology_st): """ Test adfilter objectCategory rewriter function :id: fc5880ff-4305-47ba-84fb-38429e264e9e :setup: Standalone instance :steps: 1. add a objectsid rewriter (from librewriters.so) 2. add a dummy schema definition of objectsid to prevent nsslapd-verify-filter-schema 3. restart the server (to load the rewriter) 4. Add "samba" container/users 5. Searches using objectsid in string format :expectedresults: 1. Add operation should PASS. 2. Add operations should PASS. 3. restart should PASS 4. Add "samba" users should PASS 5. 
Search returns only one entry """ librewriters = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/librewriters.so') assert librewriters rewriters = AdRewriters(topology_st.standalone) ad_rewriter = rewriters.ensure_state(properties={"cn": "adfilter", "nsslapd-libpath": librewriters}) ad_rewriter.add('nsslapd-filterrewriter', "adfilter_rewrite_objectsid") ad_rewriter.create_containers(DEFAULT_SUFFIX) schema_container = ad_rewriter.get_schema_dn() # to prevent nsslapd-verify-filter-schema to reject searches with objectsid objectcategory_attr = '( NAME \'objectsid\' DESC \'test of objectsid\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )' topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) topology_st.standalone.restart() # Contains a list of b64encoded SID from https://github.com/SSSD/sssd/blob/supplier/src/tests/intg/data/ad_data.ldif SIDs = ["AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EUAQAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9gEAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAwIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBAIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBgIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBwIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBQIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAAIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAQIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAgIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ECAIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EKQIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EOwIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EPAIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ECQIAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E8gEAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETQQAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETgQAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EeUMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EekMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8Ee0MBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EfEMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETwQAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EUQQAAA==", 
"AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ESUMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ESkMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ES0MBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETEMBAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9AEAAA==", "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9QEAAA=="] # Add a container and "samba" like users containing objectsid users = UserAccounts(topology_st.standalone, schema_container, rdn=None) i = 0 for sid in SIDs: decoded = base64.b64decode(sid) user = users.create_test_user(uid=i) user.add('objectclass', 'extensibleobject') user.replace('objectsid', decoded) user.replace('objectSidString', objectsid_to_sid(sid)) i = i + 1 # Check that objectsid rewrite can retrieve the "samba" user # using either a string objectsid (i.e. S-1-5...) or a blob objectsid for sid_blob in SIDs: sid_string = objectsid_to_sid(sid_blob) ents_sid_string = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectsid=%s)' % sid_string) assert len(ents_sid_string) == 1 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/rewriters/basic_test.py000066400000000000000000000032431421664411400270720ustar00rootroot00000000000000import pytest import glob from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE log = logging.getLogger(__name__) # Skip on versions 1.4.2 and before. 
Rewriters are expected in 1.4.3 pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented")] rewriters_container = "cn=rewriters,cn=config" def test_rewriters_container(topology_st): """ Test checks that rewriters container exists """ # Check container of rewriters ents = topology_st.standalone.search_s(rewriters_container, ldap.SCOPE_BASE, '(objectclass=*)') assert len(ents) == 1 log.info('Test PASSED') def test_foo_filter_rewriter(topology_st): """ Test that example filter rewriter 'foo' is register and search use it """ libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so.0') # register foo filter rewriters topology_st.standalone.add_s(Entry(( "cn=foo_filter,cn=rewriters,cn=config", { "objectClass": "top", "objectClass": "extensibleObject", "cn": "foo_filter", "nsslapd-libpath": libslapd, "nsslapd-filterrewriter": "example_foo2cn_filter_rewriter", } ))) topology_st.standalone.restart(60) # Check that the filter 'foo=foo' is rewritten into 'cn=foo' ents = topology_st.standalone.search_s(rewriters_container, ldap.SCOPE_SUBTREE, '(foo=foo_filter)') assert len(ents) > 0 assert ents[0].dn == "cn=foo_filter,cn=rewriters,cn=config" log.info('Test PASSED') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/roles/000077500000000000000000000000001421664411400234745ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/roles/__init__.py000066400000000000000000000000541421664411400256040ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Roles """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/roles/basic_test.py000066400000000000000000000452051421664411400261740ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ Importing necessary Modules. 
""" import logging import time import os import pytest from lib389._constants import PW_DM, DEFAULT_SUFFIX from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organization import Organization from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles from lib389.idm.domain import Domain logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) pytestmark = pytest.mark.tier1 DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) SALES_OU = "ou=sales,{}".format(DNBASE) ENG_OU = "ou=eng,{}".format(DNBASE) FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) def test_filterrole(topo, request): """Test Filter Role :id: 8ada4064-786b-11e8-8634-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. Search nsconsole role :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) properties = { 'ou': 'eng', } ou_ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) ou_ou.create(properties=properties) properties = {'ou': 'sales'} ou_ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) ou_ou.create(properties=properties) roles = FilteredRoles(topo.standalone, DNBASE) roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) properties = { 'uid': 'salesuser1', 'cn': 'salesuser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'salesuser1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'salesmanager1', 'cn': 'salesmanager1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'salesmanager1', 'userPassword': PW_DM, } user = UserAccount(topo.standalone, 'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'enguser1', 'cn': 'enguser1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'enguser1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) properties = { 'uid': 'engmanager1', 'cn': 'engmanager1', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'engmanager1', 'userPassword': PW_DM } user = UserAccount(topo.standalone, 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) user.create(properties=properties) # user with cn=sales* will automatically memeber of nsfilterrole # 
cn=filterrolesalesrole,o=acivattr,dc=example,dc=com assert UserAccount(topo.standalone, 'cn=salesuser1,ou=sales,o=acivattr,dc=example,dc=com').\ get_attr_val_utf8('nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' # same goes to SALES_MANAGER assert UserAccount(topo.standalone, SALES_MANAGER).get_attr_val_utf8( 'nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' # user with cn=eng* will automatically memeber of nsfilterrole # cn=filterroleengrole,o=acivattr,dc=example,dc=com assert UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,dc=example,dc=com').\ get_attr_val_utf8('nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' # same goes to ENG_MANAGER assert UserAccount(topo.standalone, ENG_MANAGER).get_attr_val_utf8( 'nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' for dn_dn in [ENG_USER, SALES_UESER, ENG_MANAGER, SALES_MANAGER, FILTERROLESALESROLE, FILTERROLEENGROLE, ENG_OU, SALES_OU, DNBASE]: UserAccount(topo.standalone, dn_dn).delete() def fin(): topo.standalone.restart() try: filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) for i in filtered_roles.list(): i.delete() except: pass topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(fin) def test_managedrole(topo, request): """Test Managed Role :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b :setup: server :steps: 1. Add test entry 2. Add ACI 3. Search managed role entries :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Create Managed role entry roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) role = roles.create(properties={"cn": 'ROLE1'}) # Create user and Assign the role to the entry uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) uas.create(properties={ 'uid': 'Fail', 'cn': 'Fail', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'Fail', 'nsRoleDN': role.dn, 'userPassword': PW_DM }) # Create user and do not Assign any role to the entry uas.create( properties={ 'uid': 'Success', 'cn': 'Success', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'Success', 'userPassword': PW_DM }) # Assert that Manage role entry is created and its searchable assert ManagedRoles(topo.standalone, DEFAULT_SUFFIX).list()[0].dn \ == 'cn=ROLE1,dc=example,dc=com' # Set an aci that will deny ROLE1 manage role Domain(topo.standalone, DEFAULT_SUFFIX).\ add('aci', '(targetattr="*")(version 3.0; aci "role aci";' ' deny(all) roledn="ldap:///{}";)'.format(role.dn),) # Add self user modification and anonymous aci ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" suffix = Domain(topo.standalone, DEFAULT_SUFFIX) suffix.add('aci', ANON_ACI) # Crate a connection with cn=Fail which is member of ROLE1 conn = UserAccount(topo.standalone, "uid=Fail,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) # Access denied to ROLE1 members assert not ManagedRoles(conn, DEFAULT_SUFFIX).list() # Now create a connection with cn=Success which is not a member of ROLE1 conn = UserAccount(topo.standalone, "uid=Success,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) # Access allowed here assert ManagedRoles(conn, DEFAULT_SUFFIX).list() for i in uas.list(): i.delete() for i in roles.list(): i.delete() def fin(): topo.standalone.restart() try: role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1') role.delete() except: pass 
topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(fin) @pytest.fixture(scope="function") def _final(request, topo): """ Removes and Restores ACIs after the test. """ aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') def finofaci(): """ Removes and Restores ACIs and other users after the test. And restore nsslapd-ignore-virtual-attrs to default """ domain = Domain(topo.standalone, DEFAULT_SUFFIX) domain.remove_all('aci') managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) for i in managed_roles.list() + nested_roles.list() + users.list(): i.delete() for i in aci_list: domain.add("aci", i) topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(finofaci) def test_nestedrole(topo, _final): """Test Nested Role :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 :setup: Standalone server :steps: 1. Add test entry 2. Add ACI 3. Search managed role entries :expectedresults: 1. Entry should be added 2. Operation should succeed 3. 
Operation should succeed """ # Create Managed role entry managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) managed_role1 = managed_roles.create(properties={"cn": 'managed_role1'}) managed_role2 = managed_roles.create(properties={"cn": 'managed_role2'}) # Create nested role entry nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) nested_role = nested_roles.create(properties={"cn": 'nested_role', "nsRoleDN": [managed_role1.dn, managed_role2.dn]}) # Create user and assign managed role to it users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user1 = users.create_test_user(uid=1, gid=1) user1.set('nsRoleDN', managed_role1.dn) user1.set('userPassword', PW_DM) # Create another user and assign managed role to it user2 = users.create_test_user(uid=2, gid=2) user2.set('nsRoleDN', managed_role2.dn) user2.set('userPassword', PW_DM) # Create another user and do not assign any role to it user3 = users.create_test_user(uid=3, gid=3) user3.set('userPassword', PW_DM) # Create a ACI with deny access to nested role entry Domain(topo.standalone, DEFAULT_SUFFIX).\ add('aci', f'(targetattr="*")(version 3.0; aci ' f'"role aci"; deny(all) roledn="ldap:///{nested_role.dn}";)') # Create connection with 'uid=test_user_1,ou=People,dc=example,dc=com' member of managed_role1 # and search while bound as the user conn = users.get('test_user_1').bind(PW_DM) assert not UserAccounts(conn, DEFAULT_SUFFIX).list() # Create connection with 'uid=test_user_2,ou=People,dc=example,dc=com' member of managed_role2 # and search while bound as the user conn = users.get('test_user_2').bind(PW_DM) assert not UserAccounts(conn, DEFAULT_SUFFIX).list() # Create connection with 'uid=test_user_3,ou=People,dc=example,dc=com' and # search while bound as the user conn = users.get('test_user_3').bind(PW_DM) assert UserAccounts(conn, DEFAULT_SUFFIX).list() def test_vattr_on_filtered_role(topo, request): """Test nsslapd-ignore-virtual-attrs configuration attribute The attribute is ON by default. 
If a filtered role is added it is moved to OFF :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908 :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Create a filtered role 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. This should be successful """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" log.info("Create a filtered role") try: Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) except: pass roles = FilteredRoles(topo.standalone, DNBASE) roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') topo.standalone.stop() assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") def fin(): topo.standalone.restart() try: filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) for i in filtered_roles.list(): i.delete() except: pass topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(fin) def test_vattr_on_filtered_role_restart(topo, request): """Test nsslapd-ignore-virtual-attrs configuration attribute If it exists a 
filtered role definition at restart then nsslapd-ignore-virtual-attrs should be set to 'off' :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0 :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Create a filtered role 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF 5. restart the instance 6. Check the presence of virtual attribute is detected 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. This should be successful 6. This should be successful 7. This should be successful """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" log.info("Create a filtered role") try: Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) except: pass roles = FilteredRoles(topo.standalone, DNBASE) roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') log.info("Check the virtual attribute definition is found (after a required delay)") topo.standalone.restart() time.sleep(5) assert topo.standalone.searchErrorsLog("Found a role/cos definition in") assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") log.info("Check the default value of attribute 
nsslapd-ignore-virtual-attrs should be OFF") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') def fin(): topo.standalone.restart() try: filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) for i in filtered_roles.list(): i.delete() except: pass topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(fin) def test_vattr_on_managed_role(topo, request): """Test nsslapd-ignore-virtual-attrs configuration attribute The attribute is ON by default. If a managed role is added it is moved to OFF :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346 :setup: Standalone instance :steps: 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON 3. Create a managed role 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs :expectedresults: 1. This should be successful 2. This should be successful 3. This should be successful 4. This should be successful 5. 
This should be successful """ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" log.info("Create a managed role") roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) role = roles.create(properties={"cn": 'ROLE1'}) log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') topo.standalone.stop() assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") def fin(): topo.standalone.restart() try: filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) for i in filtered_roles.list(): i.delete() except: pass topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') request.addfinalizer(fin) if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/sasl/000077500000000000000000000000001421664411400233125ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/sasl/__init__.py000066400000000000000000000000651421664411400254240ustar00rootroot00000000000000""" :Requirement: 389-ds-base: SASL Mechanism """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py000066400000000000000000000206021421664411400275310ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest import os from lib389.topologies import topology_st from lib389.utils import ds_is_older pytestmark = pytest.mark.tier1 def test_basic_feature(topology_st): """Test the alloweed sasl mechanism feature :id: b0453b91-9955-4e8f-9d2f-a6bf440022b1 :setup: Standalone instance :steps: 1. Get the default list of mechanisms 2. Set allowed mechanism PLAIN 3. Verify the list 4. Restart the server 5. Verify that list is still correct 6. Edit mechanisms to allow just PLAIN and EXTERNAL 7. Verify the list 8. Edit mechanisms to allow just PLAIN and GSSAPI 9. Verify the list 10. Restart the server 11. Verify that list is still correct 12. Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS 13. Verify the list 14. Restart the server 15. Verify that list is still correct 16. Edit mechanisms to allow just PLAIN and ANONYMOUS 17. Verify the list 18. Restart the server 19. Verify that list is still correct 20. Reset the allowed list to nothing, 21. Verify that the returned mechanisms are the default ones 22. Restart the server 23. Verify that list is still correct :expectedresults: 1. GSSAPI, PLAIN and EXTERNAL mechanisms should be acquired 2. Operation should be successful 3. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI 4. Server should be restarted 5. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI 6. Operation should be successful 7. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI 8. Operation should be successful 9. List should have - PLAIN, EXTERNAL, GSSAPI 10. Server should be restarted 11. List should have - PLAIN, EXTERNAL, GSSAPI 12. Operation should be successful 13. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS 14. Server should be restarted 15. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS 16. Operation should be successful 17. List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI 18. Server should be restarted 19. 
List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI 20. Operation should be successful 21. List should have - PLAIN, EXTERNAL, GSSAPI 22. Server should be restarted 23. List should have - PLAIN, EXTERNAL, GSSAPI """ standalone = topology_st.standalone # Get the supported mechanisms. This should contain PLAIN, GSSAPI, EXTERNAL at least standalone.log.info("Test we have some of the default mechanisms") orig_mechs = standalone.rootdse.supported_sasl() print(orig_mechs) assert('GSSAPI' in orig_mechs) assert('PLAIN' in orig_mechs) assert('EXTERNAL' in orig_mechs) # Now edit the supported mechanisms. Check them again. standalone.log.info("Edit mechanisms to allow just PLAIN") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN') limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) # Should always be in the allowed list, even if not set. assert('GSSAPI' not in limit_mechs) # Should not be there! # Restart the server a few times and make sure nothing changes standalone.log.info("Restart server and make sure we still have correct allowed mechs") standalone.restart() standalone.restart() limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' not in limit_mechs) # Set EXTERNAL, even though its always supported standalone.log.info("Edit mechanisms to allow just PLAIN and EXTERNAL") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, EXTERNAL') limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' not in limit_mechs) # Now edit the supported mechanisms. Check them again. 
standalone.log.info("Edit mechanisms to allow just PLAIN and GSSAPI") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI') limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' in limit_mechs) assert(len(limit_mechs) == 3) # Restart server twice and make sure the allowed list is the same standalone.restart() standalone.restart() # For ticket 49379 (test double restart) limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' in limit_mechs) assert(len(limit_mechs) == 3) # Add ANONYMOUS to the supported mechanisms and test again. standalone.log.info("Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI, ANONYMOUS') limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' in limit_mechs) assert('ANONYMOUS' in limit_mechs) assert(len(limit_mechs) == 4) # Restart server and make sure the allowed list is the same standalone.restart() standalone.restart() # For ticket 49379 (test double restart) limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' in limit_mechs) assert('ANONYMOUS' in limit_mechs) assert(len(limit_mechs) == 4) # Remove GSSAPI standalone.log.info("Edit mechanisms to allow just PLAIN and ANONYMOUS") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, ANONYMOUS') limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) assert('EXTERNAL' in limit_mechs) assert('GSSAPI' not in limit_mechs) assert('ANONYMOUS' in limit_mechs) assert(len(limit_mechs) == 3) # Restart server and make sure the allowed list is the same standalone.restart() limit_mechs = standalone.rootdse.supported_sasl() assert('PLAIN' in limit_mechs) 
assert('EXTERNAL' in limit_mechs) assert('GSSAPI' not in limit_mechs) assert('ANONYMOUS' in limit_mechs) assert(len(limit_mechs) == 3) # Do a config reset standalone.log.info("Reset allowed mechaisms") standalone.config.reset('nsslapd-allowed-sasl-mechanisms') # check the supported list is the same as our first check. standalone.log.info("Check that we have the original set of mechanisms") final_mechs = standalone.rootdse.supported_sasl() assert(set(final_mechs) == set(orig_mechs)) # Check it after a restart standalone.log.info("Check that we have the original set of mechanisms after a restart") standalone.restart() final_mechs = standalone.rootdse.supported_sasl() assert(set(final_mechs) == set(orig_mechs)) @pytest.mark.bz1816854 @pytest.mark.ds50869 @pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.6'), reason="May fail because of bz1816854") def test_config_set_few_mechs(topology_st): """Test that we can successfully set multiple values to nsslapd-allowed-sasl-mechanisms :id: d7c3c58b-4fbe-42ab-a8d4-9dd362916d5f :setup: Standalone instance :steps: 1. Set nsslapd-allowed-sasl-mechanisms to "PLAIN GSSAPI" 2. Verify nsslapd-allowed-sasl-mechanisms has the values :expectedresults: 1. Operation should be successful 2. 
Operation should be successful """ standalone = topology_st.standalone standalone.log.info("Set nsslapd-allowed-sasl-mechanisms to 'PLAIN GSSAPI'") standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN GSSAPI') standalone.log.info("Verify nsslapd-allowed-sasl-mechanisms has the values") allowed_mechs = standalone.config.get_attr_val_utf8('nsslapd-allowed-sasl-mechanisms') assert('PLAIN' in allowed_mechs) assert('GSSAPI' in allowed_mechs) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/sasl/plain_test.py000066400000000000000000000066701421664411400260370ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.topologies import topology_st # This pulls in logging I think from lib389.utils import * from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT from lib389.sasl import PlainSASL from lib389.idm.services import ServiceAccounts, ServiceAccount pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) def test_basic_feature(topology_st): """Check basic SASL functionality for PLAIN mechanism :id: 75ddc6fa-aa5a-4025-9c71-1abad20c91fc :setup: Standalone instance :steps: 1. Stop the instance 2. Clean up confdir from previous cert and key files 3. Create RSA files: CA, key and cert 4. Start the instance 5. Create RSA entry 6. Set nsslapd-secureport to 636 and nsslapd-security to 'on' 7. Restart the instance 8. Create a user 9. Check we can bind 10. Check that PLAIN is listed in supported mechs 11. Set up Plain SASL credentials 12. Try to open a connection without TLS 13. Try to open a connection with TLS 14. Try to open a connection with a wrong password :expectedresults: 1. 
The instance should stop 2. Confdir should be clean 3. RSA files should be created 4. The instance should start 5. RSA entry should be created 6. nsslapd-secureport and nsslapd-security should be set successfully 7. The instance should be restarted 8. User should be created 9. Bind should be successful 10. PLAIN should be listed in supported mechs 11. Plain SASL should be successfully set 12. AUTH_UNKNOWN exception should be raised 13. The connection should open 14. INVALID_CREDENTIALS exception should be raised """ standalone = topology_st.standalone standalone.enable_tls() # Create a user sas = ServiceAccounts(standalone, DEFAULT_SUFFIX) sas._basedn = DEFAULT_SUFFIX sa = sas.create(properties={'cn': 'testaccount', 'userPassword': 'password'}) # Check we can bind. This will raise exceptions if it fails. sa.bind('password') # Check that PLAIN is listed in supported mechns. assert(standalone.rootdse.supports_sasl_plain()) # The sasl parameters don't change, so set them up now. # Do we need the sasl map dn:? auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password') # Check that it fails without TLS with pytest.raises(ldap.AUTH_UNKNOWN): conn = sa.sasl_bind(uri=standalone.get_ldap_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) # We *have* to use REQCERT NEVER here because python ldap fails cert verification for .... some reason that even # I can not solve. I think it's leaking state across connections in start_tls_s? # Check that it works with TLS conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) conn.close() # Check that it correct fails our bind if we don't have the password. auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password-wrong') with pytest.raises(ldap.INVALID_CREDENTIALS): conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) # Done! 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/sasl/regression_test.py000066400000000000000000000151321421664411400271050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389._constants import * from lib389.replica import ReplicationManager pytestmark = [pytest.mark.tier1, pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) ISSUER = 'cn=CAcert' CACERT = 'CAcertificate' M1SERVERCERT = 'Server-Cert1' M2SERVERCERT = 'Server-Cert2' M1LDAPSPORT = '41636' M2LDAPSPORT = '42636' M1SUBJECT = 'CN=' + os.uname()[1] + ',OU=389 Directory Server' M2SUBJECT = 'CN=' + os.uname()[1] + ',OU=390 Directory Server' def add_entry(server, name, rdntmpl, start, num): log.info("\n######################### Adding %d entries to %s ######################\n" % (num, name)) for i in range(num): ii = start + i dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), 'uid': '%s%d' % (rdntmpl, ii), 'cn': '%s user%d' % (name, ii), 'sn': 'user%d' % (ii)}))) def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): log.info("\n######################### Check PEM files (%s, %s, %s)%s in %s ######################\n" % (mycacert, myservercert, myserverkey, notexist, confdir)) global cacert cacert = f"{mycacert}.pem" if os.path.isfile(cacert): if notexist == "": log.info('%s is successfully generated.' % cacert) else: log.info('%s is incorrecly generated.' % cacert) assert False else: if notexist == "": log.fatal('%s is not generated.' % cacert) assert False else: log.info('%s is correctly not generated.' 
% cacert) servercert = f"{myservercert}.pem" if os.path.isfile(servercert): if notexist == "": log.info('%s is successfully generated.' % servercert) else: log.info('%s is incorrecly generated.' % servercert) assert False else: if notexist == "": log.fatal('%s was not generated.' % servercert) assert False else: log.info('%s is correctly not generated.' % servercert) serverkey = f"{myserverkey}.pem" if os.path.isfile(serverkey): if notexist == "": log.info('%s is successfully generated.' % serverkey) else: log.info('%s is incorrectly generated.' % serverkey) assert False else: if notexist == "": log.fatal('%s was not generated.' % serverkey) assert False else: log.info('%s is correctly not generated.' % serverkey) def relocate_pem_files(topology_m2): log.info("######################### Relocate PEM files on supplier1 ######################") certdir_prefix = "/dev/shm" mycacert = os.path.join(certdir_prefix, "MyCA") topology_m2.ms["supplier1"].encryption.set('CACertExtractFile', mycacert) myservercert = os.path.join(certdir_prefix, "MyServerCert1") myserverkey = os.path.join(certdir_prefix, "MyServerKey1") topology_m2.ms["supplier1"].rsa.apply_mods([(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert), (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)]) log.info("##### restart supplier1") topology_m2.ms["supplier1"].restart() check_pems(certdir_prefix, mycacert, myservercert, myserverkey, "") @pytest.mark.ds47536 def test_openldap_no_nss_crypto(topology_m2): """Check that we allow usage of OpenLDAP libraries that don't use NSS for crypto :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a :setup: Replication with two suppliers: supplier_1 ----- startTLS -----> supplier_2; supplier_1 <-- TLS_clientAuth -- supplier_2; nsslapd-extract-pemfiles set to 'on' on both suppliers without specifying cert names :steps: 1. Add 5 users to supplier 1 and 2 2. Check that the users were successfully replicated 3. Relocate PEM files on supplier 1 4. 
Check PEM files in supplier 1 config directory 5. Add 5 users more to supplier 1 and 2 6. Check that the users were successfully replicated 7. Export userRoot on supplier 1 :expectedresults: 1. Users should be successfully added 2. Users should be successfully replicated 3. Operation should be successful 4. PEM files should be found 5. Users should be successfully added 6. Users should be successfully replicated 7. Operation should be successful """ log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") m1 = topology_m2.ms["supplier1"] m2 = topology_m2.ms["supplier2"] [i.enable_tls() for i in topology_m2] repl = ReplicationManager(DEFAULT_SUFFIX) repl.test_replication(m1, m2) add_entry(m1, 'supplier1', 'uid=m1user', 0, 5) add_entry(m2, 'supplier2', 'uid=m2user', 0, 5) repl.wait_for_replication(m1, m2) repl.wait_for_replication(m2, m1) log.info('##### Searching for entries on supplier1...') entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 11 == len(entries) log.info('##### Searching for entries on supplier2...') entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 11 == len(entries) relocate_pem_files(topology_m2) add_entry(m1, 'supplier1', 'uid=m1user', 10, 5) add_entry(m2, 'supplier2', 'uid=m2user', 10, 5) repl.wait_for_replication(m1, m2) repl.wait_for_replication(m2, m1) log.info('##### Searching for entries on supplier1...') entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 21 == len(entries) log.info('##### Searching for entries on supplier2...') entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 21 == len(entries) output_file = os.path.join(m1.get_ldif_dir(), "supplier1.ldif") m1.tasks.exportLDIF(benamebase='userRoot', output_file=output_file, args={'wait': True}) log.info("Ticket 47536 - PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % 
CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/000077500000000000000000000000001421664411400236105ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/__init__.py000066400000000000000000000000761421664411400257240ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Directory Server Schema """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/eduperson_test.py000066400000000000000000000051431421664411400272300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os import logging import pytest import ldap from lib389.idm.user import UserAccounts from lib389.topologies import topology_st as topology from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING is not False: DEBUGGING = True if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_account_locking(topology): """Test the eduperson schema works :id: f2f15449-a822-4ec6-b4ea-bd6db6240a6c :setup: Standalone instance :steps: 1. Add a common user 2. Extend the user with eduPerson objectClass 3. Add attributes in eduPerson :expectedresults: 1. User should be added with its properties 2. User should be extended with eduPerson as the objectClass 3. eduPerson should be added """ if DEBUGGING: # Add debugging steps(if any)... 
pass users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) user_properties = { 'uid': 'testuser', 'cn' : 'testuser', 'sn' : 'user', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', } testuser = users.create(properties=user_properties) # Extend the user with eduPerson testuser.add('objectClass', 'eduPerson') # now add eduPerson attrs testuser.add('eduPersonAffiliation', 'value') # From 2002 testuser.add('eduPersonNickName', 'value') # From 2002 testuser.add('eduPersonOrgDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 testuser.add('eduPersonOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 testuser.add('eduPersonPrimaryAffiliation', 'value') # From 2002 testuser.add('eduPersonPrincipalName', 'value') # From 2002 testuser.add('eduPersonEntitlement', 'value') # From 2002 testuser.add('eduPersonPrimaryOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 testuser.add('eduPersonScopedAffiliation', 'value') # From 2003 testuser.add('eduPersonTargetedID', 'value') # From 2003 testuser.add('eduPersonAssurance', 'value') # From 2008 testuser.add('eduPersonPrincipalNamePrior', 'value') # From 2012 testuser.add('eduPersonUniqueId', 'value') # From 2013 testuser.add('eduPersonOrcid', 'value') # From 2016 log.info('Test PASSED') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/schema_reload_test.py000066400000000000000000000304061421664411400300120ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import logging import pytest import time, ldap, re, os from lib389.schema import Schema from lib389.utils import ensure_bytes from lib389.topologies import topology_st as topo from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM from lib389._mapped_object import DSLdapObjects from lib389.idm.user import UserAccounts pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) INVALID_SCHEMA = 'givenName $ cn $ MoZiLLaATTRiBuTe' def test_schema_reload_with_searches(topo): """Test that during the schema reload task there is a small window where the new schema is not loaded into the asi hashtables - this results in searches not returning entries. :id: 375f1fdc-a9ef-45de-984d-0b79a40ff219 :setup: Standalone instance :steps: 1. Create a test user 2. Run a schema_reload task while searching for our user 3. While we wait for the task to complete search for our user 4. Check the user is still being returned and if task is complete :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful """ log.info('Test the searches still work as expected during schema reload tasks') # Add a user users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = users.create_test_user(uid=1) # Run a schema_reload tasks while searching for our user.Since # this is a race condition, run it several times. 
schema = Schema(topo.standalone) task = schema.reload(schema_dir=topo.standalone.schemadir) # While we wait for the task to complete search for our user search_count = 0 while search_count < 10: # Now check the user is still being returned # Check if task is complete assert user.exists() if task.get_exit_code() == 0: break time.sleep(1) search_count += 1 def test_schema_operation(topo): """Test that the cases in original schema are preserved. Test that duplicated schema except cases are not loaded Test to use a custom schema :id: e7448863-ac62-4b49-b013-4efa412c0455 :setup: Standalone instance :steps: 1. Create a test schema with cases 2. Run a schema_reload task 3. Check the attribute is present 4. Case 2: Check duplicated schema except cases are not loaded 5. Case 2-1: Use the custom schema :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful """ log.info('case 1: Test the cases in the original schema are preserved.') schema_filename = topo.standalone.schemadir + '/98test.ldif' try: with open(schema_filename, "w") as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + "'MoZiLLaaTTRiBuTe' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + " X-ORIGIN 'Mozilla Dummy Schema' )\n") schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' " + "SUP top MUST ( objectclass $ cn ) MAY ( MoZiLLaaTTRiBuTe )" + " X-ORIGIN 'user defined' )')\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to create schema file: " + "{} Error: {}".format(schema_filename, str(e))) # run the schema reload task with the default schemadir schema = Schema(topo.standalone) task = schema.reload(schema_dir=topo.standalone.schemadir) task.wait() subschema = topo.standalone.schema.get_subschema() at_obj = subschema.get_obj(ldap.schema.AttributeType, 'MoZiLLaaTTRiBuTe') assert 
at_obj is not None, "The attribute was not found on server" log.info('Case 2: Duplicated schema except cases are not loaded.') schema_filename = topo.standalone.schemadir + '/97test.ldif' try: with open(schema_filename, "w") as schema_file: Mozattr1 = "MOZILLAATTRIBUTE" schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + "'MOZILLAATTRIBUTE' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'Mozilla Dummy Schema' )\n") schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' "+ "SUP top MUST ( objectclass $ cn ) MAY ( MOZILLAATTRIBUTE ) "+ "X-ORIGIN 'user defined' )')\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to create schema file: " + "{} Error: {}".format(schema_filename, str(e))) # run the schema reload task with the default schemadir task = schema.reload(schema_dir=topo.standalone.schemadir) task.wait() subschema_duplicate = topo.standalone.schema.get_subschema() at_obj_duplicate = subschema_duplicate.get_obj(ldap.schema.AttributeType, 'MOZILLAATTRIBUTE') moz = re.findall('MOZILLAATTRIBUTE',str(at_obj_duplicate)) if moz: log.error('case 2: MOZILLAATTRIBUTE is in the objectclasses list -- FAILURE') assert False else: log.info('case 2: MOZILLAATTRIBUTE is not in the objectclasses list -- PASS') Mozattr2 = "mozillaattribute" log.info(f'Case 2-1: Use the custom schema with {Mozattr2}') name = "test_user" ld = ldap.initialize(topo.standalone.get_ldap_uri()) ld.simple_bind_s(DN_DM,PW_DM) ld.add_s(f"cn={name},{DEFAULT_SUFFIX}",[('objectclass', [b'top', b'person', b'MozillaObject']), ('sn', [ensure_bytes(name)]), ('cn', [ensure_bytes(name)]), (Mozattr2, [ensure_bytes(name)]) ]) mozattrval = DSLdapObjects(topo.standalone,DEFAULT_SUFFIX).filter('(objectclass=mozillaobject)')[0] assert mozattrval.get_attr_val_utf8('mozillaattribute') == name def test_valid_schema(topo): """Test schema-reload task with valid schema :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a8 :setup: Standalone 
instance :steps: 1. Create schema file with valid schema 2. Run schema-reload.pl script 3. Run ldapsearch and check if schema was added :expectedresults: 1. File creation should work 2. The schema reload task should be successful 3. Searching the server should return the new schema """ log.info("Test schema-reload task with valid schema") # Step 1 - Create schema file log.info("Create valid schema file (99user.ldif)...") schema_filename = (topo.standalone.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 8.9.10.11.12.13.13 NAME " + "'ValidAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + " X-ORIGIN 'Mozilla Dummy Schema' )\n") schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + "sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to create schema file: " + "{} Error: {}".format(schema_filename, str(e))) # Step 2 - Run the schema-reload task log.info("Run the schema-reload task...") schema = Schema(topo.standalone) task = schema.reload(schema_dir=topo.standalone.schemadir) task.wait() assert task.get_exit_code() == 0, "The schema reload task failed" # Step 3 - Verify valid schema was added to the server log.info("Check cn=schema to verify the valid schema was added") subschema = topo.standalone.schema.get_subschema() oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'TestObject') assert oc_obj is not None, "The new objectclass was not found on server" at_obj = subschema.get_obj(ldap.schema.AttributeType, 'ValidAttribute') assert at_obj is not None, "The new attribute was not found on server" def test_invalid_schema(topo): """Test schema-reload task with invalid schema :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a9 :setup: Standalone instance :steps: 1. Create schema files with invalid schema 2. 
Run schema-reload.pl script 3. Run ldapsearch and check if schema was added :expectedresults: 1. File creation should work 2. The schema reload task should return an error 3. Searching the server should not return the invalid schema """ log.info("Test schema-reload task with invalid schema") # Step 1 - Create schema files: one valid, one invalid log.info("Create valid schema file (98user.ldif)...") schema_filename = (topo.standalone.schemadir + "/98user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + " X-ORIGIN 'Mozilla Dummy Schema' )\n") schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + "sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to create schema file: " + "{} Error: {}".format(schema_filename, str(e))) log.info("Create invalid schema file (99user.ldif)...") schema_filename = (topo.standalone.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") # Same attribute/objclass names, but different OIDs and MAY attributes schema_file.write("attributetypes: ( 8.9.10.11.12.13.140 NAME " + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + " X-ORIGIN 'Mozilla Dummy Schema' )\n") schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + "cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to create schema file: " + "{} Error: {}".format(schema_filename, str(e))) # Step 2 - Run the schema-reload task log.info("Run the schema-reload task, it should fail...") schema = Schema(topo.standalone) task = 
schema.reload(schema_dir=topo.standalone.schemadir) task.wait() assert task.get_exit_code() != 0, f"The schema reload task incorectly reported success{task.get_exit_code()}" # Step 3 - Verify invalid schema was not added to the server log.info("Check cn=schema to verify the invalid schema was not added") subschema = topo.standalone.schema.get_subschema() oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'MoZiLLaOBJeCT') if oc_obj is not None and INVALID_SCHEMA in str(oc_obj): log.fatal("The invalid schema was returned from the server: " + str(oc_obj)) assert False else: log.info("The invalid schema is not present on the server") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/schema_replication_test.py000066400000000000000000000770301421664411400310610ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import re import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m1c1 from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier1, pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX ENTRY_DN = "cn=test_entry, %s" % SUFFIX MUST_OLD = "(postalAddress $ preferredLocale)" MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)" MAY_OLD = "(postalCode $ street)" MAY_NEW = "(postalCode $ street $ postOfficeBox)" def _header(topology_m1c1, label): topology_m1c1.ms["supplier1"].log.info("\n\n###############################################") topology_m1c1.ms["supplier1"].log.info("#######") topology_m1c1.ms["supplier1"].log.info("####### %s" % label) topology_m1c1.ms["supplier1"].log.info("#######") topology_m1c1.ms["supplier1"].log.info("###################################################") def pattern_errorlog(file, log_pattern): try: pattern_errorlog.last_pos += 1 except AttributeError: pattern_errorlog.last_pos = 0 found = None log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) file.seek(pattern_errorlog.last_pos) # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: line = file.readline() log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) found = log_pattern.search(line) if ((line == '') or (found)): break log.debug("_pattern_errorlog: end at offset %d" % file.tell()) pattern_errorlog.last_pos = file.tell() return found def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' sup = 'person' if not must: must = MUST_OLD if not may: may = MAY_OLD new_oc = "( %s NAME 
'%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc def add_OC(instance, oid_ext, name): new_oc = _oc_definition(oid_ext, name) instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): old_oc = _oc_definition(oid_ext, name, old_must, old_may) new_oc = _oc_definition(oid_ext, name, new_must, new_may) instance.schema.del_schema('objectClasses', ensure_bytes(old_oc)) instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) def support_schema_learning(topology_m1c1): """ with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn schema definitions when a replication occurs. Before that ticket: replication of the schema fails requiring administrative operation In the test the schemaCSN (supplier consumer) differs After that ticket: replication of the schema succeeds (after an initial phase of learning) In the test the schema CSN (supplier consumer) are in sync This function returns True if 47721 is fixed in the current release False else """ ent = topology_m1c1.cs["consumer1"].getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) if ent.hasAttr('nsslapd-versionstring'): val = ent.getValue('nsslapd-versionstring') version = ensure_str(val).split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] major = int(version[0]) minor = int(version[1]) if major > 1: return True if minor > 3: # version is 1.4 or after return True if minor == 3: if version[2].isdigit(): if int(version[2]) >= 3: return True return False def trigger_update(topology_m1c1): """ It triggers an update on the supplier. 
This will start a replication session and a schema push """ try: trigger_update.value += 1 except AttributeError: trigger_update.value = 1 replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_update.value)))] topology_m1c1.ms["supplier1"].modify_s(ENTRY_DN, replace) # wait 10 seconds that the update is replicated loop = 0 while loop <= 10: try: ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) val = ent.telephonenumber or "0" if int(val) == trigger_update.value: return # the expected value is not yet replicated. try again time.sleep(1) loop += 1 log.debug("trigger_update: receive %s (expected %d)" % (val, trigger_update.value)) except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 def trigger_schema_push(topology_m1c1): ''' Trigger update to create a replication session. In case of 47721 is fixed and the replica needs to learn the missing definition, then the first replication session learn the definition and the second replication session push the schema (and the schemaCSN. 
This is why there is two updates and replica agreement is stopped/start (to create a second session) ''' agreements = topology_m1c1.ms["supplier1"].agreement.list(suffix=SUFFIX, consumer_host=topology_m1c1.cs["consumer1"].host, consumer_port=topology_m1c1.cs["consumer1"].port) assert (len(agreements) == 1) ra = agreements[0] trigger_update(topology_m1c1) topology_m1c1.ms["supplier1"].agreement.pause(ra.dn) topology_m1c1.ms["supplier1"].agreement.resume(ra.dn) trigger_update(topology_m1c1) @pytest.fixture(scope="module") def schema_replication_init(topology_m1c1): """Initialize the test environment """ log.debug("test_schema_replication_init topology_m1c1 %r (supplier %r, consumer %r" % ( topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) # check if a warning message is logged in the # error log of the supplier topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") # This entry will be used to trigger attempt of schema push topology_m1c1.ms["supplier1"].add_s(Entry((ENTRY_DN, { 'objectclass': "top person".split(), 'sn': 'test_entry', 'cn': 'test_entry'}))) @pytest.mark.ds47490 def test_schema_replication_one(topology_m1c1, schema_replication_init): """Check supplier schema is a superset (one extra OC) of consumer schema, then schema is pushed and there is no message in the error log :id: d6c6ff30-b3ae-4001-80ff-0fb18563a393 :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Update the schema of supplier, so it will be superset of consumer 2. Push the Schema (no error) 3. Check both supplier and consumer has same schemaCSN 4. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. 
State at startup: - supplier default schema - consumer default schema Final state - supplier +supplierNewOCA - consumer +supplierNewOCA """ _header(topology_m1c1, "Extra OC Schema is pushed - no error") log.debug("test_schema_replication_one topology_m1c1 %r (supplier %r, consumer %r" % ( topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) # update the schema of the supplier so that it is a superset of # consumer. Schema should be pushed add_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA') trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was updated on the consumer log.debug("test_schema_replication_one supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_one onsumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False @pytest.mark.ds47490 def test_schema_replication_two(topology_m1c1, schema_replication_init): """Check consumer schema is a superset (one extra OC) of supplier schema, then schema is pushed and there is a message in the error log :id: b5db9b75-a9a7-458e-86ec-2a8e7bd1c014 :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Update the schema of consumer, so it will be superset of supplier 2. Update the schema of supplier so ti make it's nsSchemaCSN larger than consumer 3. Push the Schema (error should be generated) 4. Check supplier learns the missing definition 5. Check the error logs 6. 
Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. State at startup - supplier +supplierNewOCA - consumer +supplierNewOCA Final state - supplier +supplierNewOCA +supplierNewOCB - consumer +supplierNewOCA +consumerNewOCA """ _header(topology_m1c1, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") # add this OC on consumer. Supplier will no push the schema add_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA') # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) add_OC(topology_m1c1.ms["supplier1"], 3, 'supplierNewOCB') # now push the scheam trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was NOT updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_schema_replication_two supplier_schema_csn=%s", supplier_schema_csn) log.debug("test_schema_replication_two consumer_schema_csn=%s", consumer_schema_csn) if support_schema_learning(topology_m1c1): assert supplier_schema_csn == consumer_schema_csn else: assert supplier_schema_csn != consumer_schema_csn # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) @pytest.mark.ds47490 def test_schema_replication_three(topology_m1c1, schema_replication_init): """Check supplier schema is again a superset (one extra OC), then schema is pushed and there is no message in the error log :id: 45888895-76bc-4cc3-9f90-33a69d027116 :setup: Supplier Consumer, check if a 
warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Update the schema of supplier 2. Push the Schema (no error) 3. Check the schemaCSN was NOT updated on the consumer 4. Check the error logs for no errors 5. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. State at startup - supplier +supplierNewOCA +supplierNewOCB - consumer +supplierNewOCA +consumerNewOCA Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA """ _header(topology_m1c1, "Extra OC Schema is pushed - no error") # Do an upate to trigger the schema push attempt # add this OC on consumer. Supplier will no push the schema add_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA') # now push the scheam trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was NOT updated on the consumer log.debug("test_schema_replication_three supplier_schema_csn=%s", supplier_schema_csn) log.debug("test_schema_replication_three consumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False @pytest.mark.ds47490 def test_schema_replication_four(topology_m1c1, schema_replication_init): """Check supplier schema is again a superset (OC with more MUST), then schema is pushed and there is no message in the error log :id: 39304242-2641-4eb8-a9fb-5ff0cf80718f :setup: Supplier Consumer, check if a warning 
message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Add telenumber to 'supplierNewOCA' on the supplier 2. Push the Schema (no error) 3. Check the schemaCSN was updated on the consumer 4. Check the error log of the supplier does not contain an error 5. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber """ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") mod_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was updated on the consumer log.debug("test_schema_replication_four supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_four onsumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False @pytest.mark.ds47490 def test_schema_replication_five(topology_m1c1, schema_replication_init): """Check consumer schema is a superset (OC with more MUST), then schema is pushed (fix for 47721) and there is a message in the error log :id: 
498527df-28c8-4e1a-bc9e-799fd2b7b2bb :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Add telenumber to 'consumerNewOCA' on the consumer 2. Add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer 3. Push the Schema 4. Check the schemaCSN was NOT updated on the consumer 5. Check the error log of the supplier contain an error 6. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber +must=telexnumber Note: replication log is enabled to get more details """ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") # get more detail why it fails topology_m1c1.ms["supplier1"].enableReplLogging() # add telenumber to 'consumerNewOCA' on the consumer mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) add_OC(topology_m1c1.ms["supplier1"], 4, 'supplierNewOCC') trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was NOT updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_schema_replication_five supplier_schema_csn=%s", 
supplier_schema_csn) log.debug("ctest_schema_replication_five onsumer_schema_csn=%s", consumer_schema_csn) if support_schema_learning(topology_m1c1): assert supplier_schema_csn == consumer_schema_csn else: assert supplier_schema_csn != consumer_schema_csn # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) @pytest.mark.ds47490 def test_schema_replication_six(topology_m1c1, schema_replication_init): """Check supplier schema is again a superset (OC with more MUST), then schema is pushed and there is no message in the error log :id: ed57b0cc-6a10-4f89-94ae-9f18542b1954 :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Add telenumber to 'consumerNewOCA' on the supplier 2. Push the Schema (no error) 3. Check the schemaCSN was NOT updated on the consumer 4. Check the error log of the supplier does not contain an error 5. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. 
State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +must=telexnumber +must=telexnumber Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber Note: replication log is enabled to get more details """ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") # add telenumber to 'consumerNewOCA' on the consumer mod_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was NOT updated on the consumer log.debug("test_schema_replication_six supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_six onsumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False @pytest.mark.ds47490 def test_schema_replication_seven(topology_m1c1, schema_replication_init): """Check supplier schema is again a superset (OC with more MAY), then schema is pushed and there is no message in the error log :id: 8725055a-b3f8-4d1d-a4d6-bb7dccf644d0 :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. 
Add telenumber to 'supplierNewOCA' on the supplier 2. Push the Schema (no error) 3. Check the schemaCSN was updated on the consumer 4. Check the error log of the supplier does not contain an error 5. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox """ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error") mod_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was updated on the consumer log.debug("test_schema_replication_seven supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_seven consumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False @pytest.mark.ds47490 def test_schema_replication_eight(topology_m1c1, schema_replication_init): """Check consumer schema is a superset (OC with more MAY), then schema 
is pushed (fix for 47721) and there is message in the error log :id: 2310d150-a71a-498d-add8-4056beeb58c6 :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Add telenumber to 'consumerNewOCA' on the consumer 2. Modify OC on the supplier so that its nsSchemaCSN is larger than the consumer 3. Push the Schema (no error) 4. Check the schemaCSN was updated on the consumer 5. Check the error log of the supplier does not contain an error 6. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox """ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed (fix for 47721)") mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) mod_OC(topology_m1c1.ms["supplier1"], 4, 'supplierNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # 
Check the schemaCSN was not updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_schema_replication_eight supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_eight onsumer_schema_csn=%s", consumer_schema_csn) if support_schema_learning(topology_m1c1): assert supplier_schema_csn == consumer_schema_csn else: assert supplier_schema_csn != consumer_schema_csn # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) @pytest.mark.ds47490 def test_schema_replication_nine(topology_m1c1, schema_replication_init): """Check consumer schema is a superset (OC with more MAY), then schema is not pushed and there is message in the error log :id: 851b24c6-b1e0-466f-9714-aa2940fbfeeb :setup: Supplier Consumer, check if a warning message is logged in the error log of the supplier and add a test entry to trigger attempt of schema push. :steps: 1. Add postOfficeBox to 'consumerNewOCA' on the supplier 3. Push the Schema 4. Check the schemaCSN was updated on the consumer 5. Check the error log of the supplier does contain an error 6. Check the startup/final state :expectedresults: 1. Operation should be successful 2. Operation should be successful 3. Operation should be successful 4. Operation should be successful 5. Operation should be successful 6. 
State at startup - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox Final state - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox """ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error") mod_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was updated on the consumer log.debug("test_schema_replication_nine supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_schema_replication_nine onsumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) if res is not None: assert False log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/schema_test.py000066400000000000000000000451371421664411400264730ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. 
# All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Dec 18, 2013 @author: rmeggins ''' import logging import ldap import pytest from ldap.cidict import cidict from ldap.schema import SubSchema from lib389.schema import SchemaLegacy from lib389._constants import * from lib389.topologies import topology_st, topology_m2 as topo_m2 from lib389.idm.user import UserAccounts, UserAccount from lib389.replica import ReplicationManager from lib389.utils import ensure_bytes pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) attrclass = ldap.schema.models.AttributeType occlass = ldap.schema.models.ObjectClass syntax_len_supported = False def ochasattr(subschema, oc, mustormay, attr, key): """See if the oc and any of its parents and ancestors have the given attr""" rc = False if not key in oc.__dict__: dd = cidict() for ii in oc.__dict__[mustormay]: dd[ii] = ii oc.__dict__[key] = dd if attr in oc.__dict__[key]: rc = True else: # look in parents for noroid in oc.sup: ocpar = subschema.get_obj(occlass, noroid) assert (ocpar) rc = ochasattr(subschema, ocpar, mustormay, attr, key) if rc: break return rc def ochasattrs(subschema, oc, mustormay, attrs): key = mustormay + "dict" ret = [] for attr in attrs: if not ochasattr(subschema, oc, mustormay, attr, key): ret.append(attr) return ret def mycmp(v1, v2): v1ary, v2ary = [v1], [v2] if isinstance(v1, list) or isinstance(v1, tuple): v1ary, v2ary = list(set([x.lower() for x in v1])), list(set([x.lower() for x in v2])) if not len(v1ary) == len(v2ary): return False for v1, v2 in zip(v1ary, v2ary): if isinstance(v1, str): if not len(v1) == len(v2): return False if not v1 == v2: return False return True def ocgetdiffs(ldschema, oc1, oc2): fields = ['obsolete', 'names', 'desc', 'must', 'may', 'kind', 'sup'] ret = '' for field in fields: v1, v2 = oc1.__dict__[field], 
oc2.__dict__[field] if field == 'may' or field == 'must': missing = ochasattrs(ldschema, oc1, field, oc2.__dict__[field]) if missing: ret = ret + '\t%s is missing %s\n' % (field, missing) missing = ochasattrs(ldschema, oc2, field, oc1.__dict__[field]) if missing: ret = ret + '\t%s is missing %s\n' % (field, missing) elif not mycmp(v1, v2): ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, oc1.__dict__[field], oc2.__dict__[field]) return ret def atgetparfield(subschema, at, field): v = None for nameoroid in at.sup: atpar = subschema.get_obj(attrclass, nameoroid) assert (atpar) v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field)) if v is not None: break return v def atgetdiffs(ldschema, at1, at2): fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax', 'single_value', 'collective', 'no_user_mod', 'usage'] if syntax_len_supported: fields.append('syntax_len') ret = '' for field in fields: v1 = at1.__dict__.get(field) or atgetparfield(ldschema, at1, field) v2 = at2.__dict__.get(field) or atgetparfield(ldschema, at2, field) if not mycmp(v1, v2): ret = ret + '\t%s differs: [%s] vs. 
[%s]\n' % (field, at1.__dict__[field], at2.__dict__[field]) return ret def test_schema_comparewithfiles(topology_st): '''Compare the schema from ldap cn=schema with the schema files''' log.info('Running test_schema_comparewithfiles...') retval = True schemainst = topology_st.standalone ldschema = schemainst.schema.get_subschema() assert ldschema for fn in schemainst.schema.list_files(): try: fschema = schemainst.schema.file_to_subschema(fn) if fschema is None: raise Exception("Empty schema file %s" % fn) except: log.warning("Unable to parse %s as a schema file - skipping" % fn) continue log.info("Parsed %s as a schema file - checking" % fn) for oid in fschema.listall(occlass): se = fschema.get_obj(occlass, oid) assert se ldse = ldschema.get_obj(occlass, oid) if not ldse: log.error("objectclass in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) retval = False continue ret = ocgetdiffs(ldschema, ldse, se) if ret: log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) retval = False for oid in fschema.listall(attrclass): se = fschema.get_obj(attrclass, oid) assert se ldse = ldschema.get_obj(attrclass, oid) if not ldse: log.error("attributetype in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) retval = False continue ret = atgetdiffs(ldschema, ldse, se) if ret: log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) retval = False assert retval log.info('test_schema_comparewithfiles: PASSED') def test_gecos_directoryString(topology_st): """Check that gecos supports directoryString value :id: aee422bb-6299-4124-b5cd-d7393dac19d3 :setup: Standalone instance :steps: 1. Add a common user 2. replace gecos with a direstoryString value :expectedresults: 1. Success 2. 
Success """ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) user_properties = { 'uid': 'testuser', 'cn' : 'testuser', 'sn' : 'user', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', } testuser = users.create(properties=user_properties) # Add a gecos UTF value testuser.replace('gecos', 'Hélène') def test_gecos_mixed_definition_topo(topo_m2, request): """Check that replication is still working if schema contains definitions that does not conform with a replicated entry :id: d5940e71-d18a-4b71-aaf7-b9185361fffe :setup: Two suppliers replication setup :steps: 1. Create a testuser on M1 2 Stop M1 and M2 3 Change gecos def on M2 to be IA5 4 Update testuser with gecos directoryString value 5 Check replication is still working :expectedresults: 1. success 2. success 3. success 4. success 5. success """ repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # create a test user testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) testuser = UserAccount(m1, testuser_dn) try: testuser.create(properties={ 'uid': 'testuser', 'cn': 'testuser', 'sn': 'testuser', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', }) except ldap.ALREADY_EXISTS: pass repl.wait_for_replication(m1, m2) # Stop suppliers to update the schema m1.stop() m2.stop() # on M1: gecos is DirectoryString (default) # on M2: gecos is IA5 schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + "'gecos' DESC 'The GECOS field; the common name' " + "EQUALITY caseIgnoreIA5Match " + "SUBSTR caseIgnoreIA5SubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "SINGLE-VALUE )\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) # start the instances 
m1.start() m2.start() # Check that gecos is IA5 on M2 schema = SchemaLegacy(m2) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" # Add a gecos UTF value on M1 testuser.replace('gecos', 'Hélène') # Check replication is still working testuser.replace('displayName', 'ascii value') repl.wait_for_replication(m1, m2) testuser_m2 = UserAccount(m2, testuser_dn) assert testuser_m2.exists() assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' def fin(): m1.start() m2.start() testuser.delete() repl.wait_for_replication(m1, m2) # on M2 restore a default 99user.ldif m2.stop() os.remove(m2.schemadir + "/99user.ldif") schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) m2.start() m1.start() request.addfinalizer(fin) def test_gecos_directoryString_wins_M1(topo_m2, request): """Check that if inital syntax are IA5(M2) and DirectoryString(M1) Then directoryString wins when nsSchemaCSN M1 is the greatest :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb :setup: Two suppliers replication setup :steps: 1. Create a testuser on M1 2 Stop M1 and M2 3 Change gecos def on M2 to be IA5 4 Start M1 and M2 5 Update M1 schema so that M1 has greatest nsSchemaCSN 6 Update testuser with gecos directoryString value 7 Check replication is still working 8 Check gecos is DirectoryString on M1 and M2 :expectedresults: 1. success 2. success 3. success 4. success 5. success 6. success 7. success 8. 
success """ repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # create a test user testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) testuser = UserAccount(m1, testuser_dn) try: testuser.create(properties={ 'uid': 'testuser', 'cn': 'testuser', 'sn': 'testuser', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', }) except ldap.ALREADY_EXISTS: pass repl.wait_for_replication(m1, m2) # Stop suppliers to update the schema m1.stop() m2.stop() # on M1: gecos is DirectoryString (default) # on M2: gecos is IA5 schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + "'gecos' DESC 'The GECOS field; the common name' " + "EQUALITY caseIgnoreIA5Match " + "SUBSTR caseIgnoreIA5SubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "SINGLE-VALUE )\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) # start the instances m1.start() m2.start() # Check that gecos is IA5 on M2 schema = SchemaLegacy(m2) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" # update M1 schema to increase its nsschemaCSN new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" m1.schema.add_schema('attributetypes', ensure_bytes(new_at)) # Add a gecos UTF value on M1 testuser.replace('gecos', 'Hélène') # Check replication is still working testuser.replace('displayName', 'ascii value') repl.wait_for_replication(m1, m2) testuser_m2 = UserAccount(m2, testuser_dn) assert testuser_m2.exists() assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' # Check that gecos is DirectoryString on M1 schema = SchemaLegacy(m1) 
attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" # Check that gecos is DirectoryString on M2 schema = SchemaLegacy(m2) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" def fin(): m1.start() m2.start() testuser.delete() m1.schema.del_schema('attributetypes', ensure_bytes(new_at)) repl.wait_for_replication(m1, m2) # on M2 restore a default 99user.ldif m2.stop() os.remove(m2.schemadir + "/99user.ldif") schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) m2.start() m1.start() request.addfinalizer(fin) def test_gecos_directoryString_wins_M2(topo_m2, request): """Check that if inital syntax are IA5(M2) and DirectoryString(M1) Then directoryString wins when nsSchemaCSN M2 is the greatest :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348 :setup: Two suppliers replication setup :steps: 1. Create a testuser on M1 2 Stop M1 and M2 3 Change gecos def on M2 to be IA5 4 Start M1 and M2 5 Update M2 schema so that M2 has greatest nsSchemaCSN 6 Update testuser on M2 and trigger replication to M1 7 Update testuser on M2 with gecos directoryString value 8 Check replication is still working 9 Check gecos is DirectoryString on M1 and M2 :expectedresults: 1. success 2. success 3. success 4. success 5. success 6. success 7. success 8. success 9. 
success """ repl = ReplicationManager(DEFAULT_SUFFIX) m1 = topo_m2.ms["supplier1"] m2 = topo_m2.ms["supplier2"] # create a test user testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) testuser = UserAccount(m1, testuser_dn) try: testuser.create(properties={ 'uid': 'testuser', 'cn': 'testuser', 'sn': 'testuser', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser', }) except ldap.ALREADY_EXISTS: pass testuser.replace('displayName', 'to trigger replication M1-> M2') repl.wait_for_replication(m1, m2) # Stop suppliers to update the schema m1.stop() m2.stop() # on M1: gecos is DirectoryString (default) # on M2: gecos is IA5 schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + "'gecos' DESC 'The GECOS field; the common name' " + "EQUALITY caseIgnoreIA5Match " + "SUBSTR caseIgnoreIA5SubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "SINGLE-VALUE )\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) # start the instances m1.start() m2.start() # Check that gecos is IA5 on M2 schema = SchemaLegacy(m2) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" # update M2 schema to increase its nsschemaCSN new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" m2.schema.add_schema('attributetypes', ensure_bytes(new_at)) # update just to trigger replication M2->M1 # and update of M2 schema testuser_m2 = UserAccount(m2, testuser_dn) testuser_m2.replace('displayName', 'to trigger replication M2-> M1') # Add a gecos UTF value on M1 testuser.replace('gecos', 'Hélène') # Check replication is still working testuser.replace('displayName', 'ascii value') 
repl.wait_for_replication(m1, m2) assert testuser_m2.exists() assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' # Check that gecos is DirectoryString on M1 schema = SchemaLegacy(m1) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" # Check that gecos is DirectoryString on M2 schema = SchemaLegacy(m2) attributetypes = schema.query_attributetype('gecos') assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" def fin(): m1.start() m2.start() testuser.delete() m1.schema.del_schema('attributetypes', ensure_bytes(new_at)) repl.wait_for_replication(m1, m2) # on M2 restore a default 99user.ldif m2.stop() os.remove(m2.schemadir + "/99user.ldif") schema_filename = (m2.schemadir + "/99user.ldif") try: with open(schema_filename, 'w') as schema_file: schema_file.write("dn: cn=schema\n") os.chmod(schema_filename, 0o777) except OSError as e: log.fatal("Failed to update schema file: " + "{} Error: {}".format(schema_filename, str(e))) m2.start() request.addfinalizer(fin) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/schema/x_attribute_descr_oid_test.py000066400000000000000000000033711421664411400315720ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2022 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import os import logging import pytest import ldap from lib389.topologies import topology_st as topology from lib389._constants import DEFAULT_SUFFIX from lib389.schema import Schema pytestmark = pytest.mark.tier1 def test_x_descr_oid(topology): """Test import of an attribute using descr-oid format that starts with an X-. This should "fail" with a descriptive error message. 
:id: 9308bdbd-363c-45a9-8223-9a6c925dba37 :setup: Standalone instance :steps: 1. Add invalid x-attribute 2. Add valid x-attribute 3. Add invalid x-object 4. Add valid x-object :expectedresults: 1. raises INVALID_SYNTAX 2. success 3. raises INVALID_SYNTAX 4. success """ inst = topology.standalone schema = Schema(inst) with pytest.raises(ldap.INVALID_SYNTAX): schema.add('attributeTypes', "( x-attribute-oid NAME 'x-attribute' DESC 'desc' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'user defined' )") schema.add('attributeTypes', "( 1.2.3.4.5.6.7.8.9.10 NAME 'x-attribute' DESC 'desc' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'user defined' )") with pytest.raises(ldap.INVALID_SYNTAX): schema.add('objectClasses', "( x-object-oid NAME 'x-object' DESC 'desc' SUP TOP AUXILIARY MAY ( x-attribute ) X-ORIGIN 'user defined' )") schema.add('objectClasses', "( 1.2.3.4.5.6.7.8.9.11 NAME 'x-object' DESC 'desc' SUP TOP AUXILIARY MAY ( x-attribute ) X-ORIGIN 'user defined' )") 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/setup_ds/000077500000000000000000000000001421664411400241765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/setup_ds/__init__.py000066400000000000000000000004211421664411400263040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ :Requirement: 389-ds-base: Basic Directory Server Operations """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/setup_ds/dscreate_test.py000066400000000000000000000075051421664411400274100ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2021 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- import sys import pytest from lib389 import DirSrv from lib389.cli_base import LogCapture from lib389.instance.setup import SetupDs from lib389.instance.remove import remove_ds_instance from lib389.instance.options import General2Base, Slapd2Base from lib389._constants import * from lib389.utils import ds_is_older pytestmark = [pytest.mark.tier0, pytest.mark.skipif(ds_is_older('1.4.1.2'), reason="Needs a compatible systemd unit, see PR#50213")] INSTANCE_PORT = 54321 INSTANCE_SECURE_PORT = 54322 INSTANCE_SERVERID = 'standalone' DEBUGGING = True MAJOR, MINOR, _, _, _ = sys.version_info class TopologyInstance(object): def __init__(self, standalone): # For these tests, we don't want to open the instance. # instance.open() self.standalone = standalone # Need a teardown to destroy the instance. @pytest.fixture def topology(request): instance = DirSrv(verbose=DEBUGGING) instance.log.debug("Instance allocated") args = {SER_PORT: INSTANCE_PORT, SER_SERVERID_PROP: INSTANCE_SERVERID} instance.allocate(args) if instance.exists(): instance.delete() def fin(): if instance.exists() and not DEBUGGING: instance.delete() request.addfinalizer(fin) return TopologyInstance(instance) def test_setup_ds_minimal_dry(topology): # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 tmp_env = os.environ if "PYTHONPATH" in tmp_env: del tmp_env["PYTHONPATH"] # Create the setupDs lc = LogCapture() # Give it the right types. sds = SetupDs(verbose=DEBUGGING, dryrun=True, log=lc.log) # Get the dicts from Type2Base, as though they were from _validate_ds_2_config # IE get the defaults back just from Slapd2Base.collect # Override instance name, root password, port and secure port. 
general_options = General2Base(lc.log) general_options.verify() general = general_options.collect() slapd_options = Slapd2Base(lc.log) slapd_options.set('instance_name', INSTANCE_SERVERID) slapd_options.set('port', INSTANCE_PORT) slapd_options.set('secure_port', INSTANCE_SECURE_PORT) slapd_options.set('root_password', PW_DM) slapd_options.verify() slapd = slapd_options.collect() sds.create_from_args(general, slapd, {}, None) insts = topology.standalone.list(serverid=INSTANCE_SERVERID) # Assert we did not change the system. assert(len(insts) == 0) def test_setup_ds_minimal(topology): # Create the setupDs lc = LogCapture() # Give it the right types. sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log) # Get the dicts from Type2Base, as though they were from _validate_ds_2_config # IE get the defaults back just from Slapd2Base.collect # Override instance name, root password, port and secure port. general_options = General2Base(lc.log) general_options.verify() general = general_options.collect() slapd_options = Slapd2Base(lc.log) slapd_options.set('instance_name', INSTANCE_SERVERID) slapd_options.set('port', INSTANCE_PORT) slapd_options.set('secure_port', INSTANCE_SECURE_PORT) slapd_options.set('root_password', PW_DM) slapd_options.verify() slapd = slapd_options.collect() sds.create_from_args(general, slapd, {}, None) insts = topology.standalone.list(serverid=INSTANCE_SERVERID) # Assert we did change the system. assert(len(insts) == 1) # Make sure we can connect topology.standalone.open() # Make sure we can start stop. topology.standalone.stop() topology.standalone.start() # Okay, actually remove the instance remove_ds_instance(topology.standalone) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/setup_ds/remove_test.py000066400000000000000000000037301421664411400271070ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
# See LICENSE for details. # --- END COPYRIGHT BLOCK --- import os import subprocess import pytest import logging from lib389 import DirSrv from lib389.instance.remove import remove_ds_instance from lib389._constants import ReplicaRole from lib389.topologies import create_topology from lib389.utils import ds_is_older pytestmark = pytest.mark.tier0 @pytest.fixture(scope="function") def topology_st(request): """Create DS standalone instance""" topology = create_topology({ReplicaRole.STANDALONE: 1}) def fin(): if topology.standalone.exists(): topology.standalone.delete() request.addfinalizer(fin) return topology @pytest.mark.skipif(ds_is_older('1.4.3'), reason="Backend split, lib389 supports only cn=bdb,cn=config...") @pytest.mark.parametrize("simple_allocate", (True, False)) def test_basic(topology_st, simple_allocate): """Check that all DS directories and systemd items were removed :id: 9e8bbcda-358d-4e9c-a38c-9b4c3b63308e :parametrized: yes """ inst = topology_st.standalone # FreeIPA uses local_simple_allocate for the removal process if simple_allocate: inst = DirSrv(verbose=inst.verbose) inst.local_simple_allocate(topology_st.standalone.serverid) remove_ds_instance(inst) paths = [inst.ds_paths.backup_dir, inst.ds_paths.cert_dir, inst.ds_paths.config_dir, inst.ds_paths.db_dir, inst.get_changelog_dir(), inst.ds_paths.ldif_dir, inst.ds_paths.lock_dir, inst.ds_paths.log_dir] for path in paths: assert not os.path.exists(path) try: subprocess.check_output(['systemctl', 'is-enabled', 'dirsrv@{}'.format(inst.serverid)], encoding='utf-8') except subprocess.CalledProcessError as ex: assert "disabled" in ex.output 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/snmp/000077500000000000000000000000001421664411400233255ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/snmp/__init__.py000066400000000000000000000000531421664411400254340ustar00rootroot00000000000000""" :Requirement: 389-ds-base: SNMP """ 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/state/000077500000000000000000000000001421664411400234705ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/state/__init__.py000066400000000000000000000001121421664411400255730ustar00rootroot00000000000000""" :Requirement: 389-ds-base: 389-ds-base: Operational Attributes """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/state/mmt_state_test.py000066400000000000000000000447141421664411400271100ustar00rootroot00000000000000import os import logging import ldap import pytest from lib389.idm.user import UserAccounts from lib389.topologies import topology_m2 as topo from lib389._constants import * pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) BINVALUE1 = 'thedeadbeef1' BINVALUE2 = 'thedeadbeef2' BINVALUE3 = 'thedeadbeef3' USER_PROPERTIES = { 'uid': 'state1usr', 'cn': 'state1usr', 'sn': 'state1usr', 'uidNumber': '1001', 'gidNumber': '2001', 'userpassword': PASSWORD, 'homeDirectory': '/home/testuser' } def _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr): """Check if list of operational attributes present for a given entry""" log.info('Checking if operational attrs vucsn, adcsn and vdcsn present for: {}'.format(tuser)) entry = topo.ms["supplier1"].search_s(tuser.dn, ldap.SCOPE_BASE, 'objectclass=*',['nscpentrywsi']) if oper_attr: for line in str(entry).split('\n'): if attr_name + ';' in line: if not 'DELETE' in oper_type: assert any(attr in line for attr in exp_values) and oper_attr in line else: assert 'deleted' in line and oper_attr in line and attr_value in line @pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", [('description', 'Test1usr1', 'ldap.MOD_ADD', ['Test1usr1'], 'vucsn'), ('description', 
'Test1usr2', 'ldap.MOD_ADD', ['Test1usr1', 'Test1usr2'], 'vucsn'), ('description', 'Test1usr3', 'ldap.MOD_ADD', ['Test1usr1', 'Test1usr2', 'Test1usr3'], 'vucsn'), ('description', 'Test1usr4', 'ldap.MOD_REPLACE', ['Test1usr4'], 'adcsn'), ('description', 'Test1usr4', 'ldap.MOD_DELETE', [], 'vdcsn')]) def test_check_desc_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): """Modify user's description attribute and check if description attribute is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. :id: f0830538-02cf-11e9-8be0-8c16451d917b :parametrized: yes :setup: Replication with two suppliers. :steps: 1. Add user to Supplier1 without description attribute. 2. Add description attribute to user. 3. Check if only one description attribute exist. 4. Check if operational attribute vucsn exist. 5. Add second description attribute to user. 6. Check if two description attributes exist. 7. Check if operational attribute vucsn exist. 8. Add third description attribute to user. 9. Check if three description attributes exist. 10. Check if operational attribute vucsn exist. 11. Replace description attribute for the user. 12. Check if only one description attribute exist. 13. Check if operational attribute adcsn exist. 14. Delete description attribute for the user. 15. Check if no description attribute exist. 16. Check if no operational attribute vdcsn exist. :expectedresults: 1. Add user to M1 should PASS. 2. Adding description attribute should PASS 3. Only one description attribute should be present. 4. Vucsn attribute should be present. 5. Adding a new description attribute should PASS 6. Two description attribute should be present. 7. Vucsn attribute should be present. 8. Adding a new description attribute should PASS 9. Three description attribute should be present. 10. Vucsn attribute should be present. 11. Replacing new description attribute should PASS 12. Only one description attribute should be present. 13. 
Adcsn attribute should be present. 14. Deleting description attribute should PASS 15. No description attribute should be present. 16. Vdcsn attribute should be present. """ test_entry = 'state1test' log.info('Add user: {}'.format(test_entry)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) try: tuser = users.get(test_entry) except ldap.NO_SUCH_OBJECT: USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) tuser = users.create(properties=USER_PROPERTIES) tuser.set(attr_name, attr_value, eval(oper_type)) log.info('Check if list of description attrs present for: {}'.format(test_entry)) assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) log.info('Checking for operational attributes') _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) @pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", [('cn', 'TestCN1', 'ldap.MOD_ADD', ['TestCN1', 'TestCNusr1'], 'vucsn'), ('cn', 'TestCN2', 'ldap.MOD_ADD', ['TestCN1', 'TestCN2', 'TestCNusr1'], 'vucsn'), ('cn', 'TestnewCN3', 'ldap.MOD_REPLACE', ['TestnewCN3'], 'adcsn'), ('cn', 'TestnewCN3', 'ldap.MOD_DELETE', None, None)]) def test_check_cn_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): """Modify user's cn attribute and check if cn attribute is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. :id: 19614bae-02d0-11e9-a295-8c16451d917b :parametrized: yes :setup: Replication with two suppliers. :steps: 1. Add user to Supplier1 with cn attribute. 2. Add a new cn attribute to user. 3. Check if two cn attributes exist. 4. Check if operational attribute vucsn exist for each cn attribute. 5. Add a new cn attribute to user. 6. Check if three cn attributes exist. 7. Check if operational attribute vucsn exist for each cn attribute. 8. Replace cn attribute for the user. 9. Check if only one cn attribute exist. 10. 
Check if operational attribute adcsn exist. 11. Delete cn attribute from user and check if it fails. :expectedresults: 1. Add user to M1 should PASS. 2. Adding a new cn attribute should PASS 3. Two cn attribute should be present. 4. Vucsn attribute should be present. 5. Adding a new cn attribute should PASS 6. Three cn attribute should be present. 7. Vucsn attribute should be present. 8. Replacing new cn attribute should PASS 9. Only one cn attribute should be present. 10. Operational attribute adcsn should be present. 11. Deleting cn attribute should fail with ObjectClass violation error. """ test_entry = 'TestCNusr1' log.info('Add user: {}'.format(test_entry)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) try: tuser = users.get(test_entry) except ldap.NO_SUCH_OBJECT: USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) tuser = users.create(properties=USER_PROPERTIES) if 'MOD_DELETE' in oper_type: with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): tuser.set(attr_name, attr_value, eval(oper_type)) else: tuser.set(attr_name, attr_value, eval(oper_type)) log.info('Check if list of cn attrs present for: {}'.format(test_entry)) assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) log.info('Checking for operational attributes') _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) @pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", [('preferredlanguage', 'Chinese', 'ldap.MOD_REPLACE', ['Chinese'], 'vucsn'), ('preferredlanguage', 'French', 'ldap.MOD_ADD', None, None), ('preferredlanguage', 'German', 'ldap.MOD_REPLACE', ['German'], 'adcsn'), ('preferredlanguage', 'German', 'ldap.MOD_DELETE', [], 'vdcsn')]) def test_check_single_value_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): """Modify user's preferredlanguage attribute and check if preferredlanguage attribute is added/modified/deleted and operational attributes 
vucsn, adcsn and vdcsn are present. :id: 22fd645e-02d0-11e9-a9e4-8c16451d917b :parametrized: yes :setup: Replication with two suppliers. :steps: 1. Add user to Supplier1 without preferredlanguage attribute. 2. Add a new preferredlanguage attribute to user. 3. Check if one preferredlanguage attributes exist. 4. Check if operational attribute vucsn exist. 5. Add a new preferredlanguage attribute for the user and check if its rejected. 6. Replace preferredlanguage attribute for the user. 7. Check if only one preferredlanguage attribute exist. 8. Check if operational attribute adcsn exist with preferredlanguage. :expectedresults: 1. Add user to M1 should PASS. 2. Adding a new preferredlanguage attribute should PASS 3. Only one preferredlanguage attribute should be present. 4. Vucsn attribute should be present. 5. Adding a new preferredlanguage should fail with ObjectClass violation error. 6. Replace preferredlanguage should PASS. 7. Only one preferredlanguage attribute should be present. 8. Operational attribute adcsn should be present with preferredlanguage. 
""" test_entry = 'Langusr1' log.info('Add user: {}'.format(test_entry)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) try: tuser = users.get(test_entry) except ldap.NO_SUCH_OBJECT: USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) tuser = users.create(properties=USER_PROPERTIES) if 'MOD_ADD' in oper_type: with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): tuser.set(attr_name, attr_value, eval(oper_type)) else: tuser.set(attr_name, attr_value, eval(oper_type)) log.info('Check if list of cn attrs present for: {}'.format(test_entry)) assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) log.info('Checking for operational attributes') _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) @pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", [('roomnumber;office', 'Tower1', 'ldap.MOD_ADD', ['Tower1'], 'vucsn'), ('roomnumber;office', 'Tower2', 'ldap.MOD_ADD', ['Tower1', 'Tower2'], 'vucsn'), ('roomnumber;office', 'Tower3', 'ldap.MOD_ADD', ['Tower1', 'Tower2', 'Tower3'], 'vucsn'), ('roomnumber;office', 'Tower4', 'ldap.MOD_REPLACE', ['Tower4'], 'adcsn'), ('roomnumber;office', 'Tower4', 'ldap.MOD_DELETE', [], 'vucsn')]) def test_check_subtype_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): """Modify user's roomnumber;office attribute subtype and check if roomnumber;office attribute is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. :id: 29ab87a4-02d0-11e9-b104-8c16451d917b :parametrized: yes :setup: Replication with two suppliers. :steps: 1. Add user to Supplier1 without roomnumber;office attribute. 2. Add roomnumber;office attribute to user. 3. Check if only one roomnumber;office attribute exist. 4. Check if operational attribute vucsn exist. 5. Add second roomnumber;office attribute to user. 6. Check if two roomnumber;office attributes exist. 7. 
Check if operational attribute vucsn exist. 8. Add third roomnumber;office attribute to user. 9. Check if three roomnumber;office attributes exist. 10. Check if operational attribute vucsn exist. 11. Replace roomnumber;office attribute for the user. 12. Check if only one roomnumber;office attribute exist. 13. Check if operational attribute adcsn exist. 14. Delete roomnumber;office attribute for the user. 15. Check if no roomnumber;office attribute exist. 16. Check if no operational attribute vdcsn exist. :expectedresults: 1. Add user to M1 should PASS. 2. Adding roomnumber;office attribute should PASS 3. Only one roomnumber;office attribute should be present. 4. Vucsn attribute should be present. 5. Adding a new roomnumber;office attribute should PASS 6. Two roomnumber;office attribute should be present. 7. Vucsn attribute should be present. 8. Adding a new roomnumber;office attribute should PASS 9. Three roomnumber;office attribute should be present. 10. Vucsn attribute should be present. 11. Replacing new roomnumber;office attribute should PASS 12. Only one roomnumber;office attribute should be present. 13. Adcsn attribute should be present. 14. Deleting roomnumber;office attribute should PASS 15. No roomnumber;office attribute should be present. 16. Vdcsn attribute should be present. 
""" test_entry = 'roomoffice1usr' log.info('Add user: {}'.format(test_entry)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) try: tuser = users.get(test_entry) except ldap.NO_SUCH_OBJECT: USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) tuser = users.create(properties=USER_PROPERTIES) tuser.set(attr_name, attr_value, eval(oper_type)) log.info('Check if list of roomnumber;office attributes are present for a given entry') assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) log.info('Checking if operational attributes are present for cn') _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) @pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", [('jpegphoto', BINVALUE1, 'ldap.MOD_ADD', [BINVALUE1], 'vucsn'), ('jpegphoto', BINVALUE2, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2], 'vucsn'), ('jpegphoto', BINVALUE3, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2, BINVALUE3], 'vucsn'), ('jpegphoto', BINVALUE2, 'ldap.MOD_REPLACE', [BINVALUE2], 'adcsn'), ('jpegphoto', BINVALUE2, 'ldap.MOD_DELETE', [], 'vdcsn')]) def test_check_jpeg_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): """Modify user's jpegphoto attribute and check if jpegphoto attribute is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. :id: 312ac0d0-02d0-11e9-9d34-8c16451d917b :parametrized: yes :setup: Replication with two suppliers. :steps: 1. Add user to Supplier1 without jpegphoto attribute. 2. Add jpegphoto attribute to user. 3. Check if only one jpegphoto attribute exist. 4. Check if operational attribute vucsn exist. 5. Add second jpegphoto attribute to user. 6. Check if two jpegphoto attributes exist. 7. Check if operational attribute vucsn exist. 8. Add third jpegphoto attribute to user. 9. Check if three jpegphoto attributes exist. 10. Check if operational attribute vucsn exist. 11. 
Replace jpegphoto attribute for the user. 12. Check if only one jpegphoto attribute exist. 13. Check if operational attribute adcsn exist. 14. Delete jpegphoto attribute for the user. 15. Check if no jpegphoto attribute exist. 16. Check if no operational attribute vdcsn exist. :expectedresults: 1. Add user to M1 should PASS. 2. Adding jpegphoto attribute should PASS 3. Only one jpegphoto attribute should be present. 4. Vucsn attribute should be present. 5. Adding a new jpegphoto attribute should PASS 6. Two jpegphoto attribute should be present. 7. Vucsn attribute should be present. 8. Adding a new jpegphoto attribute should PASS 9. Three jpegphoto attribute should be present. 10. Vucsn attribute should be present. 11. Replacing new jpegphoto attribute should PASS 12. Only one jpegphoto attribute should be present. 13. Adcsn attribute should be present. 14. Deleting jpegphoto attribute should PASS 15. No jpegphoto attribute should be present. 16. Vdcsn attribute should be present. """ test_entry = 'testJpeg1usr' log.info('Add user: {}'.format(test_entry)) users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) try: tuser = users.get(test_entry) except ldap.NO_SUCH_OBJECT: USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) tuser = users.create(properties=USER_PROPERTIES) tuser.set(attr_name, attr_value, eval(oper_type)) log.info('Check if list of jpeg attributes are present for a given entry') assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) log.info('Checking if operational attributes are present for cn') _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) if __name__ == "__main__": # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syncrepl_plugin/000077500000000000000000000000001421664411400255655ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py000066400000000000000000000224151421664411400277020ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- """ :Requirement: 389-ds-base: Sync Replication Plugin """ import logging import ldap import time from ldap.syncrepl import SyncreplConsumer from lib389 import DirSrv from lib389.idm.user import nsUserAccounts from lib389.topologies import topology_st as topology from lib389._constants import DEFAULT_SUFFIX log = logging.getLogger(__name__) OU_PEOPLE = "ou=people,%s" % DEFAULT_SUFFIX class ISyncRepl(DirSrv, SyncreplConsumer): """ This implements a test harness for checking syncrepl, and allowing us to check various actions or behaviours. During a "run" it stores the results in it's instance, so that they can be inspected later to ensure that syncrepl worked as expected. """ def __init__(self, inst, openldap=False): ### 🚧 WARNING 🚧 # There are bugs with python ldap sync repl in ALL VERSIONS below 3.3.1. # These tests WILL FAIL unless you have version 3.3.1 or higher! assert ldap.__version__ >= '3.3.1' self.inst = inst self.msgid = None self.last_cookie = None self.next_cookie = None self.cookie = None self.openldap = openldap if self.openldap: # In openldap mode, our initial cookie needs to be a rid. 
self.cookie = "rid=123" self.delete = [] self.present = [] self.entries = {} super().__init__() def result4(self, *args, **kwargs): return self.inst.result4(*args, **kwargs, escapehatch='i am sure') def search_ext(self, *args, **kwargs): return self.inst.search_ext(*args, **kwargs, escapehatch='i am sure') def syncrepl_search(self, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, mode='refreshOnly', cookie=None, **search_args): # Wipe the last result set. self.delete = [] self.present = [] self.entries = {} self.refdel = False self.next_cookie = None # Start the sync # If cookie is none, will call "get_cookie" we have. self.msgid = super().syncrepl_search(base, scope, mode, cookie, **search_args) log.debug(f'syncrepl_search -> {self.msgid}') assert self.msgid is not None def syncrepl_complete(self): log.debug(f'syncrepl_complete -> {self.msgid}') assert self.msgid is not None # Loop until the operation is complete. time.sleep(1) while super().syncrepl_poll(msgid=self.msgid) is True: pass assert self.next_cookie is not None self.last_cookie = self.cookie self.cookie = self.next_cookie def check_cookie(self): assert self.last_cookie != self.cookie def syncrepl_set_cookie(self, cookie): log.debug(f'set_cookie -> {cookie}') if self.openldap: assert self.cookie.startswith("rid=123") self.next_cookie = cookie def syncrepl_get_cookie(self): log.debug('get_cookie -> %s' % self.cookie) if self.openldap: assert self.cookie.startswith("rid=123") return self.cookie def syncrepl_present(self, uuids, refreshDeletes=False): log.debug(f'=====> refdel -> {refreshDeletes} uuids -> {uuids}') if refreshDeletes: # Indicate we recieved a refdel in the process. 
self.refdel = True if uuids is not None: self.present = self.present + uuids def syncrepl_delete(self, uuids): log.debug(f'delete -> {uuids}') self.delete = uuids def syncrepl_entry(self, dn, attrs, uuid): log.debug(f'entry -> {dn}') self.entries[dn] = (uuid, attrs) def syncrepl_refreshdone(self): log.debug('refreshdone') def syncstate_assert(st, sync): # How many entries do we have? # We setup sync under ou=people so we can modrdn out of the scope. r = st.search_ext_s( base=OU_PEOPLE, scope=ldap.SCOPE_SUBTREE, filterstr='(objectClass=*)', attrsonly=1, escapehatch='i am sure' ) # Initial sync log.debug("*test* initial") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() # check we caught them all assert len(r) == len(sync.entries.keys()) assert len(r) == len(sync.present) assert 0 == len(sync.delete) if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel # Add a new entry account = nsUserAccounts(st, DEFAULT_SUFFIX).create_test_user() # Find the primary uuid we expect to see in syncrepl. # This will be None if not present. acc_uuid = account.get_attr_val_utf8('entryuuid') if not sync.openldap: nsid = account.get_attr_val_utf8('nsuniqueid') # nsunique has a diff format, so we change it up. # 431cf081-b44311ea-83fdb082-f24d490e # Add a hyphen V # 431cf081-b443-11ea-83fdb082-f24d490e nsid_a = nsid[:13] + '-' + nsid[13:] # Add a hyphen V # 431cf081-b443-11ea-83fd-b082-f24d490e nsid_b = nsid_a[:23] + '-' + nsid_a[23:] # Remove a hyphen V # 431cf081-b443-11ea-83fd-b082-f24d490e acc_uuid = nsid_b[:28] + nsid_b[29:] # Tada! 
# 431cf081-b443-11ea-83fd-b082f24d490e log.debug(f"--> expected sync uuid (from nsuniqueid): {acc_uuid}") else: log.debug(f"--> expected sync uuid (from entryuuid): {acc_uuid}") # Check log.debug("*test* add") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() sync.check_cookie() log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 1 == len(sync.entries.keys()) assert 1 == len(sync.present) #################################### assert sync.present == [acc_uuid] assert 0 == len(sync.delete) if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel # Mod account.replace('description', 'change') # Check log.debug("*test* mod") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() sync.check_cookie() log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 1 == len(sync.entries.keys()) assert 1 == len(sync.present) #################################### assert sync.present == [acc_uuid] assert 0 == len(sync.delete) if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel ## ModRdn (remain in scope) account.rename('uid=test1_modrdn') # newsuperior=None # Check log.debug("*test* modrdn (in scope)") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() sync.check_cookie() log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 1 == len(sync.entries.keys()) assert 1 == len(sync.present) #################################### assert sync.present == [acc_uuid] assert 0 == len(sync.delete) if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel # import time # print("attach now ....") # time.sleep(45) ## Modrdn (out of scope, then back into scope) account.rename('uid=test1_modrdn', newsuperior=DEFAULT_SUFFIX) # Check it's gone. 
log.debug("*test* modrdn (move out of scope)") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() sync.check_cookie() log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 0 == len(sync.entries.keys()) assert 0 == len(sync.present) ## WARNING: This test MAY FAIL here if you do not have a new enough python-ldap # due to an ASN.1 parsing bug. You require at least python-ldap 3.3.1 assert 1 == len(sync.delete) assert sync.delete == [acc_uuid] if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel # Put it back account.rename('uid=test1_modrdn', newsuperior=OU_PEOPLE) log.debug("*test* modrdn (move in to scope)") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() sync.check_cookie() log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 1 == len(sync.entries.keys()) assert 1 == len(sync.present) #################################### assert sync.present == [acc_uuid] assert 0 == len(sync.delete) if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel ## Delete account.delete() # Check log.debug("*test* del") sync.syncrepl_search(base=OU_PEOPLE) sync.syncrepl_complete() # In a delete, the cookie isn't updated (?) sync.check_cookie() log.debug(f'{sync.entries.keys()}') log.debug(f'{sync.present}') log.debug(f'{sync.delete}') log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") assert 0 == len(sync.entries.keys()) assert 0 == len(sync.present) assert 1 == len(sync.delete) assert sync.delete == [acc_uuid] #################################### if sync.openldap: assert True == sync.refdel else: assert False == sync.refdel 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py000066400000000000000000000467201421664411400302700ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). 
# See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import ldap import time import threading from ldap.syncrepl import SyncreplConsumer from ldap.ldapobject import ReconnectLDAPObject import pytest from lib389 import DirSrv from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit from lib389.idm.user import nsUserAccounts, UserAccounts from lib389.idm.group import Groups from lib389.topologies import topology_st as topology from lib389.topologies import topology_m2 as topo_m2 from lib389.paths import Paths from lib389.utils import ds_is_older from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate from lib389._constants import * from . import ISyncRepl, syncstate_assert default_paths = Paths() pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) @pytest.fixture(scope="function") def init_sync_repl_plugins(topology, request): """Prepare test environment (retroCL/sync_repl/ automember/memberof) and cleanup at the end of the test 1.: enable retroCL 2.: configure retroCL to log nsuniqueid as targetUniqueId 3.: enable content_sync plugin 4.: enable automember 5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem. 
6.: configure automember to provision those groups with 'member' 7.: enable and configure memberof plugin 8.: enable plugin log level 9.: restart the server """ inst = topology[0] inst.restart() # Enable/configure retroCL plugin = RetroChangelogPlugin(inst) plugin.disable() plugin.enable() plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') # Enable sync plugin plugin = ContentSyncPlugin(inst) plugin.enable() # Enable automember plugin = AutoMembershipPlugin(inst) plugin.disable() plugin.enable() # Add the automember group groups = Groups(inst, DEFAULT_SUFFIX) group = [] for i in range(1,5): group.append(groups.create(properties={'cn': 'group%d' % i})) # Add the automember config entry am_configs = AutoMembershipDefinitions(inst) am_configs_cleanup = [] for g in group: am_config = am_configs.create(properties={'cn': 'config %s' % g.get_attr_val_utf8('cn'), 'autoMemberScope': DEFAULT_SUFFIX, 'autoMemberFilter': 'uid=*', 'autoMemberDefaultGroup': g.dn, 'autoMemberGroupingAttr': 'member:dn'}) am_configs_cleanup.append(am_config) # Enable and configure memberof plugin plugin = MemberOfPlugin(inst) plugin.disable() plugin.enable() plugin.replace_groupattr('member') memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX)) try: memberof_config.create(properties={'cn': 'memberOf config', 'memberOfGroupAttr': 'member', 'memberOfAttr': 'memberof'}) except ldap.ALREADY_EXISTS: pass # Enable plugin log level (usefull for debug) inst.setLogLevel(65536) inst.restart() def fin(): inst.restart() for am_config in am_configs_cleanup: am_config.delete() for g in group: try: g.delete() except: pass request.addfinalizer(fin) #unstable or unstatus tests, skipped for now #it fails, let's say 1 time out of 10, while decoding asn1 response @pytest.mark.flaky(max_runs=2, min_passes=1) @pytest.mark.skipif(ldap.__version__ < '3.3.1', reason="python ldap versions less that 3.3.1 have bugs in sync repl that will cause this to fail!") def 
test_syncrepl_basic(topology): """ Test basic functionality of the SyncRepl interface :id: f9fea826-8ae2-412a-8e88-b8e0ba939b06 :setup: Standalone instance :steps: 1. Enable Retro Changelog 2. Enable Syncrepl 3. Run the syncstate test to check refresh, add, delete, mod. :expectedresults: 1. Success 1. Success 1. Success """ st = topology.standalone # Enable RetroChangelog. rcl = RetroChangelogPlugin(st) rcl.enable() # Set the default targetid rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') # Enable sync repl csp = ContentSyncPlugin(st) csp.enable() # Restart DS st.restart() # Setup the syncer sync = ISyncRepl(st) # Run the checks syncstate_assert(st, sync) class TestSyncer(ReconnectLDAPObject, SyncreplConsumer): def __init__(self, *args, **kwargs): self.cookie = None self.cookies = [] ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs) def syncrepl_set_cookie(self, cookie): # extract the changenumber from the cookie self.cookie = cookie self.cookies.append(cookie.split('#')[2]) log.info("XXXX Set cookie: %s" % cookie) def syncrepl_get_cookie(self): log.info("XXXX Get cookie: %s" % self.cookie) return self.cookie def syncrepl_present(self, uuids, refreshDeletes=False): log.info("XXXX syncrepl_present uuids %s %s" % ( uuids, refreshDeletes)) def syncrepl_delete(self, uuids): log.info("XXXX syncrepl_delete uuids %s" % uuids) def syncrepl_entry(self, dn, attrs, uuid): log.info("XXXX syncrepl_entry dn %s" % dn) def syncrepl_refreshdone(self): log.info("XXXX syncrepl_refreshdone") def get_cookies(self): return self.cookies class Sync_persist(threading.Thread, ReconnectLDAPObject, SyncreplConsumer): # This runs a sync_repl client in background # it registers a result that contain a list of the change numbers (from the cookie) # that are list as they are received def __init__(self, inst): threading.Thread.__init__(self) self.daemon = True self.inst = inst self.cookie = None self.conn = inst.clone({SER_ROOT_DN: 'cn=directory manager', 
SER_ROOT_PW: 'password'}) self.filterstr = '(|(objectClass=groupofnames)(objectClass=person))' self.attrs = [ 'objectclass', 'cn', 'displayname', 'gidnumber', 'givenname', 'homedirectory', 'mail', 'member', 'memberof', 'sn', 'uid', 'uidnumber', ] self.conn.open() self.result = [] def get_result(self): # used to return the cookies list to the requestor return self.result def run(self): """Start a sync repl client""" ldap_connection = TestSyncer(self.inst.toLDAPURL()) ldap_connection.simple_bind_s('cn=directory manager', 'password') ldap_search = ldap_connection.syncrepl_search( "dc=example,dc=com", ldap.SCOPE_SUBTREE, mode='refreshAndPersist', attrlist=self.attrs, filterstr=self.filterstr, cookie=None ) try: while ldap_connection.syncrepl_poll(all=1, msgid=ldap_search): pass except (ldap.SERVER_DOWN, ldap.CONNECT_ERROR) as e: print('syncrepl_poll: LDAP error (%s)', e) self.result = ldap_connection.get_cookies() log.info("ZZZ result = %s" % self.result) self.conn.unbind() def test_sync_repl_mep(topology, request): """Test sync repl with MEP plugin that triggers several updates on the same entry :id: d9515930-293e-42da-9835-9f255fa6111b :setup: Standalone Instance :steps: 1. enable retro/sync_repl/mep 2. Add mep Template and definition entry 3. start sync_repl client 4. Add users with PosixAccount ObjectClass (mep will update it several times) 5. Check that the received cookie are progressing :expected results: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ inst = topology[0] # Enable/configure retroCL plugin = RetroChangelogPlugin(inst) plugin.disable() plugin.enable() plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') # Enable sync plugin plugin = ContentSyncPlugin(inst) plugin.enable() # Check the plug-in status mana = ManagedEntriesPlugin(inst) plugin.enable() # Add Template and definition entry org1 = OrganizationalUnits(inst, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) org2 = OrganizationalUnit(inst, f'ou=Groups,{DEFAULT_SUFFIX}') meps = MEPTemplates(inst, DEFAULT_SUFFIX) mep_template1 = meps.create(properties={ 'cn': 'UPG Template1', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup', 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) conf_mep = MEPConfigs(inst) mep_config = conf_mep.create(properties={ 'cn': 'UPG Definition2', 'originScope': org1.dn, 'originFilter': 'objectclass=posixaccount', 'managedBase': org2.dn, 'managedTemplate': mep_template1.dn}) # Enable plugin log level (usefull for debug) inst.setLogLevel(65536) inst.restart() # create a sync repl client and wait 5 seconds to be sure it is running sync_repl = Sync_persist(inst) sync_repl.start() time.sleep(5) # Add users with PosixAccount ObjectClass and verify creation of User Private Group user = UserAccounts(inst, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' # stop the server to get the sync_repl result set (exit from while loop). # Only way I found to acheive that. # and wait a bit to let sync_repl thread time to set its result before fetching it. 
inst.stop() time.sleep(10) cookies = sync_repl.get_result() # checking that the cookie are in increasing and in an acceptable range (0..1000) assert len(cookies) > 0 prev = -1 for cookie in cookies: log.info('Check cookie %s' % cookie) assert int(cookie) >= 0 assert int(cookie) < 1000 assert int(cookie) > prev prev = int(cookie) sync_repl.join() log.info('test_sync_repl_map: PASS\n') def test_sync_repl_cookie(topology, init_sync_repl_plugins, request): """Test sync_repl cookie are progressing is an increasing order when there are nested updates :id: d7fbde25-5702-46ac-b38e-169d7a68e97c :setup: Standalone Instance :steps: 1.: initialization/cleanup done by init_sync_repl_plugins fixture 2.: create a thread dedicated to run a sync repl client 3.: Create (9) users that will generate nested updates (automember/memberof) 4.: stop sync repl client and collect the list of cookie.change_no 5.: check that cookies.change_no are in increasing order :expectedresults: 1.: succeeds 2.: succeeds 3.: succeeds 4.: succeeds 5.: succeeds """ inst = topology[0] # create a sync repl client and wait 5 seconds to be sure it is running sync_repl = Sync_persist(inst) sync_repl.start() time.sleep(5) # create users, that automember/memberof will generate nested updates users = UserAccounts(inst, DEFAULT_SUFFIX) users_set = [] for i in range(10001, 10010): users_set.append(users.create_test_user(uid=i)) # stop the server to get the sync_repl result set (exit from while loop). # Only way I found to acheive that. # and wait a bit to let sync_repl thread time to set its result before fetching it. 
inst.stop() time.sleep(10) cookies = sync_repl.get_result() # checking that the cookie are in increasing and in an acceptable range (0..1000) assert len(cookies) > 0 prev = -1 for cookie in cookies: log.info('Check cookie %s' % cookie) assert int(cookie) >= 0 assert int(cookie) < 1000 assert int(cookie) > prev prev = int(cookie) sync_repl.join() log.info('test_sync_repl_cookie: PASS\n') def fin(): inst.restart() for user in users_set: try: user.delete() except: pass request.addfinalizer(fin) return def test_sync_repl_cookie_add_del(topology, init_sync_repl_plugins, request): """Test sync_repl cookie are progressing is an increasing order when there add and del :id: 83e11038-6ed0-4a5b-ac77-e44887ab11e3 :setup: Standalone Instance :steps: 1.: initialization/cleanup done by init_sync_repl_plugins fixture 2.: create a thread dedicated to run a sync repl client 3.: Create (3) users that will generate nested updates (automember/memberof) 4.: Delete (3) users 5.: stop sync repl client and collect the list of cookie.change_no 6.: check that cookies.change_no are in increasing order :expectedresults: 1.: succeeds 2.: succeeds 3.: succeeds 4.: succeeds 5.: succeeds 6.: succeeds """ inst = topology[0] # create a sync repl client and wait 5 seconds to be sure it is running sync_repl = Sync_persist(inst) sync_repl.start() time.sleep(5) # create users, that automember/memberof will generate nested updates users = UserAccounts(inst, DEFAULT_SUFFIX) users_set = [] for i in range(10001, 10004): users_set.append(users.create_test_user(uid=i)) time.sleep(10) # delete users, that automember/memberof will generate nested updates for user in users_set: user.delete() # stop the server to get the sync_repl result set (exit from while loop). # Only way I found to acheive that. # and wait a bit to let sync_repl thread time to set its result before fetching it. 
inst.stop() cookies = sync_repl.get_result() # checking that the cookie are in increasing and in an acceptable range (0..1000) assert len(cookies) > 0 prev = -1 for cookie in cookies: log.info('Check cookie %s' % cookie) assert int(cookie) >= 0 assert int(cookie) < 1000 assert int(cookie) > prev prev = int(cookie) sync_repl.join() log.info('test_sync_repl_cookie_add_del: PASS\n') def fin(): pass request.addfinalizer(fin) return def test_sync_repl_cookie_with_failure(topology, init_sync_repl_plugins, request): """Test sync_repl cookie are progressing is the right order when there is a failure in nested updates :id: e0103448-170e-4080-8f22-c34606447ce2 :setup: Standalone Instance :steps: 1.: initialization/cleanup done by init_sync_repl_plugins fixture 2.: update group2 so that it will not accept 'member' attribute (set by memberof) 3.: create a thread dedicated to run a sync repl client 4.: Create a group that will be the only update received by sync repl client 5.: Create (9) users that will generate nested updates (automember/memberof). 
creation will fail because 'member' attribute is not allowed in group2 6.: stop sync repl client and collect the list of cookie.change_no 7.: check that the list of cookie.change_no contains only the group 'step 11' :expectedresults: 1.: succeeds 2.: succeeds 3.: succeeds 4.: succeeds 5.: Fails (expected) 6.: succeeds 7.: succeeds """ inst = topology[0] # Set group2 as a groupOfUniqueNames so that automember will fail to update that group # This will trigger a failure in internal MOD and a failure to add member group2 = Groups(inst, DEFAULT_SUFFIX).get('group2') group2.replace('objectclass', 'groupOfUniqueNames') # create a sync repl client and wait 5 seconds to be sure it is running sync_repl = Sync_persist(inst) sync_repl.start() time.sleep(5) # Add a test group just to check that sync_repl receives that SyncControlInfo cookie groups = Groups(inst, DEFAULT_SUFFIX) testgroup = groups.create(properties={'cn': 'group%d' % 10}) # create users, that automember/memberof will generate nested updates users = UserAccounts(inst, DEFAULT_SUFFIX) users_set = [] for i in range(1000,1010): try: users_set.append(users.create_test_user(uid=i)) # Automember should fail to add uid=1000 in group2 assert(False) except ldap.UNWILLING_TO_PERFORM: pass # stop the server to get the sync_repl result set (exit from while loop). # Only way I found to acheive that. # and wait a bit to let sync_repl thread time to set its result before fetching it. 
inst.stop() time.sleep(10) cookies = sync_repl.get_result() # checking that the cookie list contains only two entries # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh # the the one from SyncStateControl related to the only updated entry (group10) assert len(cookies) == 2 prev = -1 for cookie in cookies: log.info('Check cookie %s' % cookie) assert int(cookie) >= 0 assert int(cookie) < 1000 assert int(cookie) > prev prev = int(cookie) sync_repl.join() log.info('test_sync_repl_cookie_with_failure: PASS\n') def fin(): inst.restart() for user in users_set: try: user.delete() except: pass testgroup.delete() request.addfinalizer(fin) def test_sync_repl_cenotaph(topo_m2, request): """Test the creation of a cenotaph while a sync repl client is running :id: 8ca1724a-cf42-4880-bf0f-be451f9bd3b4 :setup: MMR with 2 suppliers :steps: 1. Enable retroCL/content_sync 2. Run a sync repl client 3. create users 4. do a MODRDN of a user entry => creation of cenotaph 5. stop sync repl client :expectedresults: 1. Should succeeds 2. Should succeeds 3. Should succeeds 4. Should succeeds 5. Should succeeds """ m1 = topo_m2.ms["supplier1"] # Enable/configure retroCL plugin = RetroChangelogPlugin(m1) plugin.disable() plugin.enable() plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') # Enable sync plugin plugin = ContentSyncPlugin(m1) plugin.enable() # Restart DS m1.restart() # create a sync repl client and wait 5 seconds to be sure it is running sync_repl = Sync_persist(m1) sync_repl.start() time.sleep(5) # create users users = UserAccounts(m1, DEFAULT_SUFFIX) users_set = [] for i in range(10001, 10003): users_set.append(users.create_test_user(uid=i)) # rename the entry that would trigger the creation of a cenotaph users_set[0].rename("uid=foo") # stop the server to get the sync_repl result set (exit from while loop). # Only way I found to acheive that. # and wait a bit to let sync_repl thread time to set its result before fetching it. 
m1.stop() time.sleep(2) def fin(): m1.restart() for user in users_set: try: user.delete() except: pass request.addfinalizer(fin) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syncrepl_plugin/openldap_test.py000066400000000000000000000040451421664411400310030ustar00rootroot00000000000000# Copyright (C) 2020 William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import logging import ldap import time import pytest from lib389.topologies import topology_st as topology from lib389.paths import Paths from lib389.utils import ds_is_older from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin from lib389._constants import ErrorLog, DEFAULT_SUFFIX from lib389.plugins import EntryUUIDPlugin from . import ISyncRepl, syncstate_assert default_paths = Paths() pytestmark = pytest.mark.tier1 log = logging.getLogger(__name__) @pytest.mark.skipif(ldap.__version__ < '3.3.1' or not default_paths.rust_enabled or ds_is_older('1.4.4.0'), reason="Sync repl does not support openldap compat in older versions, and without entryuuid") def test_syncrepl_openldap(topology): """ Test basic functionality of the openldap syncrepl compatability handler. :id: 03039178-2cc6-40bd-b32c-7d6de108828b :setup: Standalone instance :steps: 1. Enable Retro Changelog 2. Enable Syncrepl 3. Run the syncstate test to check refresh, add, delete, mod. :expectedresults: 1. Success 1. Success 1. Success """ st = topology.standalone # Ensure entryuuid is setup plug = EntryUUIDPlugin(st) task = plug.fixup(DEFAULT_SUFFIX) task.wait() st.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) assert(task.is_complete() and task.get_exit_code() == 0) # Enable RetroChangelog. 
rcl = RetroChangelogPlugin(st) rcl.enable() # Set the default targetid rcl.add('nsslapd-attribute', 'nsuniqueid:targetUniqueId') rcl.add('nsslapd-attribute', 'entryuuid:targetEntryUUID') # Enable sync repl csp = ContentSyncPlugin(st) csp.add('syncrepl-allow-openldap', 'on') csp.enable() # Restart DS st.restart() # Setup the syncer sync = ISyncRepl(st, openldap=True) # Run the checks syncstate_assert(st, sync) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syntax/000077500000000000000000000000001421664411400236765ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syntax/__init__.py000066400000000000000000000000551421664411400260070ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Syntax """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syntax/acceptance_test.py000066400000000000000000000155001421664411400273760ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- import ldap import pytest import os from lib389.schema import Schema from lib389.config import Config from lib389.idm.user import UserAccounts from lib389.idm.group import Group, Groups from lib389._constants import DEFAULT_SUFFIX from lib389.topologies import log, topology_st as topo pytestmark = pytest.mark.tier0 log = log.getChild(__name__) @pytest.fixture(scope="function") def validate_syntax_off(topo, request): config = Config(topo.standalone) config.replace("nsslapd-syntaxcheck", "off") def fin(): config.replace("nsslapd-syntaxcheck", "on") request.addfinalizer(fin) def test_valid(topo, validate_syntax_off): """Test syntax-validate task with valid entries :id: ec402a5b-bfb1-494d-b751-71b0d31a4d83 :setup: Standalone instance :steps: 1. Set nsslapd-syntaxcheck to off 2. Clean error log 3. Run syntax validate task 4. Assert that there are no errors in the error log 5. 
Set nsslapd-syntaxcheck to on :expectedresults: 1. It should succeed 2. It should succeed 3. It should succeed 4. It should succeed 5. It should succeed """ inst = topo.standalone log.info('Clean the error log') inst.deleteErrorLogs() schema = Schema(inst) log.info('Attempting to add task entry...') validate_task = schema.validate_syntax(DEFAULT_SUFFIX) validate_task.wait() exitcode = validate_task.get_exit_code() assert exitcode == 0 error_lines = inst.ds_error_log.match('.*Found 0 invalid entries.*') assert (len(error_lines) == 1) log.info('Found 0 invalid entries - Success') def test_invalid_uidnumber(topo, validate_syntax_off): """Test syntax-validate task with invalid uidNumber attribute value :id: 30fdcae6-ffa6-4ec4-8da9-6fb138fc1828 :setup: Standalone instance :steps: 1. Set nsslapd-syntaxcheck to off 2. Clean error log 3. Add a user with uidNumber attribute set to an invalid value (string) 4. Run syntax validate task 5. Assert that there is corresponding error in the error log 6. Set nsslapd-syntaxcheck to on :expectedresults: 1. It should succeed 2. It should succeed 3. It should succeed 4. It should succeed 5. It should succeed 6. It should succeed """ inst = topo.standalone log.info('Clean the error log') inst.deleteErrorLogs() users = UserAccounts(inst, DEFAULT_SUFFIX) users.create_test_user(uid="invalid_value") schema = Schema(inst) log.info('Attempting to add task entry...') validate_task = schema.validate_syntax(DEFAULT_SUFFIX) validate_task.wait() exitcode = validate_task.get_exit_code() assert exitcode == 0 error_lines = inst.ds_error_log.match('.*uidNumber: value #0 invalid per syntax.*') assert (len(error_lines) == 1) log.info('Found an invalid entry with wrong uidNumber - Success') def test_invalid_dn_syntax_crash(topo): """Add an entry with an escaped space, restart the server, and try to delete it. In this case the DN is not correctly parsed and causes cache revert to to dereference a NULL pointer. 
So the delete can fail as long as the server does not crash. :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8 :setup: Standalone Instance :steps: 1. Add entry with leading escaped space in the RDN 2. Restart the server so the entry is rebuilt from the database 3. Delete the entry 4. The server should still be running :expectedresults: 1. Success 2. Success 3. Success 4. Success """ # Create group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties={'cn': ' test'}) # Restart the server topo.standalone.restart() # Delete group try: group.delete() except ldap.NO_SUCH_OBJECT: # This is okay in this case as we are only concerned about a crash pass # Make sure server is still running groups.list() @pytest.mark.parametrize("props, rawdn", [ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"), ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")]) def test_dn_syntax_spaces_delete(topo, props, rawdn): """Test that an entry with a space as the first character in the DN can be deleted without error. We also want to make sure the indexes are properly updated by repeatedly adding and deleting the entry, and that the entry cache is properly maintained. :id: b993f37c-c2b0-4312-992c-a9048ff98965 :customerscenario: True :parametrized: yes :setup: Standalone Instance :steps: 1. Create a group with a DN that has a space as the first/last character. 2. Delete group 3. Add group 4. Modify group 5. Restart server and modify entry 6. Delete group 7. Add group back 8. Delete group using specific DN :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success 6. Success 7. Success 8. 
Success """ # Create group groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties=props.copy()) # Delete group (verifies DN/RDN parsing works and cache is correct) group.delete() # Add group again (verifies entryrdn index was properly updated) groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties=props.copy()) # Modify the group (verifies dn/rdn parsing is correct) group.replace('description', 'escaped space group') # Restart the server. This will pull the entry from the database and # convert it into a cache entry, which is different than how a client # first adds an entry and is put into the cache before being written to # disk. topo.standalone.restart() # Make sure we can modify the entry (verifies cache entry was created # correctly) group.replace('description', 'escaped space group after restart') # Make sure it can still be deleted (verifies cache again). group.delete() # Add it back so we can delete it using a specific DN (sanity test to verify # another DN/RDN parsing variation). groups = Groups(topo.standalone, DEFAULT_SUFFIX) group = groups.create(properties=props.copy()) group = Group(topo.standalone, dn=rawdn) group.delete() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/syntax/mr_test.py000066400000000000000000000037651421664411400257400ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest import os import ldap from lib389.dbgen import dbgen_users from lib389._constants import * from lib389.topologies import topology_st as topo from lib389._controls import SSSRequestControl pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_sss_mr(topo): """Test matching rule/server side sort does not crash DS :id: 48c73d76-1694-420f-ab55-187135f2d260 :setup: Standalone Instance :steps: 1. Add sample entries to the database 2. Perform search using server side control (uid:2.5.13.3) :expectedresults: 1. Success 2. Success """ log.info("Creating LDIF...") ldif_dir = topo.standalone.get_ldif_dir() ldif_file = os.path.join(ldif_dir, 'mr-crash.ldif') dbgen_users(topo.standalone, 5, ldif_file, DEFAULT_SUFFIX) log.info("Importing LDIF...") topo.standalone.stop() assert topo.standalone.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) topo.standalone.start() log.info('Search using server side sorting using undefined mr in the attr...') sort_ctrl = SSSRequestControl(True, ['uid:2.5.13.3']) controls = [sort_ctrl] msg_id = topo.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=controls) try: rtype, rdata, rmsgid, response_ctrl = topo.standalone.result3(msg_id) except ldap.OPERATIONS_ERROR: pass log.info("Test PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/000077500000000000000000000000001421664411400231525ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/__init__.py000066400000000000000000000000771421664411400252670ustar00rootroot00000000000000""" :Requirement: 389-ds-base: Transport Layer 
Security """ 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/cipher_test.py000066400000000000000000000052471421664411400260450ustar00rootroot00000000000000import pytest import os from lib389.config import Encryption from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 def test_long_cipher_list(topo): """Test a long cipher list, and makre sure it is not truncated :id: bc400f54-3966-49c8-b640-abbf4fb2377d :setup: Standalone Instance :steps: 1. Set nsSSL3Ciphers to a very long list of ciphers 2. Ciphers are applied correctly :expectedresults: 1. Success 2. Success """ ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" CIPHER_LIST = ( "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," "+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," 
"-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" ) topo.standalone.enable_tls() enc = Encryption(topo.standalone) enc.set('nsSSL3Ciphers', CIPHER_LIST) topo.standalone.restart() enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') assert ENABLED_CIPHER in enabled_ciphers assert DISABLED_CIPHER not in enabled_ciphers if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/ssl_version_test.py000066400000000000000000000055711421664411400271410ustar00rootroot00000000000000import logging import pytest import os from lib389.config import Encryption from lib389.utils import ds_is_older from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier1 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_ssl_version_range(topo): """Specify a test case purpose or name here :id: bc400f54-3966-49c8-b640-abbf4fb2377e :customerscenario: True 1. Get current default range 2. Set sslVersionMin and verify it is applied after a restart 3. Set sslVersionMax and verify it is applied after a restart 4. Sanity test all the min/max versions :expectedresults: 1. Success 2. Success 3. Success 4. 
Success """ topo.standalone.enable_tls() enc = Encryption(topo.standalone) default_min = enc.get_attr_val_utf8('sslVersionMin') default_max = enc.get_attr_val_utf8('sslVersionMax') log.info(f"default min: {default_min} max: {default_max}") if DEBUGGING: topo.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') # Test that setting the min version is applied after a restart enc.replace('sslVersionMin', default_max) enc.replace('sslVersionMax', default_max) topo.standalone.restart() min = enc.get_attr_val_utf8('sslVersionMin') assert min == default_max # Test that setting the max version is applied after a restart enc.replace('sslVersionMin', default_min) enc.replace('sslVersionMax', default_min) topo.standalone.restart() max = enc.get_attr_val_utf8('sslVersionMax') assert max == default_min # 389-ds-base-1.4.3 == Fedora 32, 389-ds-base-1.4.4 == Fedora 33 # Starting from Fedora 33, cryptographic protocols (TLS 1.0 and TLS 1.1) were moved to LEGACY # So we should not check for the policies with our DEFAULT crypro setup # https://fedoraproject.org/wiki/Changes/StrongCryptoSettings2 if ds_is_older('1.4.4'): ssl_versions = [('sslVersionMin', ['TLS1.0', 'TLS1.1', 'TLS1.2', 'TLS1.0']), ('sslVersionMax', ['TLS1.0', 'TLS1.1', 'TLS1.2'])] else: ssl_versions = [('sslVersionMin', ['TLS1.2']), ('sslVersionMax', ['TLS1.2', 'TLS1.3'])] # Sanity test all the min/max versions for attr, versions in ssl_versions: for version in versions: # Test that the setting is correctly applied after a restart enc.replace(attr, version) topo.standalone.restart() current_val = enc.get_attr_val_utf8(attr) assert current_val == version if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/tls_cert_namespace_test.py000066400000000000000000000107141421664411400304210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 Red 
Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import time import subprocess import pytest from glob import glob from lib389.utils import * from lib389.topologies import topology_st from lib389.paths import Paths pytestmark = pytest.mark.tier1 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) p = Paths() @pytest.mark.ds50889 @pytest.mark.bz1638875 @pytest.mark.skipif(p.with_systemd == False, reason='Will not run without systemd') @pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") def test_pem_cert_in_private_namespace(topology_st): """Test if certificates are present in private /tmp namespace :id: 01bc27d0-6368-496a-9724-7fe1e8fb239b :customerscenario: True :setup: Standalone instance :steps: 1. Create DS instance 2. Enable TLS 3. Check if value of PrivateTmp == yes 4. Check if pem certificates are present in private /tmp 5. Check if pem certificates are not present in /etc/dirsrv/instance :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ PEM_CHECK = ['Self-Signed-CA.pem', 'Server-Cert-Key.pem', 'Server-Cert.pem'] PRIVATE_TMP = 'PrivateTmp=yes' standalone = topology_st.standalone log.info('Enable TLS') standalone.enable_tls() log.info('Checking PrivateTmp value') cmdline = ['systemctl', 'show', '-p', 'PrivateTmp', 'dirsrv@{}.service'.format(standalone.serverid)] log.info('Command used : %s' % format_cmd_list(cmdline)) result = subprocess.check_output(cmdline) assert PRIVATE_TMP in ensure_str(result) log.info('Check files in private /tmp') cert_path = glob('/tmp/systemd-private-*-dirsrv@{}.service-*/tmp/slapd-{}/'.format(standalone.serverid, standalone.serverid)) assert os.path.exists(cert_path[0]) for item in PEM_CHECK: log.info('Check that {} is present in private /tmp'.format(item)) assert os.path.exists(cert_path[0] + item) log.info('Check instance cert directory') cert_path = '/etc/dirsrv/slapd-{}/'.format(standalone.serverid) assert os.path.exists(cert_path) for item in PEM_CHECK: log.info('Check that {} is not present in /etc/dirsrv/slapd-{}/ directory'.format(item, standalone.serverid)) assert not os.path.exists(cert_path + item) @pytest.mark.ds50952 @pytest.mark.bz1809279 @pytest.mark.xfail(ds_is_older("1.4.3"), reason="Might fail because of bz1809279") @pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") def test_cert_category_authority(topology_st): """Test that certificate generated by instance has category: authority :id: b7e816e9-2786-4d76-9c5b-bb111b0870f2 :setup: Standalone instance :steps: 1. Create DS instance 2. Enable TLS 3. Check if Self-Signed-CA.pem is present 4. Trust the certificate 5. Search if the certificate has category: authority :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. 
Success """ PEM_FILE = 'Self-Signed-CA.pem' standalone = topology_st.standalone log.info('Enable TLS') standalone.enable_tls() log.info('Get certificate path') if ds_is_older('1.4.3'): cert_path = glob('/etc/dirsrv/slapd-{}/'.format(standalone.serverid)) else: cert_path = glob('/tmp/systemd-private-*-dirsrv@{}.service-*/tmp/slapd-{}/'.format(standalone.serverid, standalone.serverid)) log.info('Check that {} is present'.format(PEM_FILE)) signed_cert = cert_path[0] + PEM_FILE assert os.path.exists(signed_cert) log.info('Trust the certificate') subprocess.check_output(['trust', 'anchor', signed_cert]) log.info('Search if our certificate has category: authority') result = subprocess.check_output(['trust', 'list']) assert re.search(r'^(.*)label: ssca[.]389ds[.]example[.]com\n(.*).*\n.*category: authority$', ensure_str(result), re.MULTILINE) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE)389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/tls_check_crl_test.py000066400000000000000000000032621421664411400273650ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2018 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.topologies import topology_st pytestmark = pytest.mark.tier1 def test_tls_check_crl(topology_st): """Test that TLS check_crl configurations work as expected. :id: 9dfc6c62-dcae-44a9-83e8-b15c8e61c609 :steps: 1. Enable TLS 2. Set invalid value 3. Set valid values 4. Check config reset :expectedresults: 1. TlS is setup 2. The invalid value is rejected 3. The valid values are used 4. The value can be reset """ standalone = topology_st.standalone # Enable TLS standalone.enable_tls() # Check all the valid values. 
assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') with pytest.raises(ldap.OPERATIONS_ERROR): standalone.config.set('nsslapd-tls-check-crl', 'tnhoeutnoeutn') assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') standalone.config.set('nsslapd-tls-check-crl', 'peer') assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'peer') standalone.config.set('nsslapd-tls-check-crl', 'none') assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') standalone.config.set('nsslapd-tls-check-crl', 'all') assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'all') standalone.config.remove_all('nsslapd-tls-check-crl') assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/tls_import_ca_chain_test.py000066400000000000000000000033141421664411400305650ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2022, William Brown # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap import os from lib389.nss_ssl import NssSsl from lib389.topologies import topology_st pytestmark = pytest.mark.tier1 CA_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_ca_chain.pem') CRT_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_crt_chain.pem') KEY_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_key_chain.pem') KEY_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_key.pem') def test_tls_import_chain(topology_st): """Test that TLS import will correct report errors when there are multiple files in a chain. :id: b7ba71bd-112a-44a1-8a7e-8968249da419 :steps: 1. Attempt to import a ca chain :expectedresults: 1. 
The chain is rejected """ topology_st.standalone.stop() tls = NssSsl(dirsrv=topology_st.standalone) tls.reinit() with pytest.raises(ValueError): tls.add_cert(nickname='CA_CHAIN_1', input_file=CA_CHAIN_FILE) with pytest.raises(ValueError): tls.add_server_key_and_cert(KEY_FILE, CRT_CHAIN_FILE) with pytest.raises(ValueError): tls.add_server_key_and_cert(KEY_CHAIN_FILE, CRT_CHAIN_FILE) with pytest.raises(ValueError): tls.add_server_key_and_cert(KEY_FILE, KEY_CHAIN_FILE) with pytest.raises(ValueError): tls.import_rsa_crt(crt=CRT_CHAIN_FILE) with pytest.raises(ValueError): tls.import_rsa_crt(ca=CA_CHAIN_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py000066400000000000000000000027231421664411400276150ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2020 William Brown 0 entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") if __name__ == "__main__": # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/000077500000000000000000000000001421664411400225025ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/__init__.py000066400000000000000000000000001421664411400246010ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47560_test.py000066400000000000000000000132431421664411400260070ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.topologies import topology_st from lib389.utils import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) def test_ticket47560(topology_st): """ This test case does the following: SETUP - Create entry cn=group,SUFFIX - Create entry cn=member,SUFFIX - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - Enable Memberof Plugins # Here the cn=member entry has a 'memberOf' but # cn=group entry does not contain 'cn=member' in its member TEST CASE - start the fixupmemberof task - read the cn=member entry - check 'memberOf is now empty TEARDOWN - Delete entry cn=group,SUFFIX - Delete entry cn=member,SUFFIX - Disable Memberof Plugins """ def _enable_disable_mbo(value): """ Enable or disable mbo plugin depending on 'value' ('on'/'off') """ # enable/disable the mbo plugin if value == 'on': topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) else: topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) log.debug("-------------> _enable_disable_mbo(%s)" % value) topology_st.standalone.stop(timeout=120) time.sleep(1) topology_st.standalone.start(timeout=120) time.sleep(3) # need to reopen a connection toward the instance topology_st.standalone.open() def _test_ticket47560_setup(): """ - Create entry cn=group,SUFFIX - Create entry cn=member,SUFFIX - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - Enable Memberof Plugins """ log.debug("-------- > _test_ticket47560_setup\n") # # By default the memberof plugin is disabled create # - create a group entry # - create a member entry # - set the member entry as memberof the group entry # entry = Entry(group_DN) entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser') entry.setValues('cn', 'group') try: topology_st.standalone.add_s(entry) except ldap.ALREADY_EXISTS: log.debug("Entry %s 
already exists" % (group_DN)) entry = Entry(member_DN) entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') entry.setValues('uid', 'member') entry.setValues('cn', 'member') entry.setValues('sn', 'member') try: topology_st.standalone.add_s(entry) except ldap.ALREADY_EXISTS: log.debug("Entry %s already exists" % (member_DN)) replace = [(ldap.MOD_REPLACE, 'memberof', ensure_bytes(group_DN))] topology_st.standalone.modify_s(member_DN, replace) # # enable the memberof plugin and restart the instance # _enable_disable_mbo('on') # # check memberof attribute is still present # filt = 'uid=member' ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) assert len(ents) == 1 ent = ents[0] # print ent value = ensure_str(ent.getValue('memberof')) # print "memberof: %s" % (value) assert value == group_DN def _test_ticket47560_teardown(): """ - Delete entry cn=group,SUFFIX - Delete entry cn=member,SUFFIX - Disable Memberof Plugins """ log.debug("-------- > _test_ticket47560_teardown\n") # remove the entries group_DN and member_DN try: topology_st.standalone.delete_s(group_DN) except: log.warning("Entry %s fail to delete" % (group_DN)) try: topology_st.standalone.delete_s(member_DN) except: log.warning("Entry %s fail to delete" % (member_DN)) # # disable the memberof plugin and restart the instance # _enable_disable_mbo('off') group_DN = "cn=group,%s" % (SUFFIX) member_DN = "uid=member,%s" % (SUFFIX) # # Initialize the test case # _test_ticket47560_setup() # # start the test # - start the fixup task # - check the entry is fixed (no longer memberof the group) # log.debug("-------- > Start ticket tests\n") filt = 'uid=member' ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) assert len(ents) == 1 ent = ents[0] log.debug("Unfixed entry %r\n" % ent) # run the fixup task topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) ents = 
topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) assert len(ents) == 1 ent = ents[0] log.debug("Fixed entry %r\n" % ent) if ensure_str(ent.getValue('memberof')) == group_DN: log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) result_successful = False else: result_successful = True # # cleanup up the test case # _test_ticket47560_teardown() assert result_successful is True log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47573_test.py000066400000000000000000000202021421664411400260040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import re import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m1c1 from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX ENTRY_DN = "cn=test_entry, %s" % SUFFIX MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)" MAY_OLD = "(postalCode $ street)" MUST_NEW = "(postalAddress $ preferredLocale)" MAY_NEW = "(telexNumber $ postalCode $ street)" def pattern_errorlog(file, log_pattern): try: pattern_errorlog.last_pos += 1 except AttributeError: pattern_errorlog.last_pos = 0 found = None log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) file.seek(pattern_errorlog.last_pos) # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: line = file.readline() log.debug("_pattern_errorlog: [%d] %s" % 
(file.tell(), line)) found = log_pattern.search(line) if ((line == '') or (found)): break log.debug("_pattern_errorlog: end at offset %d" % file.tell()) pattern_errorlog.last_pos = file.tell() return found def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47573' sup = 'person' if not must: must = MUST_OLD if not may: may = MAY_OLD new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return ensure_bytes(new_oc) def add_OC(instance, oid_ext, name): new_oc = _oc_definition(oid_ext, name) instance.schema.add_schema('objectClasses', new_oc) def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): old_oc = _oc_definition(oid_ext, name, old_must, old_may) new_oc = _oc_definition(oid_ext, name, new_must, new_may) instance.schema.del_schema('objectClasses', old_oc) instance.schema.add_schema('objectClasses', new_oc) def trigger_schema_push(topology_m1c1): """ It triggers an update on the supplier. This will start a replication session and a schema push """ try: trigger_schema_push.value += 1 except AttributeError: trigger_schema_push.value = 1 replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_schema_push.value)))] topology_m1c1.ms["supplier1"].modify_s(ENTRY_DN, replace) # wait 10 seconds that the update is replicated loop = 0 while loop <= 10: try: ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) val = ent.telephonenumber or "0" if int(val) == trigger_schema_push.value: return # the expected value is not yet replicated. 
try again time.sleep(1) loop += 1 log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value)) except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 def test_ticket47573_init(topology_m1c1): """ Initialize the test environment """ log.debug("test_ticket47573_init topology_m1c1 %r (supplier %r, consumer %r" % (topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) # the test case will check if a warning message is logged in the # error log of the supplier topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") # This entry will be used to trigger attempt of schema push topology_m1c1.ms["supplier1"].add_s(Entry((ENTRY_DN, { 'objectclass': "top person".split(), 'sn': 'test_entry', 'cn': 'test_entry'}))) def test_ticket47573_one(topology_m1c1): """ Summary: Add a custom OC with MUST and MAY MUST = postalAddress $ preferredLocale MAY = telexNumber $ postalCode $ street Final state - supplier +OCwithMayAttr - consumer +OCwithMayAttr """ log.debug("test_ticket47573_one topology_m1c1 %r (supplier %r, consumer %r" % ( topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) # update the schema of the supplier so that it is a superset of # consumer. 
Schema should be pushed new_oc = _oc_definition(2, 'OCwithMayAttr', must=MUST_OLD, may=MAY_OLD) topology_m1c1.ms["supplier1"].schema.add_schema('objectClasses', new_oc) trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was updated on the consumer log.debug("test_ticket47573_one supplier_schema_csn=%s", supplier_schema_csn) log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) assert res is None def test_ticket47573_two(topology_m1c1): """ Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute Final state - supplier OCwithMayAttr updated - consumer OCwithMayAttr updated """ # Update the objectclass so that a MAY attribute is moved to MUST attribute mod_OC(topology_m1c1.ms["supplier1"], 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) # now push the scheam trigger_schema_push(topology_m1c1) supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() # Check the schemaCSN was NOT updated on the consumer log.debug("test_ticket47573_two supplier_schema_csn=%s", supplier_schema_csn) log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) assert supplier_schema_csn == consumer_schema_csn # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) assert res is None def 
test_ticket47573_three(topology_m1c1): ''' Create a entry with OCwithMayAttr OC ''' # Check replication is working fine dn = "cn=ticket47573, %s" % SUFFIX topology_m1c1.ms["supplier1"].add_s(Entry((dn, {'objectclass': "top person OCwithMayAttr".split(), 'sn': 'test_repl', 'cn': 'test_repl', 'postalAddress': 'here', 'preferredLocale': 'en', 'telexNumber': '12$us$21', 'postalCode': '54321'}))) loop = 0 ent = None while loop <= 10: try: ent = topology_m1c1.cs["consumer1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 if ent is None: assert False log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47619_test.py000066400000000000000000000060661421664411400260210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.topologies import topology_m1c1 pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX ENTRY_DN = "cn=test_entry, %s" % SUFFIX OTHER_NAME = 'other_entry' MAX_OTHERS = 100 ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] def test_ticket47619_init(topology_m1c1): """ Initialize the test environment """ topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG) # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF) # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY) topology_m1c1.ms["supplier1"].stop(timeout=10) topology_m1c1.ms["supplier1"].start(timeout=10) topology_m1c1.ms["supplier1"].log.info("test_ticket47619_init topology_m1c1 %r" % (topology_m1c1)) # the test case will check if a warning message is logged in the # error log of the supplier topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m1c1.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) topology_m1c1.ms["supplier1"].log.info( "test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) # Check the number of entries in the retro changelog time.sleep(2) ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") assert len(ents) == MAX_OTHERS def test_ticket47619_create_index(topology_m1c1): args = {INDEX_TYPE: 'eq'} for attr in ATTRIBUTES: 
topology_m1c1.ms["supplier1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) topology_m1c1.ms["supplier1"].restart(timeout=10) def test_ticket47619_reindex(topology_m1c1): ''' Reindex all the attributes in ATTRIBUTES ''' args = {TASK_WAIT: True} for attr in ATTRIBUTES: rc = topology_m1c1.ms["supplier1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) assert rc == 0 def test_ticket47619_check_indexed_search(topology_m1c1): for attr in ATTRIBUTES: ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) assert len(ents) == 0 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47640_test.py000066400000000000000000000052451421664411400260110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import PLUGIN_LINKED_ATTRS, DEFAULT_SUFFIX # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket47640(topology_st): ''' Linked Attrs Plugins - verify that if the plugin fails to update the link entry that the entire operation is aborted ''' # Enable Dynamic plugins, and the linked Attrs plugin try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: log.fatal('Failed to enable dynamic plugin!' 
+ e.message['desc']) assert False try: topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) except ValueError as e: log.fatal('Failed to enable linked attributes plugin!' + e.message['desc']) assert False # Add the plugin config entry try: topology_st.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { 'objectclass': 'top extensibleObject'.split(), 'cn': 'Manager Link', 'linkType': 'seeAlso', 'managedType': 'seeAlso' }))) except ldap.LDAPError as e: log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) assert False # Add an entry who has a link to an entry that does not exist OP_REJECTED = False try: topology_st.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'manager', 'seeAlso': 'uid=user,dc=example,dc=com' }))) except ldap.UNWILLING_TO_PERFORM: # Success log.info('Add operation correctly rejected.') OP_REJECTED = True except ldap.LDAPError as e: log.fatal('Add operation incorrectly rejected: error %s - ' + 'expected "unwilling to perform"' % e.message['desc']) assert False if not OP_REJECTED: log.fatal('Add operation incorrectly allowed') assert False log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47653MMR_test.py000066400000000000000000000332461421664411400263730ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 logging.getLogger(__name__).setLevel(logging.DEBUG) from lib389.utils import * # Skip on older versions pytestmark =[pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] log = logging.getLogger(__name__) DEBUGGING = os.getenv("DEBUGGING", default=False) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX OC_NAME = 'OCticket47653' MUST = "(postalAddress $ postalCode)" MAY = "(member $ street)" OTHER_NAME = 'other_entry' MAX_OTHERS = 10 BIND_NAME = 'bind_entry' BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' sup = 'person' if not must: must = MUST if not may: may = MAY new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return ensure_bytes(new_oc) def test_ticket47653_init(topology_m2): """ It adds - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci """ topology_m2.ms["supplier1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) # entry used to bind with topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { 'objectclass': "top person".split(), 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) if DEBUGGING: # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL 
topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # remove all aci's and start with a clean slate mod = [(ldap.MOD_DELETE, 'aci', None)] topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) topology_m2.ms["supplier2"].modify_s(SUFFIX, mod) # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) def test_ticket47653_add(topology_m2): ''' This test ADD an entry on SUPPLIER1 where 47653 is fixed. Then it checks that entry is replicated on SUPPLIER2 (even if on SUPPLIER2 47653 is NOT fixed). Then update on SUPPLIER2 and check the update on SUPPLIER1 It checks that, bound as bind_entry, - we can not ADD an entry without the proper SELFDN aci. - with the proper ACI we can not ADD with 'member' attribute - with the proper ACI and 'member' it succeeds to ADD ''' topology_m2.ms["supplier1"].log.info("\n\n######################### ADD ######################\n") # bind as bind_entry topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) # Prepare the entry with multivalued members entry_with_members = Entry(ENTRY_DN) entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') entry_with_members.setValues('sn', ENTRY_NAME) entry_with_members.setValues('cn', ENTRY_NAME) entry_with_members.setValues('postalAddress', 'here') entry_with_members.setValues('postalCode', '1234') members = [] for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) members.append("cn=%s,%s" % (name, SUFFIX)) members.append(BIND_DN) entry_with_members.setValues('member', members) # Prepare the entry with only one member value entry_with_member = Entry(ENTRY_DN) entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') entry_with_member.setValues('sn', ENTRY_NAME) 
entry_with_member.setValues('cn', ENTRY_NAME) entry_with_member.setValues('postalAddress', 'here') entry_with_member.setValues('postalCode', '1234') member = [] member.append(BIND_DN) entry_with_member.setValues('member', member) # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology_m2.ms["supplier1"].log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) topology_m2.ms["supplier1"].add_s(entry_with_member) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # Ok Now add the proper ACI topology_m2.ms["supplier1"].log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" ACI_SUBJECT = " userattr = \"member#selfDN\";)" ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) time.sleep(1) # bind as bind_entry topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS try: topology_m2.ms["supplier1"].log.info("Try to add Add %s (member is missing)" % ENTRY_DN) topology_m2.ms["supplier1"].add_s(Entry((ENTRY_DN, { 'objectclass': ENTRY_OC.split(), 'sn': ENTRY_NAME, 'cn': ENTRY_NAME, 'postalAddress': 'here', 'postalCode': '1234'}))) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) time.sleep(1) # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS # member should contain only one value try: 
topology_m2.ms["supplier1"].log.info("Try to add Add %s (with several member values)" % ENTRY_DN) topology_m2.ms["supplier1"].add_s(entry_with_members) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) time.sleep(2) topology_m2.ms["supplier1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) try: topology_m2.ms["supplier1"].add_s(entry_with_member) except ldap.LDAPError as e: topology_m2.ms["supplier1"].log.info("Failed to add entry, error: " + e.message['desc']) assert False # # Now check the entry as been replicated # topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("Try to retrieve %s from Supplier2" % ENTRY_DN) loop = 0 while loop <= 10: try: ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert loop <= 10 # Now update the entry on Supplier2 (as DM because 47653 is possibly not fixed on M2) topology_m2.ms["supplier1"].log.info("Update %s on M2" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) time.sleep(1) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) loop = 0 while loop <= 10: try: ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") if ent.hasAttr('description') and (ensure_str(ent.getValue('description')) == 'test_add'): break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert ensure_str(ent.getValue('description')) == 'test_add' def test_ticket47653_modify(topology_m2): ''' This test MOD an entry on SUPPLIER1 where 47653 is fixed. Then it checks that update is replicated on SUPPLIER2 (even if on SUPPLIER2 47653 is NOT fixed). Then update on SUPPLIER2 (bound as BIND_DN). 
This update may fail whether or not 47653 is fixed on SUPPLIER2 It checks that, bound as bind_entry, - we can not modify an entry without the proper SELFDN aci. - adding the ACI, we can modify the entry ''' # bind as bind_entry topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) topology_m2.ms["supplier1"].log.info("\n\n######################### MODIFY ######################\n") # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology_m2.ms["supplier1"].log.info("Try to modify %s (aci is missing)" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) except Exception as e: topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) # Ok Now add the proper ACI topology_m2.ms["supplier1"].log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" ACI_SUBJECT = " userattr = \"member#selfDN\";)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) time.sleep(2) # bind as bind_entry topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) time.sleep(1) # modify the entry and checks the value topology_m2.ms["supplier1"].log.info("M1: Try to modify %s. 
It should succeeds" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("M1: Check the update of %s" % ENTRY_DN) ents = topology_m2.ms["supplier1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 assert ensure_str(ents[0].postalCode) == '1928' # Now check the update has been replicated on M2 topology_m2.ms["supplier1"].log.info("M2: Bind as %s" % DN_DM) topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("M2: Try to retrieve %s" % ENTRY_DN) loop = 0 while loop <= 10: try: ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1928'): break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert loop <= 10 assert ensure_str(ent.getValue('postalCode')) == '1928' # Now update the entry on Supplier2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) topology_m2.ms["supplier1"].log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) topology_m2.ms["supplier2"].simple_bind_s(BIND_DN, PASSWORD) time.sleep(1) fail = False try: mod = [(ldap.MOD_REPLACE, 'postalCode', b'1929')] topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) fail = False except ldap.INSUFFICIENT_ACCESS: topology_m2.ms["supplier1"].log.info( "M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2") fail = True except Exception as e: topology_m2.ms["supplier1"].log.info("M2: Exception (not expected): %s" % type(e).__name__) assert 0 if not fail: # Check the update has been replicaed on M1 topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("M1: Check 
%s.postalCode=1929)" % (ENTRY_DN)) loop = 0 while loop <= 10: try: ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1929'): break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert ensure_str(ent.getValue('postalCode')) == '1929' if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47676_test.py000066400000000000000000000232231421664411400260160ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 from lib389.replica import ReplicationManager logging.getLogger(__name__).setLevel(logging.DEBUG) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] log = logging.getLogger(__name__) SCHEMA_DN = "cn=schema" TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX OC_NAME = 'OCticket47676' OC_OID_EXT = 2 MUST = "(postalAddress $ postalCode)" MAY = "(member $ street)" OC2_NAME = 'OC2ticket47676' OC2_OID_EXT = 3 MUST_2 = "(postalAddress $ postalCode)" MAY_2 = "(member $ street)" REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" OTHER_NAME = 'other_entry' MAX_OTHERS = 10 BIND_NAME = 'bind_entry' BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = 
"top person %s" % OC_NAME BASE_OID = "1.2.3.4.5.6.7.8.9.10" def _oc_definition(oid_ext, name, must=None, may=None): oid = "%s.%d" % (BASE_OID, oid_ext) desc = 'To test ticket 47490' sup = 'person' if not must: must = MUST if not may: may = MAY new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return ensure_bytes(new_oc) def replication_check(topology_m2): repl = ReplicationManager(SUFFIX) supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] return repl.test_replication(supplier1, supplier2) def test_ticket47676_init(topology_m2): """ It adds - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci """ topology_m2.ms["supplier1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY) topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) # entry used to bind with topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { 'objectclass': "top person".split(), 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) def test_ticket47676_skip_oc_at(topology_m2): ''' This test ADD an entry on SUPPLIER1 where 47676 is fixed. Then it checks that entry is replicated on SUPPLIER2 (even if on SUPPLIER2 47676 is NOT fixed). Then update on SUPPLIER2. 
If the schema has successfully been pushed, updating Supplier2 should succeed ''' topology_m2.ms["supplier1"].log.info("\n\n######################### ADD ######################\n") # bind as 'cn=Directory manager' topology_m2.ms["supplier1"].log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) # Prepare the entry with multivalued members entry = Entry(ENTRY_DN) entry.setValues('objectclass', 'top', 'person', 'OCticket47676') entry.setValues('sn', ENTRY_NAME) entry.setValues('cn', ENTRY_NAME) entry.setValues('postalAddress', 'here') entry.setValues('postalCode', '1234') members = [] for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) members.append("cn=%s,%s" % (name, SUFFIX)) members.append(BIND_DN) entry.setValues('member', members) topology_m2.ms["supplier1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) topology_m2.ms["supplier1"].add_s(entry) # # Now check the entry as been replicated # topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("Try to retrieve %s from Supplier2" % ENTRY_DN) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent # Now update the entry on Supplier2 (as DM because 47676 is possibly not fixed on M2) topology_m2.ms["supplier1"].log.info("Update %s on M2" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) replication_check(topology_m2) ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ensure_str(ent.getValue('description')) == 'test_add' def test_ticket47676_reject_action(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### REJECT ACTION ######################\n") topology_m2.ms["supplier1"].simple_bind_s(DN_DM, 
PASSWORD) topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) # make supplier1 to refuse to push the schema if OC_NAME is present in consumer schema mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) # Restart is required to take into account that policy topology_m2.ms["supplier1"].stop(timeout=10) topology_m2.ms["supplier1"].start(timeout=10) # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema topology_m2.ms["supplier1"].log.info("Add %s on M1" % OC2_NAME) new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) # Safety checking that the schema has been updated on M1 topology_m2.ms["supplier1"].log.info("Check %s is in M1" % OC2_NAME) ent = topology_m2.ms["supplier1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) assert ent.hasAttr('objectclasses') found = False for objectclass in ent.getValues('objectclasses'): if str(objectclass).find(OC2_NAME) >= 0: found = True break assert found # Do an update of M1 so that M1 will try to push the schema topology_m2.ms["supplier1"].log.info("Update %s on M1" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', b'test_reject')] topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) # Check the replication occured and so also M1 attempted to push the schema topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ensure_str(ent.getValue('description')) == 'test_reject' # Check that the schema has not been pushed topology_m2.ms["supplier1"].log.info("Check %s is not in M2" % OC2_NAME) ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) assert 
ent.hasAttr('objectclasses') found = False for objectclass in ent.getValues('objectclasses'): if str(objectclass).find(OC2_NAME) >= 0: found = True break assert not found topology_m2.ms["supplier1"].log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") # make supplier1 to do no specific action on OC_NAME mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) # Restart is required to take into account that policy topology_m2.ms["supplier1"].stop(timeout=10) topology_m2.ms["supplier1"].start(timeout=10) # Do an update of M1 so that M1 will try to push the schema topology_m2.ms["supplier1"].log.info("Update %s on M1" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', b'test_no_more_reject')] topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) # Check the replication occured and so also M1 attempted to push the schema topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ensure_str(ent.getValue('description')) == 'test_no_more_reject' # Check that the schema has been pushed topology_m2.ms["supplier1"].log.info("Check %s is in M2" % OC2_NAME) ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) assert ent.hasAttr('objectclasses') found = False for objectclass in ent.getValues('objectclasses'): if str(objectclass).find(OC2_NAME) >= 0: found = True break assert found if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47714_test.py000066400000000000000000000227541421664411400260170ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright 
(C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st log = logging.getLogger(__name__) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] ACCT_POLICY_CONFIG_DN = ('cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY) ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX # Set inactivty high to prevent timing issues with debug options or gdb on test runs. INACTIVITY_LIMIT = '3000' SEARCHFILTER = '(objectclass=*)' TEST_USER = 'ticket47714user' TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX) TEST_USER_PW = '%s' % TEST_USER def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def test_ticket47714_init(topology_st): """ 1. Add account policy entry to the DB 2. 
Add a test user to the DB """ _header(topology_st, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) topology_st.standalone.add_s( Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), 'accountInactivityLimit': INACTIVITY_LIMIT}))) log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) topology_st.standalone.add_s( Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': TEST_USER, 'sn': TEST_USER, 'givenname': TEST_USER, 'userPassword': TEST_USER_PW, 'acctPolicySubentry': ACCT_POLICY_DN}))) def test_ticket47714_run_0(topology_st): """ Check this change has no inpact to the existing functionality. 1. Set account policy config without the new attr alwaysRecordLoginAttr 2. Bind as a test user 3. Bind as the test user again and check the lastLoginTime is updated 4. Waint longer than the accountInactivityLimit time and bind as the test user, which should fail with CONSTANT_VIOLATION. 
""" _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Modify Account Policy config entry topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), (ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) # Enable the plugins topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) topology_st.standalone.restart() log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) try: topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) time.sleep(2) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) lastLoginTime0 = entry[0].lastLoginTime log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) try: topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) time.sleep(2) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) lastLoginTime1 = entry[0].lastLoginTime log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) assert lastLoginTime0 < lastLoginTime1 topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Now, change the inactivity limit, because that should trigger the account to now be locked. This is possible because the check is "delayed" until the usage of the account. 
topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'1'),]) time.sleep(2) entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER) log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN) log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit) log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN) log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN) try: topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.info('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) log.info("%s was successfully inactivated." % TEST_USER_DN) pass # Now reset the value high to prevent issues with the next test. topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', ensure_bytes(INACTIVITY_LIMIT)),]) def test_ticket47714_run_1(topology_st): """ Verify a new config attr alwaysRecordLoginAttr 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime Note: bogus attr is set to stateattrname. altstateattrname type value is used for checking whether the account is idle or not. 2. Bind as a test user 3. 
Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated """ _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)]) # Modify Account Policy config entry topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), (ldap.MOD_REPLACE, 'stateattrname', b'bogus'), (ldap.MOD_REPLACE, 'altstateattrname', b'modifyTimestamp'), ( ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', b'lastLoginTime'), (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), (ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) # Enable the plugins topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) topology_st.standalone.restart() log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) try: topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) time.sleep(1) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) lastLoginTime0 = entry[0].lastLoginTime log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) try: topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) time.sleep(1) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) lastLoginTime1 = entry[0].lastLoginTime log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) assert lastLoginTime0 < lastLoginTime1 
topology_st.standalone.log.info("ticket47714 was successfully verified.") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47721_test.py000066400000000000000000000262551421664411400260150ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 from lib389.replica import ReplicationManager from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) SCHEMA_DN = "cn=schema" TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX OC_NAME = 'OCticket47721' OC_OID_EXT = 2 MUST = "(postalAddress $ postalCode)" MAY = "(member $ street)" OC2_NAME = 'OC2ticket47721' OC2_OID_EXT = 3 MUST_2 = "(postalAddress $ postalCode)" MAY_2 = "(member $ street)" REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" OTHER_NAME = 'other_entry' MAX_OTHERS = 10 BIND_NAME = 'bind_entry' BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME BASE_OID = "1.2.3.4.5.6.7.8.9.10" SLEEP_INTERVAL = 60 def _add_custom_at_definition(name='ATticket47721'): new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % ( name, name) return ensure_bytes(new_at) def _chg_std_at_defintion(): new_at = "( 
2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" return ensure_bytes(new_at) def _add_custom_oc_defintion(name='OCticket47721'): new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % ( name, name) return ensure_bytes(new_oc) def _chg_std_oc_defintion(): new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" return ensure_bytes(new_oc) def replication_check(topology_m2): repl = ReplicationManager(SUFFIX) supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] return repl.test_replication(supplier1, supplier2) def test_ticket47721_init(topology_m2): """ It adds - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci """ # entry used to bind with topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { 'objectclass': "top person".split(), 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) # enable repl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL logging topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) def test_ticket47721_0(topology_m2): dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ent def test_ticket47721_1(topology_m2): log.info('Running test 1...') 
# topology_m2.ms["supplier1"].log.info("Attach debugger\n\n") # time.sleep(30) new = _add_custom_at_definition() topology_m2.ms["supplier1"].log.info("Add (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) new = _chg_std_at_defintion() topology_m2.ms["supplier1"].log.info("Chg (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) new = _add_custom_oc_defintion() topology_m2.ms["supplier1"].log.info("Add (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) new = _chg_std_oc_defintion() topology_m2.ms["supplier1"].log.info("Chg (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 1')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology_m2.ms["supplier2"].modify_s(dn, mod) replication_check(topology_m2) ent = topology_m2.ms["supplier1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ensure_str(ent.getValue('description')) == 'Hello world 1' time.sleep(2) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) def test_ticket47721_2(topology_m2): log.info('Running test 2...') mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 2')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology_m2.ms["supplier1"].modify_s(dn, mod) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ensure_str(ent.getValue('description')) == 'Hello world 2' time.sleep(2) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) log.debug('Supplier 2 schemaCSN: %s' % 
schema_csn_supplier2) if schema_csn_supplier1 != schema_csn_supplier2: # We need to give the server a little more time, then check it again log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' % (schema_csn_supplier1, schema_csn_supplier2)) time.sleep(SLEEP_INTERVAL) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() assert schema_csn_supplier1 is not None assert schema_csn_supplier1 == schema_csn_supplier2 def test_ticket47721_3(topology_m2): ''' Check that the supplier can update its schema from consumer schema Update M2 schema, then trigger a replication M1->M2 ''' log.info('Running test 3...') # stop RA M2->M1, so that M1 can only learn being a supplier ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].agreement.pause(ents[0].dn) new = _add_custom_at_definition('ATtest3') topology_m2.ms["supplier1"].log.info("Update schema (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) time.sleep(1) new = _add_custom_oc_defintion('OCtest3') topology_m2.ms["supplier1"].log.info("Update schema (M2) %s " % new) topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) time.sleep(1) mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 3')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology_m2.ms["supplier1"].modify_s(dn, mod) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ensure_str(ent.getValue('description')) == 'Hello world 3' time.sleep(5) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) if schema_csn_supplier1 == schema_csn_supplier2: # We need to 
give the server a little more time, then check it again log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' % (schema_csn_supplier1, schema_csn_supplier2)) time.sleep(SLEEP_INTERVAL) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() assert schema_csn_supplier1 is not None # schema csn on M2 is larger that on M1. M1 only took the new definitions assert schema_csn_supplier1 != schema_csn_supplier2 def test_ticket47721_4(topology_m2): ''' Here M2->M1 agreement is disabled. with test_ticket47721_3, M1 schema and M2 should be identical BUT the nsschemacsn is M2>M1. But as the RA M2->M1 is disabled, M1 keeps its schemacsn. Update schema on M2 (nsschemaCSN update), update M2. Check they have the same schemacsn ''' log.info('Running test 4...') new = _add_custom_at_definition('ATtest4') topology_m2.ms["supplier1"].log.info("Update schema (M1) %s " % new) topology_m2.ms["supplier1"].schema.add_schema('attributetypes', new) new = _add_custom_oc_defintion('OCtest4') topology_m2.ms["supplier1"].log.info("Update schema (M1) %s " % new) topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new) topology_m2.ms["supplier1"].log.info("trigger replication M1->M2: to update the schema") mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 4')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology_m2.ms["supplier1"].modify_s(dn, mod) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") assert ensure_str(ent.getValue('description')) == 'Hello world 4' topology_m2.ms["supplier1"].log.info("trigger replication M1->M2: to push the schema") mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 5')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology_m2.ms["supplier1"].modify_s(dn, mod) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") 
assert ensure_str(ent.getValue('description')) == 'Hello world 5' time.sleep(2) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) if schema_csn_supplier1 != schema_csn_supplier2: # We need to give the server a little more time, then check it again log.info('Schema CSNs are incorrectly in sync, wait a little...') time.sleep(SLEEP_INTERVAL) schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() assert schema_csn_supplier1 is not None assert schema_csn_supplier1 == schema_csn_supplier2 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47781_test.py000066400000000000000000000065741421664411400260250ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389.replica import ReplicationManager from lib389._constants import (defaultProperties, DEFAULT_SUFFIX, ReplicaRole, REPLICAID_SUPPLIER_1, REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT) pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) def test_ticket47781(topology_st): """ Testing for a deadlock after doing an online import of an LDIF with replication data. The replication agreement should be invalid. 
""" log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data') supplier = topology_st.standalone repl = ReplicationManager(DEFAULT_SUFFIX) repl.create_first_supplier(supplier) properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} # The agreement should point to a server that does NOT exist (invalid port) repl_agreement = supplier.agreement.create(suffix=DEFAULT_SUFFIX, host=supplier.host, port=5555, properties=properties) # # add two entries # log.info('Adding two entries...') supplier.add_s(Entry(('cn=entry1,dc=example,dc=com', { 'objectclass': 'top person'.split(), 'sn': 'user', 'cn': 'entry1'}))) supplier.add_s(Entry(('cn=entry2,dc=example,dc=com', { 'objectclass': 'top person'.split(), 'sn': 'user', 'cn': 'entry2'}))) # # export the replication ldif # log.info('Exporting replication ldif...') args = {EXPORT_REPL_INFO: True} exportTask = Tasks(supplier) exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) # # Restart the server # log.info('Restarting server...') supplier.stop() supplier.start() # # Import the ldif # log.info('Import replication LDIF file...') importTask = Tasks(supplier) args = {TASK_WAIT: True} importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) os.remove("/tmp/export.ldif") # # Search for tombstones - we should not hang/timeout # log.info('Search for tombstone entries(should find one and not hang)...') supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) supplier.set_option(ldap.OPT_TIMEOUT, 5) entries = supplier.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') if not entries: log.fatal('Search failed to find any entries.') assert PR_False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) 
pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47787_test.py000066400000000000000000000347231421664411400260300ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # ''' Created on April 14, 2014 @author: tbordaz ''' import logging import re import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) # set this flag to False so that it will assert on failure _status_entry_both_server DEBUG_FLAG = False TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX STAGING_CN = "staged user" PRODUCTION_CN = "accounts" EXCEPT_CN = "excepts" STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) BIND_CN = "bind_entry" BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) BIND_PW = "password" NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" def _bind_manager(server): server.log.info("Bind as %s " % DN_DM) server.simple_bind_s(DN_DM, PASSWORD) def _bind_normal(server): server.log.info("Bind as %s " % BIND_DN) server.simple_bind_s(BIND_DN, BIND_PW) def _header(topology_m2, label): topology_m2.ms["supplier1"].log.info("\n\n###############################################") topology_m2.ms["supplier1"].log.info("#######") topology_m2.ms["supplier1"].log.info("####### %s" % label) 
topology_m2.ms["supplier1"].log.info("#######") topology_m2.ms["supplier1"].log.info("###############################################") def _status_entry_both_server(topology_m2, name=None, desc=None, debug=True): if not name: return topology_m2.ms["supplier1"].log.info("\n\n######################### Tombstone on M1 ######################\n") attr = 'description' found = False attempt = 0 while not found and attempt < 10: ent_m1 = _find_tombstone(topology_m2.ms["supplier1"], SUFFIX, 'sn', name) if attr in ent_m1.getAttrs(): found = True else: time.sleep(1) attempt = attempt + 1 assert ent_m1 topology_m2.ms["supplier1"].log.info("\n\n######################### Tombstone on M2 ######################\n") ent_m2 = _find_tombstone(topology_m2.ms["supplier2"], SUFFIX, 'sn', name) assert ent_m2 topology_m2.ms["supplier1"].log.info("\n\n######################### Description ######################\n%s\n" % desc) topology_m2.ms["supplier1"].log.info("M1 only\n") for attr in ent_m1.getAttrs(): if not debug: assert attr in ent_m2.getAttrs() if not attr in ent_m2.getAttrs(): topology_m2.ms["supplier1"].log.info(" %s" % attr) for val in ent_m1.getValues(attr): topology_m2.ms["supplier1"].log.info(" %s" % val) topology_m2.ms["supplier1"].log.info("M2 only\n") for attr in ent_m2.getAttrs(): if not debug: assert attr in ent_m1.getAttrs() if not attr in ent_m1.getAttrs(): topology_m2.ms["supplier1"].log.info(" %s" % attr) for val in ent_m2.getValues(attr): topology_m2.ms["supplier1"].log.info(" %s" % val) topology_m2.ms["supplier1"].log.info("M1 differs M2\n") if not debug: assert ent_m1.dn == ent_m2.dn if ent_m1.dn != ent_m2.dn: topology_m2.ms["supplier1"].log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) for attr1 in ent_m1.getAttrs(): if attr1 in ent_m2.getAttrs(): for val1 in ent_m1.getValues(attr1): found = False for val2 in ent_m2.getValues(attr1): if val1 == val2: found = True break if not debug: assert found if not found: topology_m2.ms["supplier1"].log.info(" 
M1[%s] = %s" % (attr1, val1)) for attr2 in ent_m2.getAttrs(): if attr2 in ent_m1.getAttrs(): for val2 in ent_m2.getValues(attr2): found = False for val1 in ent_m1.getValues(attr2): if val2 == val1: found = True break if not debug: assert found if not found: topology_m2.ms["supplier1"].log.info(" M2[%s] = %s" % (attr2, val2)) def _pause_RAs(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### Pause RA M1<->M2 ######################\n") ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].agreement.pause(ents[0].dn) def _resume_RAs(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### resume RA M1<->M2 ######################\n") ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].agreement.resume(ents[0].dn) def _find_tombstone(instance, base, attr, value): # # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because # tombstone are not index in 'sn' so 'sn=name' will return NULL # and even if tombstone are indexed for objectclass the '&' will set # the candidate list to NULL # filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) # found = False for ent in ents: if ent.hasAttr(attr): for val in ent.getValues(attr): if ensure_str(val) == value: instance.log.debug("tombstone found: %r" % ent) return ent return None def _delete_entry(instance, entry_dn, name): instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) # delete the entry instance.delete_s(entry_dn) ent = _find_tombstone(instance, 
SUFFIX, 'sn', name) assert ent is not None def _mod_entry(instance, entry_dn, attr, value): instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] instance.modify_s(entry_dn, mod) def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): assert instance is not None assert entry_dn is not None if not new_rdn: pattern = 'cn=(.*),(.*)' rdnre = re.compile(pattern) match = rdnre.match(entry_dn) old_value = match.group(1) new_rdn_val = "%s_modrdn" % old_value new_rdn = "cn=%s" % new_rdn_val instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) if new_superior: instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) else: instance.rename_s(entry_dn, new_rdn, delold=del_old) def _check_entry_exists(instance, entry_dn): loop = 0 ent = None while loop <= 10: try: ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 if ent is None: assert False def _check_mod_received(instance, base, filt, attr, value): instance.log.info( "\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) loop = 0 while loop <= 10: ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt) if ent.hasAttr(attr) and ent.getValue(attr) == value: break time.sleep(1) loop += 1 assert loop <= 10 def _check_replication(topology_m2, entry_dn): # prepare the filter to retrieve the entry filt = entry_dn.split(',')[0] topology_m2.ms["supplier1"].log.info("\n######################### Check replicat M1->M2 ######################\n") loop = 0 while loop <= 10: attr = 'description' value = 'test_value_%d' % loop mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] topology_m2.ms["supplier1"].modify_s(entry_dn, mod) _check_mod_received(topology_m2.ms["supplier2"], SUFFIX, filt, attr, value) loop += 1 
topology_m2.ms["supplier1"].log.info("\n######################### Check replicat M2->M1 ######################\n") loop = 0 while loop <= 10: attr = 'description' value = 'test_value_%d' % loop mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] topology_m2.ms["supplier2"].modify_s(entry_dn, mod) _check_mod_received(topology_m2.ms["supplier1"], SUFFIX, filt, attr, value) loop += 1 def test_ticket47787_init(topology_m2): """ Creates - a staging DIT - a production DIT - add accounts in staging DIT """ topology_m2.ms["supplier1"].log.info("\n\n######################### INITIALIZATION ######################\n") # entry used to bind with topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { 'objectclass': "top person".split(), 'sn': BIND_CN, 'cn': BIND_CN, 'userpassword': BIND_PW}))) # DIT for staging topology_m2.ms["supplier1"].log.info("Add %s" % STAGING_DN) topology_m2.ms["supplier1"].add_s(Entry((STAGING_DN, { 'objectclass': "top organizationalRole".split(), 'cn': STAGING_CN, 'description': "staging DIT"}))) # DIT for production topology_m2.ms["supplier1"].log.info("Add %s" % PRODUCTION_DN) topology_m2.ms["supplier1"].add_s(Entry((PRODUCTION_DN, { 'objectclass': "top organizationalRole".split(), 'cn': PRODUCTION_CN, 'description': "production DIT"}))) # enable replication error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')] topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # add dummy entries in the staging DIT for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) def test_ticket47787_2(topology_m2): ''' Disable replication so that updates are not replicated Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). update a test entry on M2 Reenable the RA. 
checks that entry was deleted on M2 (with the modified RDN) checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) ''' _header(topology_m2, "test_ticket47787_2") _bind_manager(topology_m2.ms["supplier1"]) _bind_manager(topology_m2.ms["supplier2"]) # entry to test the replication is still working name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) test_rdn = "cn=%s" % (name) testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) test2_rdn = "cn=%s" % (name) testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) # value of updates to test the replication both ways attr = 'description' value = 'test_ticket47787_2' # entry for the modrdn name = "%s%d" % (NEW_ACCOUNT, 1) rdn = "cn=%s" % (name) entry_dn = "%s,%s" % (rdn, STAGING_DN) # created on M1, wait the entry exists on M2 _check_entry_exists(topology_m2.ms["supplier2"], entry_dn) _check_entry_exists(topology_m2.ms["supplier2"], testentry_dn) _pause_RAs(topology_m2) # Delete 'entry_dn' on M1. # dummy update is only have a first CSN before the DEL # else the DEL will be in min_csn RUV and make diagnostic a bit more complex _mod_entry(topology_m2.ms["supplier1"], testentry2_dn, attr, 'dummy') _delete_entry(topology_m2.ms["supplier1"], entry_dn, name) _mod_entry(topology_m2.ms["supplier1"], testentry2_dn, attr, value) time.sleep(1) # important to have MOD.csn != DEL.csn # MOD 'entry_dn' on M1. 
# dummy update is only have a first CSN before the MOD entry_dn # else the DEL will be in min_csn RUV and make diagnostic a bit more complex _mod_entry(topology_m2.ms["supplier2"], testentry_dn, attr, 'dummy') _mod_entry(topology_m2.ms["supplier2"], entry_dn, attr, value) _mod_entry(topology_m2.ms["supplier2"], testentry_dn, attr, value) _resume_RAs(topology_m2) topology_m2.ms["supplier1"].log.info( "\n\n######################### Check DEL replicated on M2 ######################\n") loop = 0 while loop <= 10: ent = _find_tombstone(topology_m2.ms["supplier2"], SUFFIX, 'sn', name) if ent: break time.sleep(1) loop += 1 assert loop <= 10 assert ent # the following checks are not necessary # as this bug is only for failing replicated MOD (entry_dn) on M1 # _check_mod_received(topology_m2.ms["supplier1"], SUFFIX, "(%s)" % (test_rdn), attr, value) # _check_mod_received(topology_m2.ms["supplier2"], SUFFIX, "(%s)" % (test2_rdn), attr, value) # # _check_replication(topology_m2, testentry_dn) _status_entry_both_server(topology_m2, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) topology_m2.ms["supplier1"].log.info( "\n\n######################### Check MOD replicated on M1 ######################\n") loop = 0 while loop <= 10: ent = _find_tombstone(topology_m2.ms["supplier1"], SUFFIX, 'sn', name) if ent: break time.sleep(1) loop += 1 assert loop <= 10 assert ent assert ent.hasAttr(attr) assert ensure_str(ent.getValue(attr)) == value if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47808_test.py000066400000000000000000000074671421664411400260270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' ENTRY_NAME = 'test_entry' def test_ticket47808_run(topology_st): """ It enables attribute uniqueness plugin with sn as a unique attribute Add an entry 1 with sn = ENTRY_NAME Add an entry 2 with sn = ENTRY_NAME If the second add does not crash the server and the following search found none, the bug is fixed. """ # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") # enable attribute uniqueness plugin mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', ensure_bytes(SUFFIX))] topology_st.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) topology_st.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") # Prepare entry 1 entry_name = '%s 1' % (ENTRY_NAME) entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) entry_1 = Entry(entry_dn_1) entry_1.setValues('objectclass', 'top', 'person') entry_1.setValues('sn', ENTRY_NAME) entry_1.setValues('cn', entry_name) topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1)) topology_st.standalone.add_s(entry_1) topology_st.standalone.log.info("\n\n######################### Restart Server ######################\n") topology_st.standalone.stop(timeout=10) topology_st.standalone.start(timeout=10) topology_st.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") # Prepare entry 2 having the same sn, which crashes the server 
entry_name = '%s 2' % (ENTRY_NAME) entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) entry_2 = Entry(entry_dn_2) entry_2.setValues('objectclass', 'top', 'person') entry_2.setValues('sn', ENTRY_NAME) entry_2.setValues('cn', entry_name) topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2)) try: topology_st.standalone.add_s(entry_2) except: topology_st.standalone.log.warning("Adding %s failed" % entry_dn_2) pass topology_st.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n") ents = topology_st.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') assert len(ents) == 1 topology_st.standalone.log.info("Yes, it's up.") topology_st.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n") topology_st.standalone.log.info("Try to search %s" % entry_dn_2) try: ents = topology_st.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') except ldap.NO_SUCH_OBJECT: topology_st.standalone.log.info("Found none") topology_st.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") topology_st.standalone.log.info("Try to delete %s " % entry_dn_1) topology_st.standalone.delete_s(entry_dn_1) log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47815_test.py000066400000000000000000000074731421664411400260220ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st log = logging.getLogger(__name__) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.3') or ds_is_newer('1.3.7'), reason="Not implemented, or invalid by nsMemberOf")] def test_ticket47815(topology_st): """ Test betxn plugins reject an invalid option, and make sure that the rejected entry is not in the entry cache. Enable memberOf, automember, and retrocl plugins Add the automember config entry Add the automember group Add a user that will be rejected by a betxn plugin - result error 53 Attempt the same add again, and it should result in another error 53 (not error 68) """ result = 0 result2 = 0 log.info( 'Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') # Enabled the plugins topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) # configure automember config entry log.info('Adding automember config') try: topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { 'objectclass': 'top autoMemberDefinition'.split(), 'autoMemberScope': 'dc=example,dc=com', 'autoMemberFilter': 'cn=user', 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', 'autoMemberGroupingAttr': 'member:dn', 'cn': 'group cfg'}))) except: log.error('Failed to add automember config') exit(1) topology_st.standalone.restart() # need to reopen a connection toward the instance topology_st.standalone.open() # add automember group log.info('Adding automember group') try: topology_st.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { 'objectclass': 'top groupOfNames'.split(), 'cn': 'group'}))) except: 
log.error('Failed to add automember group') exit(1) # add user that should result in an error 53 log.info('Adding invalid entry') try: topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { 'objectclass': 'top person'.split(), 'sn': 'user', 'cn': 'user'}))) except ldap.UNWILLING_TO_PERFORM: log.debug('Adding invalid entry failed as expected') result = 53 except ldap.LDAPError as e: log.error('Unexpected result ' + e.message['desc']) assert False if result == 0: log.error('Add operation unexpectedly succeeded') assert False # Attempt to add user again, should result in error 53 again try: topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { 'objectclass': 'top person'.split(), 'sn': 'user', 'cn': 'user'}))) except ldap.UNWILLING_TO_PERFORM: log.debug('2nd add of invalid entry failed as expected') result2 = 53 except ldap.LDAPError as e: log.error('Unexpected result ' + e.message['desc']) assert False if result2 == 0: log.error('2nd Add operation unexpectedly succeeded') assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47823_test.py000066400000000000000000001067231421664411400260170ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import re import shutil import subprocess import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st log = logging.getLogger(__name__) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] PROVISIONING_CN = "provisioning" PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) ACTIVE_CN = "accounts" STAGE_CN = "staged users" DELETE_CN = "deleted users" ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) STAGE_USER_CN = "stage guy" STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) ACTIVE_USER_CN = "active guy" ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) ACTIVE_USER_1_CN = "test_1" ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN) ACTIVE_USER_2_CN = "test_2" ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN) STAGE_USER_1_CN = ACTIVE_USER_1_CN STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) STAGE_USER_2_CN = ACTIVE_USER_2_CN STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def _uniqueness_config_entry(topology_st, name=None): if not name: return None ent = topology_st.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", 
['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type', 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor', 'nsslapd-pluginDescription']) ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) return ent def _build_config(topology_st, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False): assert topology_st assert attr_name assert subtree_1 if type_config == 'old': # enable the 'cn' uniqueness on Active config = _uniqueness_config_entry(topology_st, attr_name) config.setValue('nsslapd-pluginarg0', attr_name) config.setValue('nsslapd-pluginarg1', subtree_1) if subtree_2: config.setValue('nsslapd-pluginarg2', subtree_2) else: # prepare the config entry config = _uniqueness_config_entry(topology_st, attr_name) config.setValue('uniqueness-attribute-name', attr_name) config.setValue('uniqueness-subtrees', subtree_1) if subtree_2: config.setValue('uniqueness-subtrees', subtree_2) if across_subtrees: config.setValue('uniqueness-across-all-subtrees', 'on') return config def _active_container_invalid_cfg_add(topology_st): ''' Check uniqueness is not enforced with ADD (invalid config) ''' topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(ACTIVE_USER_2_DN) def _active_container_add(topology_st, type_config='old'): ''' Check uniqueness in a single container (Active) Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) # 
remove the 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.restart(timeout=120) topology_st.standalone.log.info('Uniqueness not enforced: create the entries') topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(ACTIVE_USER_2_DN) topology_st.standalone.log.info('Uniqueness enforced: checks second entry is rejected') # enable the 'cn' uniqueness on Active topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) try: topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) def _active_container_mod(topology_st, type_config='old'): ''' Check uniqueness in a single container (active) Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) # enable the 'cn' uniqueness on Active topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) topology_st.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': 
ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': ACTIVE_USER_2_CN}))) try: topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ensure_bytes(ACTIVE_USER_1_CN))]) except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass topology_st.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') try: topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(ACTIVE_USER_1_CN), ensure_bytes(ACTIVE_USER_2_CN)])]) except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(ACTIVE_USER_2_DN) def _active_container_modrdn(topology_st, type_config='old'): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) # enable the 'cn' uniqueness on Active topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) topology_st.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': ACTIVE_USER_2_CN}))) try: topology_st.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0) except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(ACTIVE_USER_2_DN) def 
_active_stage_containers_add(topology_st, type_config='old', across_subtrees=False): ''' Check uniqueness in several containers Add an entry on a container with a given 'cn' with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) try: # adding an entry on a separated contains with the same 'cn' topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': STAGE_USER_1_CN, 'cn': ACTIVE_USER_1_CN}))) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(STAGE_USER_1_DN) def _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False): ''' Check uniqueness in a several containers Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) # adding an entry on active with a different 'cn' topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': ACTIVE_USER_2_CN}))) # adding an entry on a stage with a different 'cn' topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { 'objectclass': "top 
person".split(), 'sn': STAGE_USER_1_CN, 'cn': STAGE_USER_1_CN}))) try: # modify add same value topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes(ACTIVE_USER_2_CN)])]) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees topology_st.standalone.delete_s(STAGE_USER_1_DN) topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': STAGE_USER_1_CN, 'cn': STAGE_USER_2_CN}))) try: # modify replace same value topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(STAGE_USER_2_CN), ensure_bytes(ACTIVE_USER_1_CN)])]) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) topology_st.standalone.delete_s(STAGE_USER_1_DN) def _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False): ''' Check uniqueness in a several containers Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container ''' config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) # enable the 'cn' uniqueness on Active and Stage topology_st.standalone.add_s(config) topology_st.standalone.restart(timeout=120) topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': STAGE_USER_1_CN, 'cn': STAGE_USER_1_CN}))) try: topology_st.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) # check stage entry has 'cn=dummy' stage_ent = topology_st.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn']) assert stage_ent.hasAttr('cn') found = False for value in stage_ent.getValues('cn'): if 
ensure_str(value) == 'dummy': found = True assert found # check active entry has 'cn=dummy' active_ent = topology_st.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) assert active_ent.hasAttr('cn') found = False for value in stage_ent.getValues('cn'): if ensure_str(value) == 'dummy': found = True assert found topology_st.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees topology_st.standalone.delete_s(STAGE_USER_1_DN) # cleanup the stuff now topology_st.standalone.delete_s(config.dn) topology_st.standalone.delete_s(ACTIVE_USER_1_DN) def _config_file(topology_st, action='save'): dse_ldif = topology_st.standalone.confdir + '/dse.ldif' sav_file = topology_st.standalone.confdir + '/dse.ldif.ticket47823' if action == 'save': shutil.copy(dse_ldif, sav_file) else: shutil.copy(sav_file, dse_ldif) time.sleep(1) def _pattern_errorlog(file, log_pattern): try: _pattern_errorlog.last_pos += 1 except AttributeError: _pattern_errorlog.last_pos = 0 found = None log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) file.seek(_pattern_errorlog.last_pos) # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: line = file.readline() log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) found = log_pattern.search(line) if ((line == '') or (found)): break log.debug("_pattern_errorlog: end at offset %d" % file.tell()) _pattern_errorlog.last_pos = file.tell() return found def test_ticket47823_init(topology_st): """ """ # Enabled the plugins topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) topology_st.standalone.restart(timeout=120) topology_st.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), 'cn': PROVISIONING_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), 'cn': ACTIVE_CN}))) 
topology_st.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), 'cn': STAGE_CN}))) topology_st.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), 'cn': DELETE_CN}))) topology_st.standalone.errorlog_file = open(topology_st.standalone.errlog, "r") topology_st.standalone.stop(timeout=120) time.sleep(1) topology_st.standalone.start(timeout=120) time.sleep(3) def test_ticket47823_one_container_add(topology_st): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") _active_container_add(topology_st, type_config='old') _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") _active_container_add(topology_st, type_config='new') def test_ticket47823_one_container_mod(topology_st): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD)") _active_container_mod(topology_st, type_config='old') _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD)") _active_container_mod(topology_st, type_config='new') def test_ticket47823_one_container_modrdn(topology_st): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") _active_container_modrdn(topology_st, type_config='old') _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") _active_container_modrdn(topology_st, type_config='new') def test_ticket47823_multi_containers_add(topology_st): ''' Check uniqueness in a several containers Add 
and entry with a given 'cn', then check we can not add an entry with the same 'cn' value ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False) _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") _active_stage_containers_add(topology_st, type_config='new', across_subtrees=False) def test_ticket47823_multi_containers_mod(topology_st): ''' Check uniqueness in a several containers Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") topology_st.standalone.log.info( 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False) _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container") topology_st.standalone.log.info( 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') _active_stage_containers_mod(topology_st, type_config='new', across_subtrees=False) def test_ticket47823_multi_containers_modrdn(topology_st): ''' Check uniqueness in a several containers Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container ''' _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False) topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') 
_active_stage_containers_modrdn(topology_st, type_config='old') def test_ticket47823_across_multi_containers_add(topology_st): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value ''' _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") _active_stage_containers_add(topology_st, type_config='old', across_subtrees=True) def test_ticket47823_across_multi_containers_mod(topology_st): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value ''' _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=True) def test_ticket47823_across_multi_containers_modrdn(topology_st): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value ''' _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=True) def test_ticket47823_invalid_config_1(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg0 is missing ''' _header(topology_st, "Invalid config (old): arg0 is missing") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) del config.data['nsslapd-pluginarg0'] # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("[U|u]nable to parse old style") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_2(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg1 is missing ''' _header(topology_st, "Invalid config (old): arg1 is missing") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) del config.data['nsslapd-pluginarg1'] # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("No valid subtree is defined") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_3(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg0 is missing ''' _header(topology_st, "Invalid config (old): arg0 is missing but new config attrname exists") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) del config.data['nsslapd-pluginarg0'] config.data['uniqueness-attribute-name'] = 'cn' # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("[U|u]nable to parse old style") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_4(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg1 is missing ''' _header(topology_st, "Invalid config (old): arg1 is missing but new config exist") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) del config.data['nsslapd-pluginarg1'] config.data['uniqueness-subtrees'] = ACTIVE_DN # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("No valid subtree is defined") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_5(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using new config: uniqueness-attribute-name is missing ''' _header(topology_st, "Invalid config (new): uniqueness-attribute-name is missing") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) del config.data['uniqueness-attribute-name'] # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("[A|a]ttribute name not defined") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_6(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using new config: uniqueness-subtrees is missing ''' _header(topology_st, "Invalid config (new): uniqueness-subtrees is missing") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) del config.data['uniqueness-subtrees'] # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("[O|o]bjectclass for subtree entries is not defined") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass def test_ticket47823_invalid_config_7(topology_st): ''' Check that an invalid config is detected. 
No uniqueness enforced Using new config: uniqueness-subtrees is missing ''' _header(topology_st, "Invalid config (new): uniqueness-subtrees are invalid") _config_file(topology_st, action='save') # create an invalid config without arg0 config = _build_config(topology_st, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False) topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) # replace 'cn' uniqueness entry try: topology_st.standalone.delete_s(config.dn) except ldap.NO_SUCH_OBJECT: pass topology_st.standalone.add_s(config) topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) # Check the server did not restart try: topology_st.standalone.restart() ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) if ent: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert not ent except: pass # Check the expected error message regex = re.compile("No valid subtree is defined") res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert _config_file(topology_st, action='restore') assert res # Check we can restart the server _config_file(topology_st, action='restore') topology_st.standalone.start() try: topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47828_test.py000066400000000000000000000772561421664411400260340ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. 
# # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st log = logging.getLogger(__name__) ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] INACTIVITY_LIMIT = '9' SEARCHFILTER = '(objectclass=*)' DUMMY_CONTAINER = 'cn=dummy container,%s' % SUFFIX PROVISIONING = 'cn=provisioning,%s' % SUFFIX ACTIVE_USER1_CN = 'active user1' ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX) STAGED_USER1_CN = 'staged user1' STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING) DUMMY_USER1_CN = 'dummy user1' DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER) ALLOCATED_ATTR = 'employeeNumber' def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def test_ticket47828_init(topology_st): """ Enable DNA """ topology_st.standalone.plugins.enable(name=PLUGIN_DNA) topology_st.standalone.add_s(Entry((PROVISIONING, {'objectclass': "top nscontainer".split(), 'cn': 'provisioning'}))) topology_st.standalone.add_s(Entry((DUMMY_CONTAINER, {'objectclass': "top nscontainer".split(), 'cn': 'dummy container'}))) dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) topology_st.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(), 'cn': 'excluded scope', 'dnaType': ALLOCATED_ATTR, 'dnaNextValue': str(1000), 
'dnaMaxValue': str(2000), 'dnaMagicRegen': str(-1), 'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))', 'dnaScope': SUFFIX}))) topology_st.standalone.restart(timeout=10) def test_ticket47828_run_0(topology_st): """ NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_1(topology_st): """ NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_2(topology_st): """ NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'NO exclude scope: Add a staged entry and 
check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_3(topology_st): """ NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_4(topology_st): ''' Exclude the provisioning container ''' _header(topology_st, 'Exclude the provisioning container') dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] topology_st.standalone.modify_s(dn_config, mod) def test_ticket47828_run_5(topology_st): """ Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') 
topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_6(topology_st): """ Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_7(topology_st): """ Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set """ _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert 
ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_8(topology_st): """ Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_9(topology_st): """ Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_10(topology_st): """ Provisioning excluded scope: Add an dummy entry and check its 
ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_11(topology_st): ''' Exclude (in addition) the dummy container ''' _header(topology_st, 'Exclude (in addition) the dummy container') dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes(DUMMY_CONTAINER))] topology_st.standalone.modify_s(dn_config, mod) def test_ticket47828_run_12(topology_st): """ Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_13(topology_st): """ Provisioning/Dummy excluded scope: Add an active entry and check its 
ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_14(topology_st): """ Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_15(topology_st): """ Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': 
STAGED_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_16(topology_st): """ Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_17(topology_st): """ Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, 
ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_18(topology_st): ''' Exclude PROVISIONING and a wrong container ''' _header(topology_st, 'Exclude PROVISIONING and a wrong container') dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] topology_st.standalone.modify_s(dn_config, mod) try: mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] topology_st.standalone.modify_s(dn_config, mod) raise ValueError("invalid dnaExcludeScope value (not a DN)") except ldap.INVALID_SYNTAX: pass def test_ticket47828_run_19(topology_st): """ Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_20(topology_st): """ Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, 
ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_21(topology_st): """ Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_22(topology_st): """ Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, 
ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_23(topology_st): """ Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_24(topology_st): """ Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_25(topology_st): ''' Exclude a wrong container ''' _header(topology_st, 'Exclude a wrong container') dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) try: mod = 
[(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] topology_st.standalone.modify_s(dn_config, mod) raise ValueError("invalid dnaExcludeScope value (not a DN)") except ldap.INVALID_SYNTAX: pass def test_ticket47828_run_26(topology_st): """ Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_27(topology_st): """ Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': ACTIVE_USER1_CN, 'sn': ACTIVE_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(ACTIVE_USER1_DN) def test_ticket47828_run_28(topology_st): """ Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is 
not set """ _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_29(topology_st): """ Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': STAGED_USER1_CN, 'sn': STAGED_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(STAGED_USER1_DN) def test_ticket47828_run_30(topology_st): """ Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set """ _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(-1)}))) ent = 
topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) def test_ticket47828_run_31(topology_st): """ Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) """ _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') topology_st.standalone.add_s( Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': DUMMY_USER1_CN, 'sn': DUMMY_USER1_CN, ALLOCATED_ATTR: str(20)}))) ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent.hasAttr(ALLOCATED_ATTR) assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) topology_st.standalone.delete_s(DUMMY_USER1_DN) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47829_test.py000066400000000000000000000745011421664411400260230ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] SCOPE_IN_CN = 'in' SCOPE_OUT_CN = 'out' SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) PROVISIONING_CN = "provisioning" PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) ACTIVE_CN = "accounts" STAGE_CN = "staged users" DELETE_CN = "deleted users" ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) STAGE_USER_CN = "stage guy" STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) ACTIVE_USER_CN = "active guy" ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) OUT_USER_CN = "out guy" OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) STAGE_GROUP_CN = "stage group" STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) ACTIVE_GROUP_CN = "active group" ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) OUT_GROUP_CN = "out group" OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) INDIRECT_ACTIVE_GROUP_CN = "indirect active group" INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) log = logging.getLogger(__name__) def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def _add_user(topology_st, type='active'): if type == 'active': topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': 
ACTIVE_USER_CN, 'cn': ACTIVE_USER_CN}))) elif type == 'stage': topology_st.standalone.add_s(Entry((STAGE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': STAGE_USER_CN, 'cn': STAGE_USER_CN}))) else: topology_st.standalone.add_s(Entry((OUT_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': OUT_USER_CN, 'cn': OUT_USER_CN}))) def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): assert (topology_st) assert (user_dn) assert (group_dn) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) found = False if ent.hasAttr('memberof'): for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) if ensure_str(val) == group_dn: found = True break if find_result: assert (found) else: assert (not found) def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True): assert (topology_st) assert (user_dn) assert (group_dn) ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) found = False if ent.hasAttr('member'): for val in ent.getValues('member'): topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val)) if ensure_str(val) == user_dn: found = True break if find_result: assert (found) else: assert (not found) def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): assert topology_st is not None assert entry_dn is not None assert new_rdn is not None topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) try: if new_superior: topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) else: topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) except ldap.NO_SUCH_ATTRIBUTE: topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. 
but succeeds") attempt = 0 if new_superior: dn = "%s,%s" % (new_rdn, new_superior) base = new_superior else: base = ','.join(entry_dn.split(",")[1:]) dn = "%s, %s" % (new_rdn, base) myfilter = entry_dn.split(',')[0] while attempt < 10: try: ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) break except ldap.NO_SUCH_OBJECT: topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") attempt += 1 time.sleep(1) if attempt == 10: ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None): assert (topology_st) assert (user_dn) assert (group_dn) if action == ldap.MOD_ADD: txt = 'add' elif action == ldap.MOD_DELETE: txt = 'delete' else: txt = 'replace' topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn)) topology_st.standalone.log.info('to group %s' % group_dn) topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))]) time.sleep(1) _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result) def test_ticket47829_init(topology_st): topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { 'objectclass': "top nscontainer".split(), 'cn': SCOPE_IN_DN}))) topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, { 'objectclass': "top nscontainer".split(), 'cn': SCOPE_OUT_DN}))) topology_st.standalone.add_s(Entry((PROVISIONING_DN, { 'objectclass': "top nscontainer".split(), 'cn': PROVISIONING_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_DN, { 'objectclass': "top nscontainer".split(), 'cn': ACTIVE_CN}))) topology_st.standalone.add_s(Entry((STAGE_DN, { 'objectclass': "top nscontainer".split(), 'cn': STAGE_DN}))) topology_st.standalone.add_s(Entry((DELETE_DN, { 'objectclass': "top nscontainer".split(), 'cn': DELETE_CN}))) # add groups 
topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, { 'objectclass': "top groupOfNames inetuser".split(), 'cn': ACTIVE_GROUP_CN}))) topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, { 'objectclass': "top groupOfNames inetuser".split(), 'cn': STAGE_GROUP_CN}))) topology_st.standalone.add_s(Entry((OUT_GROUP_DN, { 'objectclass': "top groupOfNames inetuser".split(), 'cn': OUT_GROUP_CN}))) topology_st.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { 'objectclass': "top groupOfNames".split(), 'cn': INDIRECT_ACTIVE_GROUP_CN}))) # add users _add_user(topology_st, 'active') _add_user(topology_st, 'stage') _add_user(topology_st, 'out') # enable memberof of with scope IN except provisioning topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(SCOPE_IN_DN))]) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', ensure_bytes(PROVISIONING_DN))]) # enable RI with scope IN except provisioning topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', ensure_bytes(SCOPE_IN_DN))]) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', ensure_bytes(SCOPE_IN_DN))]) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', ensure_bytes(PROVISIONING_DN))]) topology_st.standalone.restart(timeout=10) def test_ticket47829_mod_active_user_1(topology_st): _header(topology_st, 'MOD: add an active user to an active group') # add active user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # remove active user to active 
group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_2(topology_st): _header(topology_st, 'MOD: add an Active user to a Stage group') # add active user to stage group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) # remove active user to stage group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_3(topology_st): _header(topology_st, 'MOD: add an Active user to a out of scope group') # add active user to out of scope group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) # remove active user to out of scope group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) def test_ticket47829_mod_stage_user_1(topology_st): _header(topology_st, 'MOD: add an Stage user to a Active group') # add stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # remove stage user to active group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_stage_user_2(topology_st): _header(topology_st, 'MOD: add an Stage user to a Stage group') # add stage user to stage group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) _find_member(topology_st, 
user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) # remove stage user to stage group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) def test_ticket47829_mod_stage_user_3(topology_st): _header(topology_st, 'MOD: add an Stage user to a out of scope group') # add stage user to an out of scope group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) # remove stage user to out of scope group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) def test_ticket47829_mod_out_user_1(topology_st): _header(topology_st, 'MOD: add an out of scope user to an active group') # add out of scope user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # remove out of scope user to active group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_out_user_2(topology_st): _header(topology_st, 'MOD: add an out of scope user to a Stage group') # add out of scope user to stage group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) # remove out of scope user to stage group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) def test_ticket47829_mod_out_user_3(topology_st): _header(topology_st, 'MOD: add an out of scope user to an out of scope group') # add out of scope user to stage group 
_check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) # remove out of scope user to stage group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_modrdn_active_user_1(topology_st): _header(topology_st, 'add an Active user to a Active group. Then move Active user to Active') # add Active user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to active, expect 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to active, expect 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) # remove active user to active group _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology_st): _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to Stage') # add Active user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to stage, expect no 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) # move the Active entry to Stage, expect 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_modrdn_out_user_1(topology_st): _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to out of scope') # add Active user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to out of scope, expect no 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) # move the Active entry to out of scope, expect no 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_modrdn_1(topology_st): _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Active') # add Stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Stage entry to active, expect 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to Stage, expect no 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology_st): _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Active') stage_user_dn = STAGE_USER_DN stage_user_rdn = "cn=%s" % STAGE_USER_CN active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) # add Stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Stage entry to Actve, expect 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) _find_memberof(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Active entry to Stage, expect no 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st): _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Stage') _header(topology_st, 'Return because it requires a fix for 47833') return old_stage_user_dn = STAGE_USER_DN old_stage_user_rdn = "cn=%s" % STAGE_USER_CN new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) # add Stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Stage entry to Stage, expect no 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) # move the Stage entry to Stage, expect no 'member' and no 'memberof' _modrdn_entry(topology_st, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_indirect_active_group_1(topology_st): _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1') topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) # add an active user to G1. 
Checks that user is memberof G1 _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) # remove G1 from G0 topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # remove active user from G1 _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_indirect_active_group_2(topology_st): _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage') topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) # add an active user to G1. 
Checks that user is memberof G1 _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) # remove G1 from G0 topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move active user to stage _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) # stage user is no long member of active group and indirect active group _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) # active group and indirect active group do no longer have stage user as member _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) # return back the entry to active. It remains not member _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_indirect_active_group_3(topology_st): _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1. 
Then move active user to out of the scope') topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) # add an active user to G1. Checks that user is memberof G1 _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) # remove G1 from G0 topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) # move active user to out of the scope _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) # stage user is no long member of active group and indirect active group _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) # active group and indirect active group do no longer have stage user as member _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) # return back the entry to active. 
It remains not member _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) def test_ticket47829_indirect_active_group_4(topology_st): _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back') topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) # add stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) # move stage user to active _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) # move back active to stage _modrdn_entry(topology_st, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, 
new_superior=STAGE_DN) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47833_test.py000066400000000000000000000174041421664411400260150ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_PLUGIN SCOPE_IN_CN = 'in' SCOPE_OUT_CN = 'out' SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) PROVISIONING_CN = "provisioning" PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] ACTIVE_CN = "accounts" STAGE_CN = "staged users" DELETE_CN = "deleted users" ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) STAGE_USER_CN = "stage guy" STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) ACTIVE_USER_CN = "active guy" ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) OUT_USER_CN = "out guy" OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) STAGE_GROUP_CN = "stage group" STAGE_GROUP_DN 
= "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) ACTIVE_GROUP_CN = "active group" ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) OUT_GROUP_CN = "out group" OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def _add_user(topology_st, type='active'): if type == 'active': topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': ACTIVE_USER_CN, 'cn': ACTIVE_USER_CN}))) elif type == 'stage': topology_st.standalone.add_s(Entry((STAGE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': STAGE_USER_CN, 'cn': STAGE_USER_CN}))) else: topology_st.standalone.add_s(Entry((OUT_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': OUT_USER_CN, 'cn': OUT_USER_CN}))) def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): assert (topology_st) assert (user_dn) assert (group_dn) ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) found = False if ent.hasAttr('memberof'): for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s" % (user_dn, val)) if val == group_dn: found = True break if find_result: assert (found) else: assert (not found) def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True): assert (topology_st) assert (user_dn) assert (group_dn) ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) found = False if ent.hasAttr('member'): for val in ent.getValues('member'): topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val)) if ensure_str(val) == user_dn: found = True break if find_result: assert (found) else: assert (not found) def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): assert topology_st != None assert entry_dn != None assert new_rdn != None topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) if new_superior: topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) else: topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None): assert (topology_st) assert (user_dn) assert (group_dn) if action == ldap.MOD_ADD: txt = 'add' elif action == ldap.MOD_DELETE: txt = 'delete' else: txt = 'replace' topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn)) topology_st.standalone.log.info('to group %s' % group_dn) topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))]) time.sleep(1) _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result) def test_ticket47829_init(topology_st): topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { 'objectclass': "top nscontainer".split(), 'cn': SCOPE_IN_DN}))) topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, { 'objectclass': "top nscontainer".split(), 'cn': SCOPE_OUT_DN}))) topology_st.standalone.add_s(Entry((PROVISIONING_DN, { 
'objectclass': "top nscontainer".split(), 'cn': PROVISIONING_CN}))) topology_st.standalone.add_s(Entry((ACTIVE_DN, { 'objectclass': "top nscontainer".split(), 'cn': ACTIVE_CN}))) topology_st.standalone.add_s(Entry((STAGE_DN, { 'objectclass': "top nscontainer".split(), 'cn': STAGE_DN}))) topology_st.standalone.add_s(Entry((DELETE_DN, { 'objectclass': "top nscontainer".split(), 'cn': DELETE_CN}))) # add groups topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, { 'objectclass': "top groupOfNames".split(), 'cn': ACTIVE_GROUP_CN}))) topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, { 'objectclass': "top groupOfNames".split(), 'cn': STAGE_GROUP_CN}))) topology_st.standalone.add_s(Entry((OUT_GROUP_DN, { 'objectclass': "top groupOfNames".split(), 'cn': OUT_GROUP_CN}))) # add users _add_user(topology_st, 'active') _add_user(topology_st, 'stage') _add_user(topology_st, 'out') # enable memberof of with scope account topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(ACTIVE_DN))]) topology_st.standalone.restart(timeout=10) def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st): _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Stage') old_stage_user_dn = STAGE_USER_DN old_stage_user_rdn = "cn=%s" % STAGE_USER_CN new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) # add Stage user to active group _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) # move the Stage entry to Stage, expect no 'member' and 'memberof' _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47869MMR_test.py000066400000000000000000000200721421664411400263750ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 from lib389.replica import ReplicationManager from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX ENTRY_NAME = 'test_entry' MAX_ENTRIES = 10 BIND_NAME = 'bind_entry' BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) BIND_PW = 'password' def replication_check(topology_m2): repl = ReplicationManager(SUFFIX) supplier1 = topology_m2.ms["supplier1"] supplier2 = topology_m2.ms["supplier2"] return repl.test_replication(supplier1, supplier2) def test_ticket47869_init(topology_m2): """ It adds an entry ('bind_entry') and 10 test entries It sets the anonymous aci """ # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # entry used to bind with topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { 'objectclass': "top person".split(), 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)") assert ent # keep anonymous ACI for use 'read-search' aci in SEARCH test ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" mod = [(ldap.MOD_REPLACE, 'aci', ensure_bytes(ACI_ANONYMOUS))] topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) topology_m2.ms["supplier2"].modify_s(SUFFIX, mod) # add entries for cpt in range(MAX_ENTRIES): name = "%s%d" % (ENTRY_NAME, cpt) mydn = "cn=%s,%s" % (name, SUFFIX) topology_m2.ms["supplier1"].add_s(Entry((mydn, 
{'objectclass': "top person".split(), 'sn': name, 'cn': name}))) replication_check(topology_m2) ent = topology_m2.ms["supplier2"].getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)") assert ent def test_ticket47869_check(topology_m2): ''' On Supplier 1 and 2: Bind as Directory Manager. Search all specifying nscpEntryWsi in the attribute list. Check nscpEntryWsi is returned. On Supplier 1 and 2: Bind as Bind Entry. Search all specifying nscpEntryWsi in the attribute list. Check nscpEntryWsi is not returned. On Supplier 1 and 2: Bind as anonymous. Search all specifying nscpEntryWsi in the attribute list. Check nscpEntryWsi is not returned. ''' topology_m2.ms["supplier1"].log.info("\n\n######################### CHECK nscpentrywsi ######################\n") topology_m2.ms["supplier1"].log.info("##### Supplier1: Bind as %s #####" % DN_DM) topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier1"].log.info("Supplier1: Calling search_ext...") msgid = topology_m2.ms["supplier1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) topology_m2.ms["supplier1"].log.info("%d results" % len(rdata)) topology_m2.ms["supplier1"].log.info("Results:") for dn, attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 topology_m2.ms["supplier1"].log.info("Supplier1: count of nscpentrywsi: %d" % nscpentrywsicnt) topology_m2.ms["supplier2"].log.info("##### Supplier2: Bind as %s #####" % DN_DM) topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) topology_m2.ms["supplier2"].log.info("Supplier2: Calling search_ext...") msgid = topology_m2.ms["supplier2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier2"].result2(msgid) topology_m2.ms["supplier2"].log.info("%d results" % len(rdata)) 
topology_m2.ms["supplier2"].log.info("Results:") for dn, attrs in rdata: topology_m2.ms["supplier2"].log.info("dn: %s" % dn) if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 topology_m2.ms["supplier2"].log.info("Supplier2: count of nscpentrywsi: %d" % nscpentrywsicnt) # bind as bind_entry topology_m2.ms["supplier1"].log.info("##### Supplier1: Bind as %s #####" % BIND_DN) topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) topology_m2.ms["supplier1"].log.info("Supplier1: Calling search_ext...") msgid = topology_m2.ms["supplier1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) topology_m2.ms["supplier1"].log.info("%d results" % len(rdata)) for dn, attrs in rdata: if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology_m2.ms["supplier1"].log.info("Supplier1: count of nscpentrywsi: %d" % nscpentrywsicnt) # bind as bind_entry topology_m2.ms["supplier2"].log.info("##### Supplier2: Bind as %s #####" % BIND_DN) topology_m2.ms["supplier2"].simple_bind_s(BIND_DN, BIND_PW) topology_m2.ms["supplier2"].log.info("Supplier2: Calling search_ext...") msgid = topology_m2.ms["supplier2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier2"].result2(msgid) topology_m2.ms["supplier2"].log.info("%d results" % len(rdata)) for dn, attrs in rdata: if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology_m2.ms["supplier2"].log.info("Supplier2: count of nscpentrywsi: %d" % nscpentrywsicnt) # bind as anonymous topology_m2.ms["supplier1"].log.info("##### Supplier1: Bind as anonymous #####") topology_m2.ms["supplier1"].simple_bind_s("", "") topology_m2.ms["supplier1"].log.info("Supplier1: Calling search_ext...") msgid = topology_m2.ms["supplier1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) 
nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) topology_m2.ms["supplier1"].log.info("%d results" % len(rdata)) for dn, attrs in rdata: if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology_m2.ms["supplier1"].log.info("Supplier1: count of nscpentrywsi: %d" % nscpentrywsicnt) # bind as bind_entry topology_m2.ms["supplier2"].log.info("##### Supplier2: Bind as anonymous #####") topology_m2.ms["supplier2"].simple_bind_s("", "") topology_m2.ms["supplier2"].log.info("Supplier2: Calling search_ext...") msgid = topology_m2.ms["supplier2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology_m2.ms["supplier2"].result2(msgid) topology_m2.ms["supplier2"].log.info("%d results" % len(rdata)) for dn, attrs in rdata: if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology_m2.ms["supplier2"].log.info("Supplier2: count of nscpentrywsi: %d" % nscpentrywsicnt) topology_m2.ms["supplier1"].log.info("##### ticket47869 was successfully verified. #####") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47871_test.py000066400000000000000000000071221421664411400260130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m1c1 logging.getLogger(__name__).setLevel(logging.DEBUG) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX ENTRY_DN = "cn=test_entry, %s" % SUFFIX OTHER_NAME = 'other_entry' MAX_OTHERS = 10 ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] def test_ticket47871_init(topology_m1c1): """ Initialize the test environment """ topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG) mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b"10s"), # 10 second triming (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s")] topology_m1c1.ms["supplier1"].modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod) # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF) # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY) topology_m1c1.ms["supplier1"].stop(timeout=10) topology_m1c1.ms["supplier1"].start(timeout=10) topology_m1c1.ms["supplier1"].log.info("test_ticket47871_init topology_m1c1 %r" % (topology_m1c1)) # the test case will check if a warning message is logged in the # error log of the supplier topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") def test_ticket47871_1(topology_m1c1): ''' ADD entries and check they are all in the retrocl ''' # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m1c1.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) topology_m1c1.ms["supplier1"].log.info( 
"test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) # Check the number of entries in the retro changelog time.sleep(1) ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") assert len(ents) == MAX_OTHERS topology_m1c1.ms["supplier1"].log.info("Added entries are") for ent in ents: topology_m1c1.ms["supplier1"].log.info("%s" % ent.dn) def test_ticket47871_2(topology_m1c1): ''' Wait until there is just a last entries ''' MAX_TRIES = 10 TRY_NO = 1 while TRY_NO <= MAX_TRIES: time.sleep(6) # at least 1 trimming occurred ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") assert len(ents) <= MAX_OTHERS topology_m1c1.ms["supplier1"].log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents))) for ent in ents: topology_m1c1.ms["supplier1"].log.info("%s" % ent.dn) if len(ents) > 1: TRY_NO += 1 else: break assert TRY_NO <= MAX_TRIES assert len(ents) <= 1 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47900_test.py000066400000000000000000000175771421664411400260230ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' ADMIN_NAME = 'passwd_admin' ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) ADMIN_PWD = 'adminPassword_1' ENTRY_NAME = 'Joe Schmo' ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') def test_ticket47900(topology_st): """ Test that password administrators/root DN can bypass password syntax/policy. We need to test how passwords are modified in existing entries, and when adding new entries. Create the Password Admin entry, but do not set it as an admin yet. Use the entry to verify invalid passwords are caught. Then activate the password admin and make sure it can bypass password policy. """ # Prepare the Password Administator entry = Entry(ADMIN_DN) entry.setValues('objectclass', 'top', 'person') entry.setValues('sn', ADMIN_NAME) entry.setValues('cn', ADMIN_NAME) entry.setValues('userpassword', ADMIN_PWD) topology_st.standalone.log.info("Creating Password Administator entry %s..." 
% ADMIN_DN) try: topology_st.standalone.add_s(entry) except ldap.LDAPError as e: topology_st.standalone.log.error('Unexpected result ' + e.args[0]['desc']) assert False topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s " % (ADMIN_DN, e.args[0]['desc'])) assert False topology_st.standalone.log.info("Configuring password policy...") topology_st.standalone.config.replace_many(('nsslapd-pwpolicy-local', 'on'), ('passwordCheckSyntax', 'on'), ('passwordMinCategories', '1'), ('passwordMinTokenLength', '1'), ('passwordExp', 'on'), ('passwordMinDigits', '1'), ('passwordMinSpecials', '1')) # # Add an aci to allow everyone all access (just makes things easier) # topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...") ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) # # Bind as the Password Admin # topology_st.standalone.log.info("Bind as the Password Administator (before activating)...") topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) # # Setup our test entry, and test password policy is working # entry = Entry(ENTRY_DN) entry.setValues('objectclass', 'top', 'person') entry.setValues('sn', ENTRY_NAME) entry.setValues('cn', ENTRY_NAME) # # Start by attempting to add an entry with an invalid password # topology_st.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...") for passwd in INVALID_PWDS: failed_as_expected = False entry.setValues('userpassword', passwd) topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." 
% (ENTRY_DN, passwd)) try: topology_st.standalone.add_s(entry) except ldap.LDAPError as e: # We failed as expected failed_as_expected = True topology_st.standalone.log.info('Add failed as expected: password (%s) result (%s)' % (passwd, e.args[0]['desc'])) if not failed_as_expected: topology_st.standalone.log.error("We were incorrectly able to add an entry " + "with an invalid password (%s)" % (passwd)) assert False # # Now activate a password administator, bind as root dn to do the config # update, then rebind as the password admin # topology_st.standalone.log.info("Activate the Password Administator...") # Bind as Root DN topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Update config topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))]) # Bind as Password Admin topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) # # Start adding entries with invalid passwords, delete the entry after each pass. # for passwd in INVALID_PWDS: entry.setValues('userpassword', passwd) topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." 
% (ENTRY_DN, passwd)) topology_st.standalone.add_s(entry) topology_st.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN) # Delete entry for the next pass topology_st.standalone.delete_s(ENTRY_DN) # # Add the entry for the next round of testing (modify password) # entry.setValues('userpassword', ADMIN_PWD) topology_st.standalone.add_s(entry) # # Deactivate the password admin and make sure invalid password updates fail # topology_st.standalone.log.info("Deactivate Password Administator and try invalid password updates...") # Bind as root DN topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Update conf topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) # Bind as Password Admin topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) # # Make invalid password updates that should fail # for passwd in INVALID_PWDS: failed_as_expected = False entry.setValues('userpassword', passwd) try: topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))]) except ldap.LDAPError as e: # We failed as expected failed_as_expected = True topology_st.standalone.log.info('Password update failed as expected: password (%s) result (%s)' % (passwd, e.args[0]['desc'])) if not failed_as_expected: topology_st.standalone.log.error("We were incorrectly able to add an invalid password (%s)" % (passwd)) assert False # # Now activate a password administator # topology_st.standalone.log.info("Activate Password Administator and try updates again...") # Bind as root D topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Update config topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))]) # Bind as Password Admin topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) # # Make the same password updates, but this time they should succeed # for passwd in INVALID_PWDS: entry.setValues('userpassword', passwd) topology_st.standalone.modify_s(ENTRY_DN, 
[(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))]) topology_st.standalone.log.info('Password update succeeded (%s)' % passwd) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47910_test.py000066400000000000000000000133341421664411400260070ustar00rootroot00000000000000# Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import subprocess from datetime import datetime, timedelta import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] @pytest.fixture(scope="module") def log_dir(topology_st): ''' Do a search operation and disable access log buffering to generate the access log ''' log.info("Diable access log buffering") topology_st.standalone.setAccessLogBuffering(False) log.info("Do a ldapsearch operation") topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") log.info("sleep for sometime so that access log file get generated") time.sleep(1) return topology_st.standalone.accesslog def format_time(local_datetime): formatted_time = (local_datetime.strftime("[%d/%b/%Y:%H:%M:%S]")) return formatted_time def execute_logconv(inst, start_time_stamp, end_time_stamp, access_log): ''' This function will take start time and end time as input parameter and assign these values to -S and -E options of logconv and, it will execute logconv and return result value ''' log.info("Executing logconv.pl with -S current time and -E end time") cmd = 
[os.path.join(inst.get_bin_dir(), 'logconv.pl'), '-S', start_time_stamp, '-E', end_time_stamp, access_log] log.info(" ".join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() log.info("standard output" + ensure_str(stdout)) log.info("standard errors" + ensure_str(stderr)) return proc.returncode def test_ticket47910_logconv_start_end_positive(topology_st, log_dir): ''' Execute logconv.pl with -S and -E(endtime) with random time stamp This is execute successfully ''' # # Execute logconv.pl -S -E with random timestamp # log.info('Running test_ticket47910 - Execute logconv.pl -S -E with random values') log.info("taking current time with offset of 2 mins and formatting it to feed -S") start_time_stamp = (datetime.now() - timedelta(minutes=2)) formatted_start_time_stamp = format_time(start_time_stamp) log.info("taking current time with offset of 2 mins and formatting it to feed -E") end_time_stamp = (datetime.now() + timedelta(minutes=2)) formatted_end_time_stamp = format_time(end_time_stamp) log.info("Executing logconv.pl with -S and -E") result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir) assert result == 0 def test_ticket47910_logconv_start_end_negative(topology_st, log_dir): ''' Execute logconv.pl with -S and -E(endtime) with random time stamp This is a negative test case, where endtime will be lesser than the starttime This should give error message ''' # # Execute logconv.pl -S and -E with random timestamp # log.info('Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime') log.info("taking current time with offset of 2 mins and formatting it to feed -S") start_time_stamp = (datetime.now() + timedelta(minutes=2)) formatted_start_time_stamp = format_time(start_time_stamp) log.info("taking current time with offset of 2 mins and formatting it to feed -E") end_time_stamp = (datetime.now() - timedelta(minutes=2)) 
formatted_end_time_stamp = format_time(end_time_stamp) log.info("Executing logconv.pl with -S and -E") result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir) assert result == 1 def test_ticket47910_logconv_start_end_invalid(topology_st, log_dir): ''' Execute logconv.pl with -S and -E(endtime) with invalid time stamp This is a negative test case, where it should give error message ''' # # Execute logconv.pl -S and -E with invalid timestamp # log.info('Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp') log.info("Set start time and end time to invalid values") start_time_stamp = "invalid" end_time_stamp = "invalid" log.info("Executing logconv.pl with -S and -E") result = execute_logconv(topology_st.standalone, start_time_stamp, end_time_stamp, log_dir) assert result == 1 def test_ticket47910_logconv_noaccesslogs(topology_st, log_dir): ''' Execute logconv.pl -S(starttime) without specify access logs location ''' # # Execute logconv.pl -S with random timestamp and no access log location # log.info('Running test_ticket47910 - Execute logconv.pl without access logs') log.info("taking current time with offset of 2 mins and formatting it to feed -S") time_stamp = (datetime.now() - timedelta(minutes=2)) formatted_time_stamp = format_time(time_stamp) log.info("Executing logconv.pl with -S current time") cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp] log.info(" ".join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() log.info("standard output" + ensure_str(stdout)) log.info("standard errors" + ensure_str(stderr)) assert proc.returncode == 1 if __name__ == '__main__': # Run isolated # -s for DEBUG mode pytest.main("-s ticket47910_test.py") 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47920_test.py000066400000000000000000000106771421664411400260170ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from ldap.controls.readentry import PostReadControl from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st SCOPE_IN_CN = 'in' SCOPE_OUT_CN = 'out' SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] PROVISIONING_CN = "provisioning" PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) ACTIVE_CN = "accounts" STAGE_CN = "staged users" DELETE_CN = "deleted users" ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) STAGE_USER_CN = "stage guy" STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) ACTIVE_USER_CN = "active guy" ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) OUT_USER_CN = "out guy" OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) STAGE_GROUP_CN = "stage group" STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) ACTIVE_GROUP_CN = "active group" ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) OUT_GROUP_CN = "out group" OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) INDIRECT_ACTIVE_GROUP_CN = "indirect active group" INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) INITIAL_DESC = "inital description" FINAL_DESC = "final description" log = logging.getLogger(__name__) def _header(topology_st, label): 
topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("#######") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("#######") topology_st.standalone.log.info("###############################################") def _add_user(topology_st, type='active'): if type == 'active': topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': ACTIVE_USER_CN, 'cn': ACTIVE_USER_CN, 'description': INITIAL_DESC}))) elif type == 'stage': topology_st.standalone.add_s(Entry((STAGE_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': STAGE_USER_CN, 'cn': STAGE_USER_CN}))) else: topology_st.standalone.add_s(Entry((OUT_USER_DN, { 'objectclass': "top person inetuser".split(), 'sn': OUT_USER_CN, 'cn': OUT_USER_CN}))) def test_ticket47920_init(topology_st): topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { 'objectclass': "top nscontainer".split(), 'cn': SCOPE_IN_DN}))) topology_st.standalone.add_s(Entry((ACTIVE_DN, { 'objectclass': "top nscontainer".split(), 'cn': ACTIVE_CN}))) # add users _add_user(topology_st, 'active') def test_ticket47920_mod_readentry_ctrl(topology_st): _header(topology_st, 'MOD: with a readentry control') topology_st.standalone.log.info("Check the initial value of the entry") ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ent.hasAttr('description') assert ensure_str(ent.getValue('description')) == INITIAL_DESC pr = PostReadControl(criticality=True, attrList=['cn', 'description']) _, _, _, resp_ctrls = topology_st.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [ensure_bytes(FINAL_DESC)])], serverctrls=[pr]) assert resp_ctrls[0].dn == ACTIVE_USER_DN assert 'description' in resp_ctrls[0].entry assert 'cn' in resp_ctrls[0].entry print(resp_ctrls[0].entry['description']) ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, 
ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ent.hasAttr('description') assert ensure_str(ent.getValue('description')) == FINAL_DESC if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47921_test.py000066400000000000000000000063241421664411400260120ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket47921(topology_st): ''' Test that indirect cos reflects the current value of the indirect entry ''' INDIRECT_COS_DN = 'cn=cos definition,' + DEFAULT_SUFFIX MANAGER_DN = 'uid=my manager,ou=people,' + DEFAULT_SUFFIX USER_DN = 'uid=user,ou=people,' + DEFAULT_SUFFIX # Add COS definition topology_st.standalone.add_s(Entry((INDIRECT_COS_DN, { 'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(), 'cosIndirectSpecifier': 'manager', 'cosAttribute': 'roomnumber' }))) # Add manager entry topology_st.standalone.add_s(Entry((MANAGER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'my manager', 'roomnumber': '1' }))) # Add user entry topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': 'top person organizationalPerson inetorgperson'.split(), 'sn': 'last', 'cn': 'full', 'givenname': 'mark', 'uid': 'user', 'manager': MANAGER_DN }))) # Test COS is working entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=user", ['roomnumber']) if entry: if ensure_str(entry[0].getValue('roomnumber')) != '1': log.fatal('COS is not working.') assert 
False else: log.fatal('Failed to find user entry') assert False # Modify manager entry topology_st.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', b'2')]) # Confirm COS is returning the new value entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=user", ['roomnumber']) if entry: if ensure_str(entry[0].getValue('roomnumber')) != '2': log.fatal('COS is not working after manager update.') assert False else: log.fatal('Failed to find user entry') assert False log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47927_test.py000066400000000000000000000276341421664411400260270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_ATTR_UNIQUENESS # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) EXCLUDED_CONTAINER_CN = "excluded_container" EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX) EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container" EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX) ENFORCED_CONTAINER_CN = "enforced_container" ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX) USER_1_CN = "test_1" USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN) USER_2_CN = "test_2" USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN) USER_3_CN = "test_3" USER_3_DN = "cn=%s,%s" % 
def test_ticket47927_init(topology_st):
    """Set up attribute-uniqueness testing for 'telephonenumber'.

    Enables the attribute uniqueness plugin scoped to the whole suffix,
    creates one enforced container and two excluded containers, and adds
    one test user under each relevant container.
    """
    topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
    try:
        # Enforce uniqueness of 'telephonenumber' across DEFAULT_SUFFIX
        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                                        [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'telephonenumber'),
                                         (ldap.MOD_REPLACE, 'uniqueness-subtrees', ensure_bytes(DEFAULT_SUFFIX)), ])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.args[0]['desc'])
        assert False
    # Restart so the plugin configuration takes effect
    topology_st.standalone.restart(timeout=120)

    topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
                                                                'cn': EXCLUDED_CONTAINER_CN})))
    topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
                                                                    'cn': EXCLUDED_BIS_CONTAINER_CN})))
    topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
                                                                'cn': ENFORCED_CONTAINER_CN})))
    # adding an entry on a stage with a different 'cn'
    topology_st.standalone.add_s(Entry((USER_1_DN, {
        'objectclass': "top person".split(),
        'sn': USER_1_CN,
        'cn': USER_1_CN})))
    # adding an entry on a stage with a different 'cn'
    topology_st.standalone.add_s(Entry((USER_2_DN, {
        'objectclass': "top person".split(),
        'sn': USER_2_CN,
        'cn': USER_2_CN})))
    topology_st.standalone.add_s(Entry((USER_3_DN, {
        'objectclass': "top person".split(),
        'sn': USER_3_CN,
        'cn': USER_3_CN})))
    topology_st.standalone.add_s(Entry((USER_4_DN, {
        'objectclass': "top person".split(),
        'sn': USER_4_CN,
        'cn': USER_4_CN})))


def test_ticket47927_one(topology_st):
    '''
    Check that uniqueness is enforced on the whole SUFFIX (no exclusions
    configured yet): a value set on user1 may not be duplicated anywhere,
    not even in the containers that later tests will exclude.
    '''
    UNIQUE_VALUE = b'1234'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
        assert False

    # we expect to fail because user1 is in the scope of the plugin
    try:
        topology_st.standalone.modify_s(USER_2_DN, [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))
        pass

    # we expect to fail because user1 is in the scope of the plugin
    try:
        topology_st.standalone.modify_s(USER_3_DN, [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
        assert False
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        pass


def test_ticket47927_two(topology_st):
    '''
    Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin.
    '''
    try:
        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                                        [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
            EXCLUDED_CONTAINER_DN, e.args[0]['desc']))
        assert False
    # Restart so the new exclusion is picked up
    topology_st.standalone.restart(timeout=120)


def test_ticket47927_three(topology_st):
    '''
    Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
    First case: an entry with the same attribute value exists in the scope
    of the plugin, and we set the value in an entry that is in an excluded
    scope — that must now succeed.
    '''
    UNIQUE_VALUE = b'9876'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc'])
        assert False

    # we should not be allowed to set this value (because user1 is in the scope)
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))

    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        assert False
def test_ticket47927_five(topology_st):
    '''
    Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
    (added with MOD_ADD, so both excluded subtrees are now configured).
    '''
    try:
        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                                        [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
            EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc']))
        assert False
    topology_st.standalone.restart(timeout=120)
    # Sanity read-back of the plugin entry after restart
    topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)


def test_ticket47927_six(topology_st):
    '''
    Check that uniqueness is enforced on full SUFFIX except
    EXCLUDED_CONTAINER_DN and EXCLUDED_BIS_CONTAINER_DN.
    First case: an entry with the same attribute value exists in the scope
    of the plugin, and we set the value in an entry that is in an excluded
    scope.
    '''
    UNIQUE_VALUE = b'222'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc'])
        assert False

    # we should not be allowed to set this value (because user1 is in the scope)
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))

    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        assert False

    # USER_4_DN is in EXCLUDED_BIS_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_4_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_4_DN, e.args[0]['desc']))
        assert False
def test_ticket47931(topology_st):
    """Test Retro Changelog and MemberOf deadlock fix.

    Verification steps:
        - Enable retro cl and memberOf.
        - Create two backends: A & B.
        - Configure retrocl scoping for backend A.
        - Configure memberOf plugin for uniquemember
        - Create group in backend A.
        - In parallel, add members to the group on A, and make modifications
          to entries in backend B.
        - Make sure the server does not hang during the updates to both
          backends (an ldap.TIMEOUT here means the deadlock regressed).
    """
    # Enable dynamic plugins to make plugin configuration easier
    try:
        topology_st.standalone.modify_s(DN_CONFIG,
                                        [(ldap.MOD_REPLACE,
                                          'nsslapd-dynamic-plugins',
                                          b'on')])
    except ldap.LDAPError as e:
        log.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
        assert False

    # Enable the plugins
    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

    # Create second backend (backend B, outside the retrocl scope)
    topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
    topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)

    # Create the root node of the second backend
    try:
        topology_st.standalone.add_s(Entry((SECOND_SUFFIX,
                                            {'objectclass': 'top domain'.split(),
                                             'dc': 'deadlock'})))
    except ldap.LDAPError as e:
        log.fatal('Failed to create suffix entry: error ' + e.args[0]['desc'])
        assert False

    # Configure retrocl scope so only DEFAULT_SUFFIX updates are logged
    try:
        topology_st.standalone.modify_s(RETROCL_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'nsslapd-include-suffix',
                                          ensure_bytes(DEFAULT_SUFFIX))])
    except ldap.LDAPError as e:
        log.error('Failed to configure retrocl plugin: ' + e.args[0]['desc'])
        assert False

    # Configure memberOf group attribute
    try:
        topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'memberofgroupattr',
                                          b'uniquemember')])
    except ldap.LDAPError as e:
        log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc'])
        assert False
    time.sleep(1)

    # Create group
    try:
        topology_st.standalone.add_s(Entry((GROUP_DN,
                                            {'objectclass': 'top extensibleObject'.split(),
                                             'cn': 'group'})))
    except ldap.LDAPError as e:
        # Fixed log message typo ("grouo" -> "group")
        log.fatal('Failed to add group: error ' + e.args[0]['desc'])
        assert False

    # Create 1500 entries (future members of the group)
    for idx in range(1, 1500):
        try:
            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.add_s(Entry((USER_DN,
                                                {'objectclass': 'top extensibleObject'.split(),
                                                 'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
            assert False

    # Modify second backend (separate thread) while we stress backend A
    mod_backend_thrd = modifySecondBackendThread(topology_st.standalone, TIME_OUT)
    mod_backend_thrd.start()
    time.sleep(1)

    # Add members to the group - set timeout so a deadlock shows up as
    # ldap.TIMEOUT instead of hanging the test run forever
    log.info('Adding members to the group...')
    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
    for idx in range(1, 1500):
        try:
            MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.modify_s(GROUP_DN,
                                            [(ldap.MOD_ADD,
                                              'uniquemember',
                                              ensure_bytes(MEMBER_VAL))])
        except ldap.TIMEOUT:
            log.fatal('Deadlock!  Bug verification failed.')
            assert False
        except ldap.LDAPError as e:
            log.fatal('Failed to update group(not a deadlock) member (%s) - error: %s' %
                      (MEMBER_VAL, e.args[0]['desc']))
            assert False
    log.info('Finished adding members to the group.')

    # Wait for the thread to finish
    mod_backend_thrd.join()

    # No timeout, test passed!
    log.info('Test complete\n')
""" log.info('Testing Ticket 47953 - Test we can delete aci that has invalid syntax') # # Import an invalid ldif # ldif_file = (topology_st.standalone.getDir(__file__, DATA_DIR) + "ticket47953/ticket47953.ldif") try: ldif_dir = topology_st.standalone.get_ldif_dir() shutil.copy(ldif_file, ldif_dir) ldif_file = ldif_dir + '/ticket47953.ldif' except: log.fatal('Failed to copy ldif to instance ldif dir') assert False importTask = Tasks(topology_st.standalone) args = {TASK_WAIT: True} try: importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) except ValueError: assert False time.sleep(2) # # Delete the invalid aci # acival = '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"' + \ ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)' log.info('Attempting to remove invalid aci...') try: topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', ensure_bytes(acival))]) log.info('Removed invalid aci.') except ldap.LDAPError as e: log.error('Failed to remove invalid aci: ' + e.args[0]['desc']) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47963_test.py000066400000000000000000000116471421664411400260240ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
def test_ticket47963(topology_st):
    '''
    Test that the memberOf plugin works correctly after setting:

        memberofskipnested: on
    '''
    PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config'
    USER_DN = 'uid=test_user,' + DEFAULT_SUFFIX
    GROUP_DN1 = 'cn=group1,' + DEFAULT_SUFFIX
    GROUP_DN2 = 'cn=group2,' + DEFAULT_SUFFIX
    GROUP_DN3 = 'cn=group3,' + DEFAULT_SUFFIX

    #
    # Enable the plugin and configure the skip nested attribute, then restart the server
    #
    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    try:
        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', b'on')])
    except ldap.LDAPError as e:
        log.error('test_automember: Failed to modify config entry: error ' + e.args[0]['desc'])
        assert False

    topology_st.standalone.restart(timeout=10)

    #
    # Add our groups, users, memberships, etc
    #
    try:
        topology_st.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top extensibleObject'.split(),
            'uid': 'test_user'
        })))
    except ldap.LDAPError as e:
        # Fixed log message typo ("teset" -> "test")
        log.error('Failed to add test user: error ' + e.args[0]['desc'])
        assert False

    try:
        topology_st.standalone.add_s(Entry((GROUP_DN1, {
            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
            'cn': 'group1',
            'member': USER_DN
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add group1: error ' + e.args[0]['desc'])
        assert False

    try:
        topology_st.standalone.add_s(Entry((GROUP_DN2, {
            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
            'cn': 'group2',
            'member': USER_DN
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add group2: error ' + e.args[0]['desc'])
        assert False

    # Add group with no member(yet)
    try:
        topology_st.standalone.add_s(Entry((GROUP_DN3, {
            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
            'cn': 'group'
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add group3: error ' + e.args[0]['desc'])
        assert False
    time.sleep(1)

    #
    # Test we have the correct memberOf values in the user entry
    #
    try:
        member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))')
        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
        if not entries:
            log.fatal('User is missing expected memberOf attrs')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search for user1 failed: ' + e.args[0]['desc'])
        assert False

    # Add the user to the group
    try:
        topology_st.standalone.modify_s(GROUP_DN3,
                                        [(ldap.MOD_ADD,
                                          'member',
                                          ensure_bytes(USER_DN))])
    except ldap.LDAPError as e:
        log.error('Failed to member to group: error ' + e.args[0]['desc'])
        assert False
    time.sleep(1)

    # Check that the test user is a "memberOf" all three groups
    try:
        member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 +
                         ')(memberOf=' + GROUP_DN3 + '))')
        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
        if not entries:
            log.fatal('User is missing expected memberOf attrs')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search for user1 failed: ' + e.args[0]['desc'])
        assert False

    #
    # Delete group2, and check memberOf values in the user entry
    #
    try:
        topology_st.standalone.delete_s(GROUP_DN2)
    except ldap.LDAPError as e:
        log.error('Failed to delete test group2: ' + e.args[0]['desc'])
        assert False
    time.sleep(1)

    try:
        member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))')
        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
        if not entries:
            log.fatal('User incorrect memberOf attrs')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Search for user1 failed: ' + e.args[0]['desc'])
        assert False

    log.info('Test complete')
def test_ticket47970(topology_st):
    """
    Testing that a failed SASL bind does not trigger account lockout -
    which would attempt to update the passwordRetryCount on the root dse entry
    """
    log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout')

    #
    # Enable account lockout
    #
    try:
        topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', b'on')])
        log.info('account lockout enabled.')
    except ldap.LDAPError as e:
        log.error('Failed to enable account lockout: ' + e.args[0]['desc'])
        assert False

    try:
        topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', b'5')])
        log.info('passwordMaxFailure set.')
    except ldap.LDAPError as e:
        log.error('Failed to to set passwordMaxFailure: ' + e.args[0]['desc'])
        assert False

    #
    # Perform SASL bind that should fail (nonexistent DIGEST-MD5 identity)
    #
    failed_as_expected = False
    try:
        user_name = "mark"
        pw = "secret"
        auth_tokens = ldap.sasl.digest_md5(user_name, pw)
        topology_st.standalone.sasl_interactive_bind_s("", auth_tokens)
    except ldap.INVALID_CREDENTIALS as e:
        log.info("SASL Bind failed as expected")
        failed_as_expected = True

    if not failed_as_expected:
        log.error("SASL bind unexpectedly succeeded!")
        assert False

    #
    # Check that passwordRetryCount was not set on the root dse entry:
    # searching the root DSE with this filter must return nothing
    #
    try:
        entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE,
                                                "passwordRetryCount=*",
                                                ['passwordRetryCount'])
    except ldap.LDAPError as e:
        log.error('Failed to search Root DSE entry: ' + e.args[0]['desc'])
        assert False

    if entry:
        log.error('Root DSE was incorrectly updated')
        assert False

    # We passed
    log.info('Root DSE was correctly not updated')
def test_ticket47976_1(topology_st):
    """Point the managed-entries plugin at the definitions container and
    create the initial posixAccount users (MEP creates a managed group
    for each one)."""
    mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', ensure_bytes(DEFINITIONS_DN))]
    topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
    # Full stop/start so the plugin re-reads its config area
    topology_st.standalone.stop(timeout=10)
    topology_st.standalone.start(timeout=10)
    for cpt in range(MAX_ACCOUNTS):
        name = "user%d" % (cpt)
        topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
            'objectclass': 'top posixAccount extensibleObject'.split(),
            'uid': name,
            'cn': name,
            'uidNumber': '1',
            'gidNumber': '1',
            'homeDirectory': '/home/%s' % name
        })))


def test_ticket47976_2(topology_st):
    """It reimports the database with a very large page size
    so all the entries (user and its private group).
    """
    # NOTE(review): this logs 'Test complete' at the *start* of the
    # function — looks like a leftover; behavior kept as-is.
    log.info('Test complete')
    mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', ensure_bytes(str(128 * 1024)))]
    topology_st.standalone.modify_s(DN_LDBM, mod)

    # Get the the full path and name for our LDIF we will be exporting
    log.info('Export LDIF file...')
    ldif_dir = topology_st.standalone.get_ldif_dir()
    ldif_file = ldif_dir + "/export.ldif"
    args = {EXPORT_REPL_INFO: False,
            TASK_WAIT: True}
    exportTask = Tasks(topology_st.standalone)
    try:
        exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
    except ValueError:
        assert False

    # import the new ldif file
    log.info('Import LDIF file...')
    importTask = Tasks(topology_st.standalone)
    args = {TASK_WAIT: True}
    try:
        importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
        os.remove(ldif_file)
    except ValueError:
        os.remove(ldif_file)
        assert False


def test_ticket47976_3(topology_st):
    """A single delete of a user should hit 47976, because mep post op will
    delete its related group.
    """
    log.info('Testing if the delete will hang or not')
    # log.info("\n\nAttach\n\n debugger")
    # time.sleep(60)

    # Client-side timeout turns a server hang into ldap.TIMEOUT
    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5)
    try:
        for cpt in range(MAX_ACCOUNTS):
            name = "user%d" % (cpt)
            topology_st.standalone.delete_s("uid=%s,%s" % (name, PEOPLE_DN))
    except ldap.TIMEOUT as e:
        log.fatal('Timeout... likely it hangs (47976)')
        assert False

    # check the entry has been deleted
    for cpt in range(MAX_ACCOUNTS):
        try:
            name = "user%d" % (cpt)
            topology_st.standalone.getEntry("uid=%s,%s" % (name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
            assert False
        except ldap.NO_SUCH_OBJECT:
            log.info('%s was correctly deleted' % name)
            pass

    assert cpt == (MAX_ACCOUNTS - 1)
# All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import ldap.sasl import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) BRANCH1 = 'ou=level1,' + DEFAULT_SUFFIX BRANCH2 = 'ou=level2,ou=level1,' + DEFAULT_SUFFIX BRANCH3 = 'ou=level3,ou=level2,ou=level1,' + DEFAULT_SUFFIX BRANCH4 = 'ou=people,' + DEFAULT_SUFFIX BRANCH5 = 'ou=lower,ou=people,' + DEFAULT_SUFFIX BRANCH6 = 'ou=lower,ou=lower,ou=people,' + DEFAULT_SUFFIX USER1_DN = 'uid=user1,%s' % (BRANCH1) USER2_DN = 'uid=user2,%s' % (BRANCH2) USER3_DN = 'uid=user3,%s' % (BRANCH3) USER4_DN = 'uid=user4,%s' % (BRANCH4) USER5_DN = 'uid=user5,%s' % (BRANCH5) USER6_DN = 'uid=user6,%s' % (BRANCH6) BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com' BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com' BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' BRANCH3_PWP = 
'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com' BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com' BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com' BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com' def test_ticket47980(topology_st): """ Multiple COS pointer definitions that use the same 
attribute are not correctly ordered. The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead to the wrong cos attribute value being applied to the entry. """ log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly') # Add our nested branches try: topology_st.standalone.add_s(Entry((BRANCH1, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'level1' }))) except ldap.LDAPError as e: log.error('Failed to add level1: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((BRANCH2, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'level2' }))) except ldap.LDAPError as e: log.error('Failed to add level2: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((BRANCH3, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'level3' }))) except ldap.LDAPError as e: log.error('Failed to add level3: error ' + e.args[0]['desc']) assert False # People branch, might already exist try: topology_st.standalone.add_s(Entry((BRANCH4, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'level4' }))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to add level4: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((BRANCH5, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'level5' }))) except ldap.LDAPError as e: log.error('Failed to add level5: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((BRANCH6, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'level6' }))) except ldap.LDAPError as e: log.error('Failed to add level6: error ' + e.args[0]['desc']) assert False # Add users to each branch try: topology_st.standalone.add_s(Entry((USER1_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user1' }))) except ldap.LDAPError as e: log.error('Failed to add user1: error ' + e.args[0]['desc']) assert False try: 
topology_st.standalone.add_s(Entry((USER2_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user2' }))) except ldap.LDAPError as e: log.error('Failed to add user2: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER3_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user3' }))) except ldap.LDAPError as e: log.error('Failed to add user3: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER4_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user4' }))) except ldap.LDAPError as e: log.error('Failed to add user4: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER5_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user5' }))) except ldap.LDAPError as e: log.error('Failed to add user5: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((USER6_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user6' }))) except ldap.LDAPError as e: log.error('Failed to add user6: error ' + e.args[0]['desc']) assert False # Enable password policy try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 1 # # Add the container try: topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for level1: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH1_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 
'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for level1: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH1_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', 'pwdpolicysubentry': BRANCH1_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for level1: error ' + e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH1_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', 'costemplatedn': BRANCH1_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for level1: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 2 # # Add the container try: topology_st.standalone.add_s(Entry((BRANCH2_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for level2: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH2_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for level2: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH2_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate 
extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com', 'pwdpolicysubentry': BRANCH2_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for level2: error ' + e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH2_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', 'costemplatedn': BRANCH2_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for level2: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 3 # # Add the container try: topology_st.standalone.add_s(Entry((BRANCH3_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH3_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for level3: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH3_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com', 'pwdpolicysubentry': BRANCH3_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for level3: error ' + 
e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH3_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', 'costemplatedn': BRANCH3_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 4 # # Add the container try: topology_st.standalone.add_s(Entry((BRANCH4_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH4_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for branch4: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH4_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', 'pwdpolicysubentry': BRANCH4_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH4_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'costemplatedn': BRANCH4_COS_TMPL, 
'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for branch4: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 5 # # Add the container try: topology_st.standalone.add_s(Entry((BRANCH5_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for branch5: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH5_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for branch5: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH5_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com', 'pwdpolicysubentry': BRANCH5_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for branch5: error ' + e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH5_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', 'costemplatedn': BRANCH5_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) assert False # # Add subtree policy to branch 6 # # Add the container try: 
topology_st.standalone.add_s(Entry((BRANCH6_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for branch6: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: topology_st.standalone.add_s(Entry((BRANCH6_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy for branch6: error ' + e.args[0]['desc']) assert False # Add the COS template try: topology_st.standalone.add_s(Entry((BRANCH6_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', 'pwdpolicysubentry': BRANCH6_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template for branch6: error ' + e.args[0]['desc']) assert False # Add the COS definition try: topology_st.standalone.add_s(Entry((BRANCH6_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', 'costemplatedn': BRANCH6_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def for branch6: error ' + e.args[0]['desc']) assert False time.sleep(2) # # Now check that each user has its expected passwordPolicy subentry # try: entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP): log.fatal('User %s does not have 
expected pwdpolicysubentry!') assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP): log.fatal('User %s does not have expected pwdpolicysubentry!' 
% USER6_DN) assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.args[0]['desc'])) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47981_test.py000066400000000000000000000176041421664411400260230ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import ldap.sasl import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME, DN_CONFIG pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) BRANCH = 'ou=people,' + DEFAULT_SUFFIX USER_DN = 'uid=user1,%s' % (BRANCH) BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' SECOND_SUFFIX = 'o=netscaperoot' BE_NAME = 'netscaperoot' def addSubtreePwPolicy(inst): # # Add subtree policy to the people branch # try: inst.add_s(Entry((BRANCH_CONTAINER, { 'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: log.error('Failed to add subtree container for ou=people: error ' + e.args[0]['desc']) assert False # Add the password policy subentry try: inst.add_s(Entry((BRANCH_PWP, { 'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'passwordMustChange': 'off', 
'passwordExp': 'off', 'passwordHistory': 'off', 'passwordMinAge': '0', 'passwordChange': 'off', 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy: error ' + e.args[0]['desc']) assert False # Add the COS template try: inst.add_s(Entry((BRANCH_COS_TMPL, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', 'pwdpolicysubentry': BRANCH_PWP }))) except ldap.LDAPError as e: log.error('Failed to add COS template: error ' + e.args[0]['desc']) assert False # Add the COS definition try: inst.add_s(Entry((BRANCH_COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', 'costemplatedn': BRANCH_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: log.error('Failed to add COS def: error ' + e.args[0]['desc']) assert False time.sleep(1) def delSubtreePwPolicy(inst): try: inst.delete_s(BRANCH_COS_DEF) except ldap.LDAPError as e: log.error('Failed to delete COS def: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_COS_TMPL) except ldap.LDAPError as e: log.error('Failed to delete COS template: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_PWP) except ldap.LDAPError as e: log.error('Failed to delete COS password policy: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_CONTAINER) except ldap.LDAPError as e: log.error('Failed to delete COS container: error ' + e.args[0]['desc']) assert False time.sleep(1) def test_ticket47981(topology_st): """ If there are multiple suffixes, and the last suffix checked does not contain any COS entries, while other suffixes do, then the vattr cache is not invalidated as it should be. 
Then any cached entries will still contain the old COS attributes/values. """ log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users') # # Create a second backend that does not have any COS entries # log.info('Adding second suffix that will not contain any COS entries...\n') topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME}) topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME) try: topology_st.standalone.add_s(Entry((SECOND_SUFFIX, { 'objectclass': 'top organization'.split(), 'o': BE_NAME}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to create suffix entry: error ' + e.args[0]['desc']) assert False # # Add People branch, it might already exist # log.info('Add our test entries to the default suffix, and proceed with the test...') try: topology_st.standalone.add_s(Entry((BRANCH, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'level4' }))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to add ou=people: error ' + e.args[0]['desc']) assert False # # Add a user to the branch # try: topology_st.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user1' }))) except ldap.LDAPError as e: log.error('Failed to add user1: error ' + e.args[0]['desc']) assert False # # Enable password policy and add the subtree policy # try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) assert False addSubtreePwPolicy(topology_st.standalone) # # Now check the user has its expected passwordPolicy subentry # try: entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry', 'dn']) if not entries[0].hasAttr('pwdpolicysubentry'): log.fatal('User does not have expected pwdpolicysubentry!') assert 
False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False # # Delete the password policy and make sure it is removed from the same user # delSubtreePwPolicy(topology_st.standalone) try: entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if entries[0].hasAttr('pwdpolicysubentry'): log.fatal('User unexpectedly does have the pwdpolicysubentry!') assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False # # Add the subtree policvy back and see if the user now has it # addSubtreePwPolicy(topology_st.standalone) try: entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) if not entries[0].hasAttr('pwdpolicysubentry'): log.fatal('User does not have expected pwdpolicysubentry!') assert False except ldap.LDAPError as e: log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket47988_test.py000066400000000000000000000352131421664411400260260ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # ''' Created on Nov 7, 2013 @author: tbordaz ''' import logging import shutil import stat import tarfile import time from random import randint import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_m2 from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX OC_NAME = 'OCticket47988' MUST = "(postalAddress $ postalCode)" MAY = "(member $ street)" OTHER_NAME = 'other_entry' MAX_OTHERS = 10 BIND_NAME = 'bind_entry' BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' sup = 'person' if not must: must = MUST if not may: may = MAY new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc def _header(topology_m2, label): topology_m2.ms["supplier1"].log.info("\n\n###############################################") topology_m2.ms["supplier1"].log.info("#######") topology_m2.ms["supplier1"].log.info("####### %s" % label) topology_m2.ms["supplier1"].log.info("#######") topology_m2.ms["supplier1"].log.info("###################################################") def _install_schema(server, tarFile): server.stop(timeout=10) tmpSchema = '/tmp/schema_47988' if not os.path.isdir(tmpSchema): os.mkdir(tmpSchema) for the_file in os.listdir(tmpSchema): file_path = os.path.join(tmpSchema, the_file) if os.path.isfile(file_path): os.unlink(file_path) os.chdir(tmpSchema) tar = tarfile.open(tarFile, 'r:gz') for member in tar.getmembers(): tar.extract(member.name) tar.close() st = os.stat(server.schemadir) os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | 
stat.S_IXUSR | stat.S_IRUSR) for the_file in os.listdir(tmpSchema): schemaFile = os.path.join(server.schemadir, the_file) if os.path.isfile(schemaFile): if the_file.startswith('99user.ldif'): # only replace 99user.ldif, the other standard definition are kept os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR) server.log.info("replace %s" % schemaFile) shutil.copy(the_file, schemaFile) else: server.log.info("add %s" % schemaFile) shutil.copy(the_file, schemaFile) os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP) os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP) def test_ticket47988_init(topology_m2): """ It adds - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci """ _header(topology_m2, 'test_ticket47988_init') # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', ensure_bytes(str(260)))] # Internal op topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) # check that entry 0 is replicated before loop = 0 entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) while loop <= 10: try: ent = topology_m2.ms["supplier2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 assert (loop <= 10) topology_m2.ms["supplier1"].stop(timeout=10) topology_m2.ms["supplier2"].stop(timeout=10) # install the specific schema M1: ipa3.3, M2: ipa4.1 schema_file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), 
"ticket47988/schema_ipa3.3.tar.gz") _install_schema(topology_m2.ms["supplier1"], schema_file) schema_file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz") _install_schema(topology_m2.ms["supplier2"], schema_file) topology_m2.ms["supplier1"].start(timeout=10) topology_m2.ms["supplier2"].start(timeout=10) def _do_update_schema(server, range=3999): ''' Update the schema of the M2 (IPA4.1). to generate a nsSchemaCSN ''' postfix = str(randint(range, range + 1000)) OID = '2.16.840.1.113730.3.8.12.%s' % postfix NAME = 'thierry%s' % postfix value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % ( OID, NAME) mod = [(ldap.MOD_ADD, 'objectclasses', ensure_bytes(value))] server.modify_s('cn=schema', mod) def _do_update_entry(supplier=None, consumer=None, attempts=10): ''' This is doing an update on M2 (IPA4.1) and checks the update has been propagated to M1 (IPA3.3) ''' assert (supplier) assert (consumer) entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) value = str(randint(100, 200)) mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(value))] supplier.modify_s(entryDN, mod) loop = 0 while loop <= attempts: ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) read_val = ensure_str(ent.telephonenumber) or "0" if read_val == value: break # the expected value is not yet replicated. 
try again time.sleep(5) loop += 1 supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value)) assert (loop <= attempts) def _pause_M2_to_M1(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### Pause RA M2->M1 ######################\n") ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].agreement.pause(ents[0].dn) def _resume_M1_to_M2(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### resume RA M1->M2 ######################\n") ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) def _pause_M1_to_M2(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### Pause RA M1->M2 ######################\n") ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) def _resume_M2_to_M1(topology_m2): topology_m2.ms["supplier1"].log.info("\n\n######################### resume RA M2->M1 ######################\n") ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].agreement.resume(ents[0].dn) def test_ticket47988_1(topology_m2): ''' Check that replication is working and pause replication M2->M1 ''' _header(topology_m2, 'test_ticket47988_1') topology_m2.ms["supplier1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n") _do_update_entry(supplier=topology_m2.ms["supplier2"], consumer=topology_m2.ms["supplier1"], attempts=5) _pause_M2_to_M1(topology_m2) def test_ticket47988_2(topology_m2): ''' Update M1 schema and trigger update M1->M2 So M1 should learn new/extended definitions that are in M2 schema ''' _header(topology_m2, 'test_ticket47988_2') topology_m2.ms["supplier1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n") supplier1_schema_csn = 
topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nBefore updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    # Here M1 should no, should check M2 schema and learn
    _do_update_schema(topology_m2.ms["supplier1"])
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)

    # to avoid linger effect where a replication session is reused without checking the schema
    _pause_M1_to_M2(topology_m2)
    _resume_M1_to_M2(topology_m2)

    # topo.supplier1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    # time.sleep(60)
    _do_update_entry(supplier=topology_m2.ms["supplier1"],
                     consumer=topology_m2.ms["supplier2"],
                     attempts=15)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter a full replication session\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    # Both sides must now hold a schema CSN (truthiness check only; equality
    # is verified later by test_ticket47988_4).
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)


def test_ticket47988_3(topology_m2):
    '''
    Resume replication M2->M1 and check replication is still working
    '''
    _header(topology_m2, 'test_ticket47988_3')

    _resume_M2_to_M1(topology_m2)

    # Replication must flow in both directions again.
    _do_update_entry(supplier=topology_m2.ms["supplier1"],
                     consumer=topology_m2.ms["supplier2"],
                     attempts=5)
    _do_update_entry(supplier=topology_m2.ms["supplier2"],
                     consumer=topology_m2.ms["supplier1"],
                     attempts=5)


def test_ticket47988_4(topology_m2):
    '''
    Check schemaCSN is identical on both server
    And save the nsschemaCSN to later check they do not change unexpectedly
    '''
    _header(topology_m2, 'test_ticket47988_4')

    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)
    assert (supplier1_schema_csn == supplier2_schema_csn)

    # Saved on the instance objects so test_ticket47988_5 can compare later.
    topology_m2.ms["supplier1"].saved_schema_csn = supplier1_schema_csn
    topology_m2.ms["supplier2"].saved_schema_csn = supplier2_schema_csn


def test_ticket47988_5(topology_m2):
    '''
    Check schemaCSN do not change unexpectedly
    '''
    _header(topology_m2, 'test_ticket47988_5')

    _do_update_entry(supplier=topology_m2.ms["supplier1"],
                     consumer=topology_m2.ms["supplier2"],
                     attempts=5)
    _do_update_entry(supplier=topology_m2.ms["supplier2"],
                     consumer=topology_m2.ms["supplier1"],
                     attempts=5)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)
    assert (supplier1_schema_csn == supplier2_schema_csn)

    # A plain entry update must not bump the schema CSN saved in _4.
    assert (topology_m2.ms["supplier1"].saved_schema_csn == supplier1_schema_csn)
    assert (topology_m2.ms["supplier2"].saved_schema_csn == supplier2_schema_csn)


def test_ticket47988_6(topology_m2):
    '''
    Update M1 schema and trigger update M2->M1
    So M2 should learn new/extended definitions that are in M1 schema
    '''
    _header(topology_m2, 'test_ticket47988_6')

    topology_m2.ms["supplier1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nBefore updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    # Here M1 should no, should check M2 schema and learn
    _do_update_schema(topology_m2.ms["supplier1"], range=5999)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)

    # to avoid linger effect where a replication session is reused without checking the schema
    _pause_M1_to_M2(topology_m2)
    _resume_M1_to_M2(topology_m2)

    # topo.supplier1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    # time.sleep(60)
    # Note: update flows M2 -> M1 here, so M2 has to pull M1's newer schema.
    _do_update_entry(supplier=topology_m2.ms["supplier2"],
                     consumer=topology_m2.ms["supplier1"],
                     attempts=15)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter a full replication session\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48005_test.py000066400000000000000000000306301421664411400260010ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK --- # import logging import re import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import (DEFAULT_SUFFIX, SUFFIX, PLUGIN_REFER_INTEGRITY, PLUGIN_AUTOMEMBER, PLUGIN_MEMBER_OF, PLUGIN_USN) pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48005_setup(topology_st): ''' allow dump core generate a test ldif file using dbgen.pl import the ldif ''' log.info("Ticket 48005 setup...") if hasattr(topology_st.standalone, 'prefix'): prefix = topology_st.standalone.prefix else: prefix = None sysconfig_dirsrv = os.path.join(topology_st.standalone.get_initconfig_dir(), 'dirsrv') cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv p = os.popen(cmdline, "r") ulimitc = p.readline() if ulimitc == "": log.info('No ulimit -c in %s' % sysconfig_dirsrv) log.info('Adding it') cmdline = 'echo "ulimit -c unlimited" >> %s' % sysconfig_dirsrv sysconfig_dirsrv_systemd = sysconfig_dirsrv + ".systemd" cmdline = 'egrep LimitCORE=infinity %s' % sysconfig_dirsrv_systemd p = os.popen(cmdline, "r") lcore = p.readline() if lcore == "": log.info('No LimitCORE in %s' % sysconfig_dirsrv_systemd) log.info('Adding it') cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd topology_st.standalone.restart(timeout=10) ldif_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif" os.system('ls %s' % ldif_file) os.system('rm -f %s' % ldif_file) if hasattr(topology_st.standalone, 'prefix'): prefix = topology_st.standalone.prefix else: prefix = "" dbgen_prog = prefix + '/bin/dbgen.pl' log.info('dbgen_prog: %s' % dbgen_prog) os.system('%s -s %s -o %s -u -n 10000' % (dbgen_prog, SUFFIX, ldif_file)) cmdline = 'egrep dn: %s | wc -l' % ldif_file p = os.popen(cmdline, "r") dnnumstr = p.readline() num = int(dnnumstr) log.info("We have %d entries.\n", num) importTask = Tasks(topology_st.standalone) args = {TASK_WAIT: True} 
importTask.importLDIF(SUFFIX, None, ldif_file, args) log.info('Importing %s complete.' % ldif_file) def test_ticket48005_memberof(topology_st): ''' Enable memberof and referint plugin Run fixmemberof task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. ''' log.info("Ticket 48005 memberof test...") topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) topology_st.standalone.restart(timeout=10) try: # run the fixup task topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False}) except ValueError: log.error('Some problem occured with a value that was provided') assert False topology_st.standalone.stop(timeout=10) mytmp = '/tmp' logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_memberof' % (logdir, mytmp)) log.error('FixMemberof: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) topology_st.standalone.restart(timeout=10) log.info("Ticket 48005 memberof test complete") def test_ticket48005_automember(topology_st): ''' Enable automember and referint plugin 1. Run automember rebuild membership task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. 2. Run automember export updates task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. 3. 
Run automember map updates task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. ''' log.info("Ticket 48005 automember test...") topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) # configure automember config entry log.info('Adding automember config') try: topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { 'objectclass': 'top autoMemberDefinition'.split(), 'autoMemberScope': 'dc=example,dc=com', 'autoMemberFilter': 'objectclass=inetorgperson', 'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com', 'autoMemberGroupingAttr': 'uniquemember:dn', 'cn': 'group cfg'}))) except ValueError: log.error('Failed to add automember config') assert False topology_st.standalone.restart(timeout=10) try: # run the automember rebuild task topology_st.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False}) except ValueError: log.error('Automember rebuild task failed.') assert False topology_st.standalone.stop(timeout=10) mytmp = '/tmp' logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_automember_rebuild' % (logdir, mytmp)) log.error('Automember_rebuld: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif" try: # run the automember export task topology_st.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False}) except ValueError: log.error('Automember Export task failed.') assert False topology_st.standalone.stop(timeout=10) logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = 
os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_automember_export' % (logdir, mytmp)) log.error('Automember_export: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) ldif_in_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif" ldif_out_file = mytmp + "/ticket48005_automember_map.ldif" try: # run the automember map task topology_st.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, args={TASK_WAIT: False}) except ValueError: log.error('Automember Map task failed.') assert False topology_st.standalone.stop(timeout=10) logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_automember_map' % (logdir, mytmp)) log.error('Automember_map: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) topology_st.standalone.restart(timeout=10) log.info("Ticket 48005 automember test complete") def test_ticket48005_syntaxvalidate(topology_st): ''' Run syntax validate task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. 
''' log.info("Ticket 48005 syntax validate test...") try: # run the fixup task topology_st.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False}) except ValueError: log.error('Some problem occured with a value that was provided') assert False topology_st.standalone.stop(timeout=10) mytmp = '/tmp' logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_syntaxvalidate' % (logdir, mytmp)) log.error('SyntaxValidate: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) log.info("Ticket 48005 syntax validate test complete") def test_ticket48005_usn(topology_st): ''' Enable entryusn Delete all user entries. Run USN tombstone cleanup task Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. ''' log.info("Ticket 48005 usn test...") topology_st.standalone.plugins.enable(name=PLUGIN_USN) topology_st.standalone.restart(timeout=10) try: entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)") if len(entries) == 0: log.info("No user entries.") else: for i in range(len(entries)): # log.info('Deleting %s' % entries[i].dn) try: topology_st.standalone.delete_s(entries[i].dn) except ValueError: log.error('delete_s %s failed.' 
% entries[i].dn) assert False except ValueError: log.error('search_s failed.') assert False try: # run the usn tombstone cleanup topology_st.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False}) except ValueError: log.error('Some problem occured with a value that was provided') assert False topology_st.standalone.stop(timeout=10) mytmp = '/tmp' logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": s.system('mv %score* %s/core.ticket48005_usn' % (logdir, mytmp)) log.error('usnTombstoneCleanup: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) topology_st.standalone.plugins.disable(name=PLUGIN_USN) topology_st.standalone.restart(timeout=10) log.info("Ticket 48005 usn test complete") def test_ticket48005_schemareload(topology_st): ''' Run schema reload task without waiting Shutdown the server Check if a core file was generated or not If no core was found, this test case was successful. 
''' log.info("Ticket 48005 schema reload test...") try: # run the schema reload task topology_st.standalone.tasks.schemaReload(args={TASK_WAIT: False}) except ValueError: log.error('Schema Reload task failed.') assert False topology_st.standalone.stop(timeout=10) logdir = re.sub('errors', '', topology_st.standalone.errlog) cmdline = 'ls ' + logdir + 'core*' p = os.popen(cmdline, "r") lcore = p.readline() if lcore != "": mytmp = '/tmp' s.system('mv %score* %s/core.ticket48005_schema_reload' % (logdir, mytmp)) log.error('Schema reload: Moved core file(s) to %s; Test failed' % mytmp) assert False log.info('No core files are found') topology_st.standalone.start(timeout=10) log.info("Ticket 48005 schema reload test complete") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48013_test.py000066400000000000000000000056701421664411400260060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---
#
import ldapurl
import pytest
from ldap.ldapobject import SimpleLDAPObject
from ldap.syncrepl import SyncreplConsumer
from lib389.utils import *
from lib389.topologies import topology_st
from lib389._constants import (PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DN_CONFIG,
                               DN_DM, PASSWORD, PLUGIN_REPL_SYNC, HOST_STANDALONE,
                               PORT_STANDALONE)

# Skip on older versions
pytestmark = [pytest.mark.tier2,
              pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")]

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)


class SyncObject(SimpleLDAPObject, SyncreplConsumer):
    """LDAP connection that can issue RFC 4533 (content sync) searches."""

    def __init__(self, uri):
        # Init the ldap connection
        SimpleLDAPObject.__init__(self, uri)

    def sync_search(self, test_cookie):
        # refreshOnly-mode sync search over the whole suffix; the cookie
        # under test is passed straight through to the server.
        self.syncrepl_search('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                             filterstr='(objectclass=*)', mode='refreshOnly',
                             cookie=test_cookie)

    def poll(self):
        self.syncrepl_poll(all=1)


def test_ticket48013(topology_st):
    '''
    Content Synchronization: Test that invalid cookies are caught
    '''

    cookies = ('#', '##', 'a#a#a', 'a#a#1')

    # Enable dynamic plugins
    try:
        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
    except ldap.LDAPError as e:
        log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
        assert False

    # Enable retro changelog
    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

    # Enable content sync plugin
    topology_st.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)

    # Set everything up
    ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE, PORT_STANDALONE))
    ldap_connection = SyncObject(ldap_url.initializeUrl())

    # Authenticate
    try:
        ldap_connection.simple_bind_s(DN_DM, PASSWORD)
    except ldap.LDAPError as e:
        log.error('Login to LDAP server failed: {}'.format(e.args[0]['desc']))
        assert False

    # Test invalid cookies: each one must be rejected by the server;
    # reaching poll() without an exception means the cookie was accepted.
    for invalid_cookie in cookies:
        log.info('Testing cookie: %s' % invalid_cookie)
        try:
            ldap_connection.sync_search(invalid_cookie)
            ldap_connection.poll()
            log.fatal('Invalid cookie accepted!')
            assert False
        except Exception as e:
            log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info']))
            pass

    # Success
    log.info('Test complete')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48026_test.py000066400000000000000000000111541421664411400260040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
from lib389._constants import PLUGIN_ATTR_UNIQUENESS, DEFAULT_SUFFIX

# Skip on older versions
pytestmark = [pytest.mark.tier2,
              pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")]

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX


def test_ticket48026(topology_st):
    '''
    Test that multiple attribute uniqueness works correctly.
    '''
    # Configure the plugin
    inst = topology_st.standalone
    inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)

    try:
        # This plugin enable / disable doesn't seem to create the nsslapd-pluginId correctly?
        # Enforce uniqueness across BOTH attributes: mail and mailAlternateAddress.
        inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                      [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'mail'),
                       (ldap.MOD_ADD, 'uniqueness-attribute-name', b'mailAlternateAddress'),
                       ])
    except ldap.LDAPError as e:
        log.fatal('test_ticket48026: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc']))
        assert False

    inst.restart(timeout=30)

    # Add an entry
    try:
        inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
                                     'sn': '1',
                                     'cn': 'user 1',
                                     'uid': 'user1',
                                     'mail': 'user1@example.com',
                                     'mailAlternateAddress': 'user1@alt.example.com',
                                     'userpassword': 'password'})))
    except ldap.LDAPError as e:
        log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error {}'.format(e.args[0]['desc']))
        assert False

    # Each of the four adds below reuses one of user1's values and must be
    # rejected with CONSTRAINT_VIOLATION (mail vs mail, alt vs alt, and the
    # two cross combinations).
    try:
        inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
                                     'sn': '2',
                                     'cn': 'user 2',
                                     'uid': 'user2',
                                     'mail': 'user1@example.com',
                                     'userpassword': 'password'})))
    except ldap.CONSTRAINT_VIOLATION:
        pass
    else:
        log.error('test_ticket48026: Adding of 1st entry(mail v mail) incorrectly succeeded')
        assert False

    try:
        inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
                                     'sn': '2',
                                     'cn': 'user 2',
                                     'uid': 'user2',
                                     'mailAlternateAddress': 'user1@alt.example.com',
                                     'userpassword': 'password'})))
    except ldap.CONSTRAINT_VIOLATION:
        pass
    else:
        log.error(
            'test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded')
        assert False

    try:
        inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
                                     'sn': '2',
                                     'cn': 'user 2',
                                     'uid': 'user2',
                                     'mail': 'user1@alt.example.com',
                                     'userpassword': 'password'})))
    except ldap.CONSTRAINT_VIOLATION:
        pass
    else:
        log.error('test_ticket48026: Adding of 3rd entry(mail v mailAlternateAddress) incorrectly succeeded')
        assert False

    try:
        inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
                                     'sn': '2',
                                     'cn': 'user 2',
                                     'uid': 'user2',
                                     'mailAlternateAddress': 'user1@example.com',
                                     'userpassword': 'password'})))
    except ldap.CONSTRAINT_VIOLATION:
        pass
    else:
        log.error('test_ticket48026: Adding of 4th entry(mailAlternateAddress v mail) incorrectly succeeded')
        assert False

    log.info('Test complete')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48109_test.py000066400000000000000000000330571421664411400260140ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
from lib389._constants import SUFFIX, DEFAULT_SUFFIX

pytestmark = pytest.mark.tier2

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'


def test_ticket48109(topology_st):
    '''
    Set SubStr lengths to cn=uid,cn=index,...
        objectClass: extensibleObject
        nsIndexType: sub
        nsSubStrBegin: 2
        nsSubStrEnd: 2
    '''
    log.info('Test case 0')
    # add substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_ADD, 'objectClass', b'extensibleObject'),
                                         (ldap.MOD_ADD, 'nsIndexType', b'sub'),
                                         (ldap.MOD_ADD, 'nsSubStrBegin', b'2'),
                                         (ldap.MOD_ADD, 'nsSubStrEnd', b'2')])
    except ldap.LDAPError as e:
        log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    # restart the server to apply the indexing
    topology_st.standalone.restart(timeout=10)

    # add a test user
    UID = 'auser0'
    USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
    try:
        topology_st.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
            'cn': 'a user0', 'sn': 'user0', 'givenname': 'a', 'mail': UID})))
    except ldap.LDAPError as e:
        log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc']))
        assert False

    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)')
    assert len(entries) == 1

    # restart the server to check the access log
    topology_st.standalone.restart(timeout=10)

    # Find the SRCH line for the substring search, then its RESULT line;
    # "notes=A" / "notes=U" in the RESULT means the index was NOT used.
    cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology_st.standalone.accesslog)
    p = os.popen(cmdline, "r")
    l0 = p.readline()
    if l0 == "":
        log.error('Search with "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
        assert False
    else:
        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
        regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
        match = regex.match(l0)
        log.info('match: %s' % match.group(1))
        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
        p = os.popen(cmdline, "r")
        l1 = p.readline()
        if l1 == "":
            log.error('Search result of "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
            assert False
        else:
            log.info('l1: %s' % l1)
            regex = re.compile(r'.*nentries=(\d+)\s+.*')
            match = regex.match(l1)
            log.info('match: nentires=%s' % match.group(1))
            if match.group(1) == "0":
                log.error('Entry uid=a* not found.')
                assert False
            else:
                log.info('Entry uid=a* found.')
                regex = re.compile(r'.*(notes=[AU]).*')
                match = regex.match(l1)
                if match:
                    log.error('%s - substr index was not used' % match.group(1))
                    assert False
                else:
                    log.info('Test case 0 - OK - substr index used')

    # clean up substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_DELETE, 'objectClass', b'extensibleObject'),
                                         (ldap.MOD_DELETE, 'nsIndexType', b'sub'),
                                         (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'),
                                         (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')])
    except ldap.LDAPError as e:
        log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    '''
    Set SubStr lengths to cn=uid,cn=index,...
        nsIndexType: sub
        nsMatchingRule: nsSubStrBegin=2
        nsMatchingRule: nsSubStrEnd=2
    '''
    log.info('Test case 1')
    # add substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_ADD, 'nsIndexType', b'sub'),
                                         (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=2'),
                                         (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=2')])
    except ldap.LDAPError as e:
        log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    # restart the server to apply the indexing
    topology_st.standalone.restart(timeout=10)

    # add a test user
    UID = 'buser1'
    USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
    try:
        topology_st.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
            'cn': 'b user1', 'sn': 'user1', 'givenname': 'b', 'mail': UID})))
    except ldap.LDAPError as e:
        log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc']))
        assert False

    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)')
    assert len(entries) == 1

    # restart the server to check the access log
    topology_st.standalone.restart(timeout=10)

    cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology_st.standalone.accesslog)
    p = os.popen(cmdline, "r")
    l0 = p.readline()
    if l0 == "":
        log.error('Search with "(uid=b*)" is not logged in ' + topology_st.standalone.accesslog)
        assert False
    else:
        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
        regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
        match = regex.match(l0)
        log.info('match: %s' % match.group(1))
        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
        p = os.popen(cmdline, "r")
        l1 = p.readline()
        if l1 == "":
            log.error('Search result of "(uid=*b)" is not logged in ' + topology_st.standalone.accesslog)
            assert False
        else:
            log.info('l1: %s' % l1)
            regex = re.compile(r'.*nentries=(\d+)\s+.*')
            match = regex.match(l1)
            log.info('match: nentires=%s' % match.group(1))
            if match.group(1) == "0":
                log.error('Entry uid=*b not found.')
                assert False
            else:
                log.info('Entry uid=*b found.')
                regex = re.compile(r'.*(notes=[AU]).*')
                match = regex.match(l1)
                if match:
                    log.error('%s - substr index was not used' % match.group(1))
                    assert False
                else:
                    log.info('Test case 1 - OK - substr index used')

    # clean up substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_DELETE, 'nsIndexType', b'sub'),
                                         (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrbegin=2'),
                                         (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=2')])
    except ldap.LDAPError as e:
        log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    '''
    Set SubStr conflict formats/lengths to cn=uid,cn=index,...
        objectClass: extensibleObject
        nsIndexType: sub
        nsMatchingRule: nsSubStrBegin=3
        nsMatchingRule: nsSubStrEnd=3
        nsSubStrBegin: 2
        nsSubStrEnd: 2
    nsSubStr{Begin,End} are honored.
    '''
    log.info('Test case 2')
    # add substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_ADD, 'nsIndexType', b'sub'),
                                         (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=3'),
                                         (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=3'),
                                         (ldap.MOD_ADD, 'objectClass', b'extensibleObject'),
                                         (ldap.MOD_ADD, 'nsSubStrBegin', b'2'),
                                         (ldap.MOD_ADD, 'nsSubStrEnd', b'2')])
    except ldap.LDAPError as e:
        log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    # restart the server to apply the indexing
    topology_st.standalone.restart(timeout=10)

    # add a test user
    UID = 'cuser2'
    USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
    try:
        topology_st.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
            'cn': 'c user2', 'sn': 'user2', 'givenname': 'c', 'mail': UID})))
    except ldap.LDAPError as e:
        log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc']))
        assert False

    # Both a 2-char prefix and a 2-char suffix search must be indexed.
    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)')
    assert len(entries) == 1

    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)')
    assert len(entries) == 1

    # restart the server to check the access log
    topology_st.standalone.restart(timeout=10)

    cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology_st.standalone.accesslog)
    p = os.popen(cmdline, "r")
    l0 = p.readline()
    if l0 == "":
        log.error('Search with "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
        assert False
    else:
        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
        regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
        match = regex.match(l0)
        log.info('match: %s' % match.group(1))
        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
        p = os.popen(cmdline, "r")
        l1 = p.readline()
        if l1 == "":
            log.error('Search result of "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
            assert False
        else:
            log.info('l1: %s' % l1)
            regex = re.compile(r'.*nentries=(\d+)\s+.*')
            match = regex.match(l1)
            log.info('match: nentires=%s' % match.group(1))
            if match.group(1) == "0":
                log.error('Entry uid=c* not found.')
                assert False
            else:
                log.info('Entry uid=c* found.')
                regex = re.compile(r'.*(notes=[AU]).*')
                match = regex.match(l1)
                if match:
                    log.error('%s - substr index was not used' % match.group(1))
                    assert False
                else:
                    log.info('Test case 2-1 - OK - correct substr index used')

    cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology_st.standalone.accesslog)
    p = os.popen(cmdline, "r")
    l0 = p.readline()
    if l0 == "":
        log.error('Search with "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
        assert False
    else:
        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
        regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
        match = regex.match(l0)
        log.info('match: %s' % match.group(1))
        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
        p = os.popen(cmdline, "r")
        l1 = p.readline()
        if l1 == "":
            log.error('Search result of "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
            assert False
        else:
            log.info('l1: %s' % l1)
            regex = re.compile(r'.*nentries=(\d+)\s+.*')
            match = regex.match(l1)
            log.info('match: nentires=%s' % match.group(1))
            if match.group(1) == "0":
                log.error('Entry uid=*2 not found.')
                assert False
            else:
                log.info('Entry uid=*2 found.')
                regex = re.compile(r'.*(notes=[AU]).*')
                match = regex.match(l1)
                if match:
                    log.error('%s - substr index was not used' % match.group(1))
                    assert False
                else:
                    log.info('Test case 2-2 - OK - correct substr index used')

    # clean up substr setting to UID_INDEX
    try:
        topology_st.standalone.modify_s(UID_INDEX,
                                        [(ldap.MOD_DELETE, 'nsIndexType', b'sub'),
                                         (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrbegin=3'),
                                         (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=3'),
                                         (ldap.MOD_DELETE, 'objectClass', b'extensibleObject'),
                                         (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'),
                                         (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')])
    except ldap.LDAPError as e:
        log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc']))
        assert False

    log.info('Testcase PASSED')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48170_test.py000066400000000000000000000021761421664411400260100ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
from lib389.utils import *
from lib389.topologies import topology_st

pytestmark = pytest.mark.tier2

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)


def test_ticket48170(topology_st):
    '''
    Attempt to add a nsIndexType wikth an invalid value: "eq,pres"
    '''
    INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
    REJECTED = False
    try:
        topology_st.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', b'eq,pres')])
    except ldap.UNWILLING_TO_PERFORM:
        log.info('Index update correctly rejected')
        REJECTED = True

    if not REJECTED:
        log.fatal('Invalid nsIndexType value was incorrectly accepted.')
        assert False

    log.info('Test complete')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48194_test.py000066400000000000000000000335641421664411400260230ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK --- # import logging import subprocess import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.nss_ssl import NssSsl log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN RSA = 'RSA' RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) LDAPSPORT = str(SECUREPORT_STANDALONE) SERVERCERT = 'Server-Cert' plus_all_ecount = 0 plus_all_dcount = 0 plus_all_ecount_noweak = 0 plus_all_dcount_noweak = 0 def _header(topology_st, label): topology_st.standalone.log.info("\n\n###############################################") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("###############################################") def test_init(topology_st): """ Generate self signed cert and import it to the DS cert db. 
Enable SSL """ _header(topology_st, 'Testing Ticket 48194 - harden the list of ciphers available by default') nss_ssl = NssSsl(dbpath=topology_st.standalone.get_cert_dir()) nss_ssl.reinit() nss_ssl.create_rsa_ca() nss_ssl.create_rsa_key_and_cert() log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), (ldap.MOD_REPLACE, 'nsTLS1', b'on'), (ldap.MOD_REPLACE, 'nsSSLClientAuth', b'allowed'), (ldap.MOD_REPLACE, 'allowWeakCipher', b'on'), (ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all')]) topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', b'on'), (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', b'off'), (ldap.MOD_REPLACE, 'nsslapd-secureport', ensure_bytes(LDAPSPORT))]) if ds_is_older('1.4.0'): topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), 'cn': RSA, 'nsSSLPersonalitySSL': SERVERCERT, 'nsSSLToken': 'internal (software)', 'nsSSLActivation': 'on'}))) def connectWithOpenssl(topology_st, cipher, expect): """ Connect with the given cipher Condition: If expect is True, the handshake should be successful. If expect is False, the handshake should be refused with access log: "Cannot communicate securely with peer: no common encryption algorithm(s)." 
""" log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed") myurl = 'localhost:%s' % LDAPSPORT cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher] strcmdline = " ".join(cmdline) log.info("Running cmdline: %s", strcmdline) try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) except ValueError: log.info("%s failed: %s", cmdline, ValueError) proc.kill() while True: l = proc.stdout.readline() if l == b"": break if b'Cipher is' in l: log.info("Found: %s", l) if expect: if b'(NONE)' in l: assert False else: proc.stdin.close() assert True else: if b'(NONE)' in l: assert True else: proc.stdin.close() assert False def test_run_0(topology_st): """ Check nsSSL3Ciphers: +all All ciphers are enabled except null. Note: allowWeakCipher: on """ _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.restart(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_1(topology_st): """ Check nsSSL3Ciphers: +all All ciphers are enabled except null. Note: default allowWeakCipher (i.e., off) for +all """ _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) # Make sure allowWeakCipher is not set. 
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_2(topology_st): """ Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. default allowWeakCipher """ _header(topology_st, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+rsa_aes_128_sha,+rsa_aes_256_sha')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', False) connectWithOpenssl(topology_st, 'AES128-SHA', True) connectWithOpenssl(topology_st, 'AES256-SHA', True) def test_run_3(topology_st): """ Check nsSSL3Ciphers: -all All ciphers are disabled. 
default allowWeakCipher """ _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'-all')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(1) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', False) def test_run_4(topology_st): """ Check no nsSSL3Ciphers Default ciphers are enabled. default allowWeakCipher """ _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', b'-all')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_5(topology_st): """ Check nsSSL3Ciphers: default Default ciphers are enabled. 
default allowWeakCipher """ _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_6(topology_st): """ Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 All ciphers are disabled. default allowWeakCipher """ _header(topology_st, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', False) connectWithOpenssl(topology_st, 'AES128-SHA', True) def test_run_8(topology_st): """ Check nsSSL3Ciphers: default + allowWeakCipher: off Strong Default ciphers are enabled. 
""" _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default'), (ldap.MOD_REPLACE, 'allowWeakCipher', b'off')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_9(topology_st): """ Check no nsSSL3Ciphers Default ciphers are enabled. allowWeakCipher: on nsslapd-errorlog-level: 0 """ _header(topology_st, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), (ldap.MOD_REPLACE, 'allowWeakCipher', b'on')]) topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(2) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) connectWithOpenssl(topology_st, 'AES256-SHA256', True) def test_run_11(topology_st): """ Check nsSSL3Ciphers: +fortezza SSL_GetImplementedCiphers does not return this as a secuire cipher suite """ _header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+fortezza')]) log.info("\n######################### Restarting the server ######################\n") topology_st.standalone.stop(timeout=10) os.system('mv %s %s.48194_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) os.system('touch %s' % (topology_st.standalone.errlog)) time.sleep(1) topology_st.standalone.start(timeout=120) connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) connectWithOpenssl(topology_st, 'AES256-SHA256', False) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48212_test.py000066400000000000000000000114741421664411400260060ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, DATA_DIR pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) MYSUFFIX = 'dc=example,dc=com' MYSUFFIXBE = 'userRoot' _MYLDIF = 'example1k_posix.ldif' UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" def runDbVerify(topology_st): topology_st.standalone.log.info("\n\n +++++ dbverify +++++\n") sbin_dir = get_sbin_dir() dbverifyCMD = sbin_dir + "/dbverify -Z " + topology_st.standalone.serverid + " -V" dbverifyOUT = os.popen(dbverifyCMD, "r") topology_st.standalone.log.info("Running %s" % dbverifyCMD) running = True error = False while running: l = dbverifyOUT.readline() if l == "": running = False elif "libdb:" in l: running = False error = True topology_st.standalone.log.info("%s" % l) elif "verify failed" in l: error = True running = False topology_st.standalone.log.info("%s" % l) if error: topology_st.standalone.log.fatal("dbverify failed") assert False else: 
topology_st.standalone.log.info("dbverify passed") def reindexUidNumber(topology_st): topology_st.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n") try: args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=MYSUFFIX, attrname='uidNumber', args=args) except: topology_st.standalone.log.fatal("Reindexing failed") assert False def test_ticket48212(topology_st): """ Import posixAccount entries. Index uidNumber add nsMatchingRule: integerOrderingMatch run dbverify to see if it reports the db corruption or not delete nsMatchingRule: integerOrderingMatch run dbverify to see if it reports the db corruption or not if no corruption is reported, the bug fix was verified. """ log.info( 'Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.') # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR) ldif_file = f"{data_dir_path}ticket48212/{_MYLDIF}" try: ldif_dir = topology_st.standalone.get_ldif_dir() shutil.copy(ldif_file, ldif_dir) ldif_file = ldif_dir + '/' + _MYLDIF except: log.fatal('Failed to copy ldif to instance ldif dir') assert False topology_st.standalone.log.info( "\n\n######################### Import Test data (%s) ######################\n" % ldif_file) args = {TASK_WAIT: True} importTask = Tasks(topology_st.standalone) importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args) args = {TASK_WAIT: True} runDbVerify(topology_st) topology_st.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n") try: topology_st.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(), 'cn': 'uidnumber', 'nsSystemIndex': 'false', 'nsIndexType': "pres eq".split()}))) except ValueError: topology_st.standalone.log.fatal("add_s failed: %s", ValueError) 
topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n") reindexUidNumber(topology_st) runDbVerify(topology_st) topology_st.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n") try: topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', b'integerOrderingMatch')]) except ValueError: topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n") reindexUidNumber(topology_st) runDbVerify(topology_st) topology_st.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n") try: topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', b'integerOrderingMatch')]) except ValueError: topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) reindexUidNumber(topology_st) runDbVerify(topology_st) log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48214_test.py000066400000000000000000000077521421664411400260140ustar00rootroot00000000000000import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) MYSUFFIX = 'dc=example,dc=com' MYSUFFIXBE = 'userRoot' def getMaxBerSizeFromDseLdif(topology_st): topology_st.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n") dse_ldif = topology_st.standalone.confdir + '/dse.ldif' grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif topology_st.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD) grepMaxBerOUT = os.popen(grepMaxBerCMD, "r") running = True maxbersize = -1 while running: l = 
grepMaxBerOUT.readline() if l == "": topology_st.standalone.log.info(" Empty: %s\n" % l) running = False elif "nsslapd-maxbersize:" in l.lower(): running = False fields = l.split() if len(fields) >= 2: maxbersize = fields[1] topology_st.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1])) else: topology_st.standalone.log.info(" Wrong format - %s\n" % l) else: topology_st.standalone.log.info(" Else?: %s\n" % l) return maxbersize def checkMaxBerSize(topology_st): topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n") maxbersizestr = getMaxBerSizeFromDseLdif(topology_st) maxbersize = int(maxbersizestr) isdefault = True defaultvalue = 2097152 if maxbersize < 0: topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") elif maxbersize == 0: topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) else: isdefault = False topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) try: entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE, "(cn=*)", ['nsslapd-maxbersize']) if entry: searchedsize = entry[0].getValue('nsslapd-maxbersize') topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) else: topology_st.standalone.log.fatal('ERROR: cn=config is not found?') assert False except ldap.LDAPError as e: topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) assert False if isdefault: topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) assert int(searchedsize) == defaultvalue def test_ticket48214_run(topology_st): """ Check ldapsearch returns the correct maxbersize when it is not explicitly set. 
""" log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value') # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n") checkMaxBerSize(topology_st) topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n") topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'0')]) checkMaxBerSize(topology_st) topology_st.standalone.log.info( "\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n") topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'10000')]) checkMaxBerSize(topology_st) topology_st.standalone.log.info("ticket48214 was successfully verified.") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48228_test.py000066400000000000000000000252771421664411400260230ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import time import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD, DN_CONFIG pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) # Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... 
:( SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX def set_global_pwpolicy(topology_st, inhistory): log.info(" +++++ Enable global password policy +++++\n") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Enable password policy try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) assert False log.info(" Set global password history on\n") try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', b'on')]) except ldap.LDAPError as e: log.error('Failed to set passwordHistory: error ' + e.message['desc']) assert False log.info(" Set global passwords in history\n") try: count = "%d" % inhistory topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count.encode())]) except ldap.LDAPError as e: log.error('Failed to set passwordInHistory: error ' + e.message['desc']) assert False time.sleep(1) def set_subtree_pwpolicy(topology_st): log.info(" +++++ Enable subtree level password policy +++++\n") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info(" Add the container") try: topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer'}))) except ldap.LDAPError as e: log.error('Failed to add subtree container: error ' + e.message['desc']) assert False log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}") try: 
topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': SUBTREE_PWPDN, 'passwordMustChange': 'off', 'passwordExp': 'off', 'passwordHistory': 'on', 'passwordInHistory': '6', 'passwordMinAge': '0', 'passwordChange': 'on', 'passwordStorageScheme': 'clear'}))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy: error ' + e.message['desc']) assert False log.info(" Add the COS template") try: topology_st.standalone.add_s( Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': SUBTREE_PWPDN, 'cosPriority': '1', 'cn': SUBTREE_COS_TMPLDN, 'pwdpolicysubentry': SUBTREE_PWP}))) except ldap.LDAPError as e: log.error('Failed to add COS template: error ' + e.message['desc']) assert False log.info(" Add the COS definition") try: topology_st.standalone.add_s( Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': SUBTREE_PWPDN, 'costemplatedn': SUBTREE_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) except ldap.LDAPError as e: log.error('Failed to add COS def: error ' + e.message['desc']) assert False time.sleep(1) def check_passwd_inhistory(topology_st, user, cpw, passwd): inhistory = 0 log.info(" Bind as {%s,%s}" % (user, cpw)) topology_st.standalone.simple_bind_s(user, cpw) time.sleep(1) try: topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd.encode())]) except ldap.LDAPError as e: log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error {0}'.format(e)) inhistory = 1 time.sleep(1) return inhistory def update_passwd(topology_st, user, passwd, times): # Set the default value cpw = passwd for i in range(times): log.info(" Bind as {%s,%s}" % (user, cpw)) topology_st.standalone.simple_bind_s(user, cpw) # Now update the value for this iter. 
cpw = 'password%d' % i try: topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw.encode())]) except ldap.LDAPError as e: log.fatal( 'test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ 'desc']) assert False # checking the first password, which is supposed to be in history inhistory = check_passwd_inhistory(topology_st, user, cpw, passwd) assert inhistory == 1 def test_ticket48228_test_global_policy(topology_st): """ Check global password policy """ log.info(' Set inhistory = 6') set_global_pwpolicy(topology_st, 6) log.info(' Bind as directory manager') log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info(' Add an entry' + USER1_DN) try: topology_st.standalone.add_s( Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'givenname': 'user', 'mail': 'user1@example.com', 'userpassword': 'password'}))) except ldap.LDAPError as e: log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) assert False log.info(' Update the password of ' + USER1_DN + ' 6 times') update_passwd(topology_st, USER1_DN, 'password', 6) log.info(' Set inhistory = 4') set_global_pwpolicy(topology_st, 4) log.info(' checking the first password, which is supposed NOT to be in history any more') cpw = 'password%d' % 5 tpw = 'password' inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) assert inhistory == 0 log.info(' checking the second password, which is supposed NOT to be in history any more') cpw = tpw tpw = 'password%d' % 0 inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) assert inhistory == 0 log.info(' checking the third password, which is supposed NOT to be in history any more') cpw = tpw tpw = 'password%d' % 1 inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) assert inhistory == 0 log.info(' checking the sixth 
password, which is supposed to be in history') cpw = tpw tpw = 'password%d' % 5 inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) assert inhistory == 1 log.info("Global policy was successfully verified.") def text_ticket48228_text_subtree_policy(topology_st): """ Check subtree level password policy """ log.info(' Set inhistory = 6') set_subtree_pwpolicy(topology_st) log.info(' Bind as directory manager') log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info(' Add an entry' + USER2_DN) try: topology_st.standalone.add_s( Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'sn': '2', 'cn': 'user 2', 'uid': 'user2', 'givenname': 'user', 'mail': 'user2@example.com', 'userpassword': 'password'}))) except ldap.LDAPError as e: log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) assert False log.info(' Update the password of ' + USER2_DN + ' 6 times') update_passwd(topology_st, USER2_DN, 'password', 6) log.info(' Set inhistory = 4') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) try: topology_st.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', b'4')]) except ldap.LDAPError as e: log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) assert False log.info(' checking the first password, which is supposed NOT to be in history any more') cpw = 'password%d' % 5 tpw = 'password' inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) assert inhistory == 0 log.info(' checking the second password, which is supposed NOT to be in history any more') cpw = tpw tpw = 'password%d' % 1 inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) assert inhistory == 0 log.info(' checking the third password, which is supposed NOT to be in history any more') cpw = tpw tpw = 'password%d' % 2 inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) assert inhistory == 0 log.info(' 
checking the six password, which is supposed to be in history') cpw = tpw tpw = 'password%d' % 5 inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) assert inhistory == 1 log.info("Subtree level policy was successfully verified.") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48233_test.py000066400000000000000000000035451421664411400260110ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48233(topology_st): """Test that ACI's that use IP restrictions do not crash the server at shutdown """ # Add aci to restrict access my ip aci_text = ('(targetattr != "userPassword")(version 3.0;acl ' + '"Enable anonymous access - IP"; allow (read,compare,search)' + '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)') try: topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci_text))]) except ldap.LDAPError as e: log.error('Failed to add aci: ({}) error {}'.format(aci_text,e.args[0]['desc'])) assert False time.sleep(1) # Anonymous search to engage the aci try: topology_st.standalone.simple_bind_s("", "") except ldap.LDAPError as e: log.error('Failed to anonymously bind -error {}'.format(e.args[0]['desc'])) assert False try: entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*') if not entries: log.fatal('Failed return an entries from search') assert False except ldap.LDAPError as e: log.fatal('Search failed: ' + e.message['desc']) assert False # Restart the server topology_st.standalone.restart(timeout=10) # Check for crash if topology_st.standalone.detectDisorderlyShutdown(): log.fatal('Server crashed!') 
assert False log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48252_test.py000066400000000000000000000100731421664411400260040ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389._constants import DEFAULT_SUFFIX, SUFFIX, DEFAULT_BENAME, PLUGIN_USN pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) # Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( USER_NUM = 10 TEST_USER = "test_user" def test_ticket48252_setup(topology_st): """ Enable USN plug-in for enabling tombstones Add test entries """ log.info("Enable the USN plugin...") try: topology_st.standalone.plugins.enable(name=PLUGIN_USN) except e: log.error("Failed to enable USN Plugin: error " + e.message['desc']) assert False log.info("Adding test entries...") ua = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) for i in range(USER_NUM): ua.create(properties={ 'uid': "%s%d" % (TEST_USER, i), 'cn' : "%s%d" % (TEST_USER, i), 'sn' : 'user', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser' }) def in_index_file(topology_st, id, index): key = "%s%s" % (TEST_USER, id) log.info(" dbscan - checking %s is in index file %s..." 
% (key, index)) dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index) if ensure_bytes(key) in ensure_bytes(dbscanOut): found = True topology_st.standalone.log.info("Found key %s in dbscan output" % key) else: found = False topology_st.standalone.log.info("Did not found key %s in dbscan output" % key) return found def test_ticket48252_run_0(topology_st): """ Delete an entry cn=test_entry0 Check it is not in the 'cn' index file """ log.info("Case 1 - Check deleted entry is not in the 'cn' index file") uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) del_rdn = "uid=%s0" % TEST_USER del_entry = uas.get('%s0' % TEST_USER) log.info(" Deleting a test entry %s..." % del_entry) del_entry.delete() assert in_index_file(topology_st, 0, 'cn') is False log.info(" db2index - reindexing %s ..." % 'cn') topology_st.standalone.stop() assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['cn']) topology_st.standalone.start() assert in_index_file(topology_st, 0, 'cn') is False log.info(" entry %s is not in the cn index file after reindexed." % del_rdn) log.info('Case 1 - PASSED') def test_ticket48252_run_1(topology_st): """ Delete an entry cn=test_entry1 Check it is in the 'objectclass' index file as a tombstone entry """ log.info("Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry") uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) del_rdn = "uid=%s1" % TEST_USER del_entry = uas.get('%s1' % TEST_USER) log.info(" Deleting a test entry %s..." % del_rdn) del_entry.delete() entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) assert len(entry) == 1 log.info(" entry %s is in the objectclass index file." % del_rdn) log.info(" db2index - reindexing %s ..." 
% 'objectclass') topology_st.standalone.stop() assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['objectclass']) topology_st.standalone.start() entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) assert len(entry) == 1 log.info(" entry %s is in the objectclass index file after reindexed." % del_rdn) log.info('Case 2 - PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48265_test.py000066400000000000000000000050331421664411400260100ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) USER_NUM = 20 TEST_USER = 'test_user' def test_ticket48265_test(topology_st): """ Complex filter issues Ticket 47521 type complex filter: (&(|(uid=tuser*)(cn=Test user*))(&(givenname=test*3))(mail=tuser@example.com)(&(description=*))) Ticket 48264 type complex filter: (&(&(|(l=EU)(l=AP)(l=NA))(|(c=SE)(c=DE)))(|(uid=*test*)(cn=*test*))(l=eu)) """ log.info("Adding %d test entries..." 
% USER_NUM) for id in range(USER_NUM): name = "%s%d" % (TEST_USER, id) mail = "%s@example.com" % name secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX) topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'sn': name, 'cn': name, 'uid': name, 'givenname': 'test', 'mail': mail, 'description': 'description', 'secretary': secretary, 'l': 'MV', 'title': 'Engineer'}))) log.info("Search with Ticket 47521 type complex filter") for id in range(USER_NUM): name = "%s%d" % (TEST_USER, id) mail = "%s@example.com" % name filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % ( TEST_USER, TEST_USER, mail) entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521) assert len(entry) == 1 log.info("Search with Ticket 48265 type complex filter") for id in range(USER_NUM): name = "%s%d" % (TEST_USER, id) mail = "%s@example.com" % name filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % ( name, mail) entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265) assert len(entry) == 1 log.info('Test 48265 complete\n') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48266_test.py000066400000000000000000000246001421664411400260120ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389.replica import ReplicationManager from lib389._constants import SUFFIX, DEFAULT_SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2 pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 @pytest.fixture(scope="module") def 
entries(topology_m2): # add dummy entries in the staging DIT for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) topology_m2.ms["supplier1"].config.set('nsslapd-accesslog-logbuffering', 'off') topology_m2.ms["supplier1"].config.set('nsslapd-errorlog-level', '8192') # 256 + 4 topology_m2.ms["supplier1"].config.set('nsslapd-accesslog-level', '260') topology_m2.ms["supplier2"].config.set('nsslapd-accesslog-logbuffering', 'off') topology_m2.ms["supplier2"].config.set('nsslapd-errorlog-level', '8192') # 256 + 4 topology_m2.ms["supplier2"].config.set('nsslapd-accesslog-level', '260') def test_ticket48266_fractional(topology_m2, entries): ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', [b'(objectclass=*) $ EXCLUDE telephonenumber']), (ldap.MOD_REPLACE, 'nsds5ReplicaStripAttrs', [b'modifiersname modifytimestamp'])] ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 m1_m2_agmt = ents[0].dn topology_m2.ms["supplier1"].modify_s(ents[0].dn, mod) ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier2"].modify_s(ents[0].dn, mod) topology_m2.ms["supplier1"].restart() topology_m2.ms["supplier2"].restart() repl = ReplicationManager(DEFAULT_SUFFIX) repl.ensure_agreement(topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]) repl.test_replication(topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]) def test_ticket48266_check_repl_desc(topology_m2, entries): name = "cn=%s1,%s" % (NEW_ACCOUNT, SUFFIX) value = 'check repl. 
description' mod = [(ldap.MOD_REPLACE, 'description', ensure_bytes(value))] topology_m2.ms["supplier1"].modify_s(name, mod) loop = 0 while loop <= 10: ent = topology_m2.ms["supplier2"].getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)") if ent.hasAttr('description') and ent.getValue('description') == ensure_bytes(value): break time.sleep(1) loop += 1 assert loop <= 10 # will use this CSN as a starting point on error log # after this is one 'Skipped' then the first csn _get_first_not_replicated_csn # should no longer be Skipped in the error log def _get_last_not_replicated_csn(topology_m2): name = "cn=%s5,%s" % (NEW_ACCOUNT, SUFFIX) # read the first CSN that will not be replicated mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] topology_m2.ms["supplier1"].modify_s(name, mod) msgid = topology_m2.ms["supplier1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) attrs = None for dn, raw_attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) if 'nscpentrywsi' in raw_attrs: attrs = raw_attrs['nscpentrywsi'] assert attrs for attr in attrs: if ensure_str(attr.lower()).startswith('telephonenumber'): break assert attr log.info("############# %s " % name) # now retrieve the CSN of the operation we are looking for csn = None found_ops = topology_m2.ms['supplier1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) assert(len(found_ops) > 0) found_op = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_ops[-1]) log.info(found_op) # Now look for the related CSN found_csns = topology_m2.ms['supplier1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) assert(len(found_csns) > 0) found_csn = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_csns[-1]) log.info(found_csn) return found_csn['csn'] def _get_first_not_replicated_csn(topology_m2): name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX) # read the first CSN that will 
not be replicated mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] topology_m2.ms["supplier1"].modify_s(name, mod) msgid = topology_m2.ms["supplier1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) attrs = None for dn, raw_attrs in rdata: topology_m2.ms["supplier1"].log.info("dn: %s" % dn) if 'nscpentrywsi' in raw_attrs: attrs = raw_attrs['nscpentrywsi'] assert attrs for attr in attrs: if ensure_str(attr.lower()).startswith('telephonenumber'): break assert attr log.info("############# %s " % name) # now retrieve the CSN of the operation we are looking for csn = None found_ops = topology_m2.ms['supplier1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) assert(len(found_ops) > 0) found_op = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_ops[-1]) log.info(found_op) # Now look for the related CSN found_csns = topology_m2.ms['supplier1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) assert(len(found_csns) > 0) found_csn = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_csns[-1]) log.info(found_csn) return found_csn['csn'] def _count_full_session(topology_m2): # # compute the number of 'No more updates' # file_obj = open(topology_m2.ms["supplier1"].errlog, "r") # pattern to find pattern = ".*No more updates to send.*" regex = re.compile(pattern) no_more_updates = 0 # check initiation number of 'No more updates while True: line = file_obj.readline() found = regex.search(line) if (found): no_more_updates = no_more_updates + 1 if (line == ''): break file_obj.close() return no_more_updates def test_ticket48266_count_csn_evaluation(topology_m2, entries): ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 first_csn = _get_first_not_replicated_csn(topology_m2) name = "cn=%s3,%s" % (NEW_ACCOUNT, SUFFIX) NB_SESSION = 102 no_more_update_cnt = 
_count_full_session(topology_m2) topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) # now do a set of updates that will NOT be replicated for telNumber in range(NB_SESSION): mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(telNumber)))] topology_m2.ms["supplier1"].modify_s(name, mod) topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) # let's wait all replication session complete MAX_LOOP = 10 cnt = 0 current_no_more_update = _count_full_session(topology_m2) while (current_no_more_update == no_more_update_cnt): cnt = cnt + 1 if (cnt > MAX_LOOP): break time.sleep(5) current_no_more_update = _count_full_session(topology_m2) log.info('after %d MODs we have completed %d replication sessions' % ( NB_SESSION, (current_no_more_update - no_more_update_cnt))) no_more_update_cnt = current_no_more_update # At this point, with the fix a dummy update was made BUT may be not sent it # make sure it was sent so that the consumer CSN will be updated last_csn = _get_last_not_replicated_csn(topology_m2) # let's wait all replication session complete MAX_LOOP = 10 cnt = 0 current_no_more_update = _count_full_session(topology_m2) while (current_no_more_update == no_more_update_cnt): cnt = cnt + 1 if (cnt > MAX_LOOP): break time.sleep(5) current_no_more_update = _count_full_session(topology_m2) log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % ( last_csn, (current_no_more_update - no_more_update_cnt))) no_more_update_cnt = current_no_more_update # so we should no longer see the first_csn in the log # Let's create a new csn (last_csn) and check there is no longer first_csn topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) last_csn = _get_last_not_replicated_csn(topology_m2) topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) # let's wait for the session to complete MAX_LOOP = 10 cnt = 0 while (current_no_more_update == no_more_update_cnt): cnt = cnt + 1 if (cnt > MAX_LOOP): break time.sleep(5) 
current_no_more_update = _count_full_session(topology_m2) log.info('This MODs %s completed in %d replication sessions, should be sent without evaluating %s' % ( last_csn, (current_no_more_update - no_more_update_cnt), first_csn)) no_more_update_cnt = current_no_more_update # Now determine how many times we have skipped 'csn' # no need to stop the server to check the error log file_obj = open(topology_m2.ms["supplier1"].errlog, "r") # find where the last_csn operation was processed pattern = ".*ruv_add_csn_inprogress: successfully inserted csn %s.*" % last_csn regex = re.compile(pattern) cnt = 0 while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info('last operation was found at %d' % file_obj.tell()) log.info(line) log.info('Now check the we can not find the first csn %s in the log' % first_csn) pattern = ".*Skipping update operation.*CSN %s.*" % first_csn regex = re.compile(pattern) found = False while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info('Unexpected found %s' % line) assert not found if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48270_test.py000066400000000000000000000113241421664411400260040ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 MIXED_VALUE = "/home/mYhOmEdIrEcToRy" LOWER_VALUE = "/home/myhomedirectory" HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' HOMEDIRECTORY_CN = "homedirectory" MATCHINGRULE = 
'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UIDNUMBER_CN = "uidnumber" def test_ticket48270_init(topology_st): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { 'objectclass': "top posixAccount".split(), 'uid': name, 'cn': name, 'uidnumber': str(111), 'gidnumber': str(222), 'homedirectory': "/home/tbordaz_%d" % cpt}))) def test_ticket48270_homeDirectory_indexed_cis(topology_st): log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") try: ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) # log.info("attach debugger") # time.sleep(60) IGNORE_MR_NAME = b'caseIgnoreIA5Match' EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) # topology_st.standalone.stop(timeout=10) log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) # topology_st.standalone.start(timeout=10) args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with a specified matching rule") file_obj = open(topology_st.standalone.errlog, "r") # Check if the MR configuration failure occurs regex = re.compile("unknown or invalid matching rule") while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("The configuration of a specific MR fails") 
log.info(line) # assert not found def test_ticket48270_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) def test_ticket48270_extensible_search(topology_st): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) # check with the exact stored value log.info("Default: can retrieve an entry filter syntax with exact stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) # check with a lower case value that is different from the stored value log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") try: ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") try: ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48272_test.py000066400000000000000000000100211421664411400257770ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from 
lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] USER1 = 'user1' USER1_DOMAIN = 'user1@example.com' PW = 'password' USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX USER1_CONFLICT_DN = 'uid=user1,%s' % DEFAULT_SUFFIX def _create_user(inst, name, dn): inst.add_s(Entry(( dn, { 'objectClass': 'top account simplesecurityobject'.split(), 'uid': name, 'userpassword': PW }))) def _bind(name, cred): # Returns true or false if it worked. if DEBUGGING: print('test 48272 BINDING AS %s:%s' % (name, cred)) status = True conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) try: conn.simple_bind_s(name, cred) conn.unbind_s() except ldap.INVALID_CREDENTIALS: status = False return status def test_ticket48272(topology_st): """ Test the functionality of the addn bind plugin. This should allow users of the type "name" or "name@domain.com" to bind. """ # There will be a better way to do this in the future. 
topology_st.standalone.add_s(Entry(( "cn=addn,cn=plugins,cn=config", { "objectClass": "top nsSlapdPlugin extensibleObject".split(), "cn": "addn", "nsslapd-pluginPath": "libaddn-plugin", "nsslapd-pluginInitfunc": "addn_init", "nsslapd-pluginType": "preoperation", "nsslapd-pluginEnabled": "on", "nsslapd-pluginId": "addn", "nsslapd-pluginVendor": "389 Project", "nsslapd-pluginVersion": "1.3.6.0", "nsslapd-pluginDescription": "Allow AD DN style bind names to LDAP", "addn_default_domain": "example.com", } ))) topology_st.standalone.add_s(Entry(( "cn=example.com,cn=addn,cn=plugins,cn=config", { "objectClass": "top extensibleObject".split(), "cn": "example.com", "addn_base": "ou=People,%s" % DEFAULT_SUFFIX, "addn_filter": "(&(objectClass=account)(uid=%s))", } ))) topology_st.standalone.restart(60) # Add a user _create_user(topology_st.standalone, USER1, USER1_DN) if DEBUGGING is not False: print("Attach now") time.sleep(20) # Make sure our binds still work. assert (_bind(USER1_DN, PW)) # Test an anonymous bind for i in range(0, 10): # Test bind as name assert (_bind(USER1, PW)) # Make sure that name@fakedom fails assert (_bind(USER1_DOMAIN, PW)) # Add a conflicting user to an alternate subtree _create_user(topology_st.standalone, USER1, USER1_CONFLICT_DN) # Change the plugin to search from the rootdn instead # This means we have a conflicting user in scope now! topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'addn_base', ensure_bytes(DEFAULT_SUFFIX))]) topology_st.standalone.restart(60) # Make sure our binds still work. 
assert (_bind(USER1_DN, PW)) assert (_bind(USER1_CONFLICT_DN, PW)) for i in range(0, 10): # Test bind as name fails try: _bind(USER1, PW) assert (False) except: pass # Test bind as name@domain fails too try: _bind(USER1_DOMAIN, PW) assert (False) except: pass log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48294_test.py000066400000000000000000000202301421664411400260060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX LINKTYPE = 'directReport' MANAGEDTYPE = 'manager' def _header(topology_st, label): topology_st.standalone.log.info("###############################################") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("###############################################") def check_attr_val(topology_st, dn, attr, expected): try: centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') if centry: val = centry[0].getValue(attr) if val.lower() == expected.lower(): log.info('Value of %s is %s' % (attr, expected)) else: log.info('Value of %s is not %s, but %s' % (attr, expected, val)) assert False else: log.fatal('Failed to get %s' % dn) assert False except ldap.LDAPError as e: log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) assert False def 
_modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): assert topology_st is not None assert entry_dn is not None assert new_rdn is not None topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) try: if new_superior: topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) else: topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) except ldap.NO_SUCH_ATTRIBUTE: topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") attempt = 0 if new_superior: dn = "%s,%s" % (new_rdn, new_superior) base = new_superior else: base = ','.join(entry_dn.split(",")[1:]) dn = "%s, %s" % (new_rdn, base) myfilter = entry_dn.split(',')[0] while attempt < 10: try: ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) break except ldap.NO_SUCH_OBJECT: topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") attempt += 1 time.sleep(1) if attempt == 10: ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) def test_48294_init(topology_st): """ Set up Linked Attribute """ _header(topology_st, 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation') log.info('Enable Dynamic plugins, and the linked Attrs plugin') try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False try: topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) except ValueError as e: log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) assert False log.info('Add the plugin config entry') try: topology_st.standalone.add_s(Entry((MANAGER_LINK, { 'objectclass': 'top extensibleObject'.split(), 'cn': 'Manager Link', 'linkType': LINKTYPE, 'managedType': MANAGEDTYPE }))) except ldap.LDAPError as e: log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) assert False log.info('Add 2 entries: manager1 and employee1') try: topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'manager1'}))) except ldap.LDAPError as e: log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'employee1'}))) except ldap.LDAPError as e: log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) assert False log.info('Add linktype to manager1') topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE))]) log.info('Check managed attribute') check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') def test_48294_run_0(topology_st): """ Rename employee1 to employee2 and adjust the value of directReport by replace """ _header(topology_st, 'Case 0 - Rename employee1 and adjust the link type value by replace') log.info('Rename employee1 to employee2') _modrdn_entry(topology_st, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2') log.info('Modify the value of directReport to uid=employee2') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, [(ldap.MOD_REPLACE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: log.fatal('Failed to replace uid=employee1 with employee2: ' + e.args[0]['desc']) assert False log.info('Check managed attribute') 
check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') def test_48294_run_1(topology_st): """ Rename employee2 to employee3 and adjust the value of directReport by delete and add """ _header(topology_st, 'Case 1 - Rename employee2 and adjust the link type value by delete and add') log.info('Rename employee2 to employee3') _modrdn_entry(topology_st, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3') log.info('Modify the value of directReport to uid=employee3') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, [(ldap.MOD_DELETE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: log.fatal('Failed to delete employee2: ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee3,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: log.fatal('Failed to add employee3: ' + e.args[0]['desc']) assert False log.info('Check managed attribute') check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') def test_48294_run_2(topology_st): """ Rename manager1 to manager2 and make sure the managed attribute value is updated """ _header(topology_st, 'Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated') log.info('Rename manager1 to manager2') _modrdn_entry(topology_st, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2') log.info('Check managed attribute') check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager2,%s' % OU_PEOPLE)) log.info('PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48295_test.py000066400000000000000000000121011421664411400260050ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st from lib389.utils import * pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX LINKTYPE = 'directReport' MANAGEDTYPE = 'manager' def _header(topology_st, label): topology_st.standalone.log.info("###############################################") topology_st.standalone.log.info("####### %s" % label) topology_st.standalone.log.info("###############################################") def check_attr_val(topology_st, dn, attr, expected, revert): try: centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') if centry: val = centry[0].getValue(attr) if val: if val.lower() == expected.lower(): if revert: log.info('Value of %s %s exists, which should not.' 
% (attr, expected)) assert False else: log.info('Value of %s is %s' % (attr, expected)) else: if revert: log.info('NEEDINFO: Value of %s is not %s, but %s' % (attr, expected, val)) else: log.info('Value of %s is not %s, but %s' % (attr, expected, val)) assert False else: if revert: log.info('Value of %s does not expectedly exist' % attr) else: log.info('Value of %s does not exist' % attr) assert False else: log.fatal('Failed to get %s' % dn) assert False except ldap.LDAPError as e: log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) assert False def test_48295_init(topology_st): """ Set up Linked Attribute """ _header(topology_st, 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links') log.info('Enable Dynamic plugins, and the linked Attrs plugin') try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False try: topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) except ValueError as e: log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) assert False log.info('Add the plugin config entry') try: topology_st.standalone.add_s(Entry((MANAGER_LINK, { 'objectclass': 'top extensibleObject'.split(), 'cn': 'Manager Link', 'linkType': LINKTYPE, 'managedType': MANAGEDTYPE }))) except ldap.LDAPError as e: log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) assert False log.info('Add 2 entries: manager1 and employee1') try: topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'manager1'}))) except ldap.LDAPError as e: log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'employee1'}))) except ldap.LDAPError as e: log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) assert False log.info('PASSED') def test_48295_run(topology_st): """ Add 2 linktypes - one exists, another does not """ _header(topology_st, 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE)), (ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=doNotExist,%s' % OU_PEOPLE))]) except ldap.UNWILLING_TO_PERFORM: log.info('Add uid=employee1 and uid=doNotExist expectedly failed.') pass log.info('Check managed attribute does not exist.') check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE), True) log.info('PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48312_test.py000066400000000000000000000076741421664411400260160ustar00rootroot00000000000000import pytest from 
lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MANAGED_ENTRY, DN_CONFIG pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48312(topology_st): """ Configure managed entries plugins(tempalte/definition), then perform a modrdn(deleteoldrdn 1), and make sure the server does not crash. """ GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX USER_NEWRDN = 'uid=\+user1' # # First enable dynamic plugins # try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) # # Add our org units (they should already exist, but do it just in case) # try: topology_st.standalone.add_s(Entry((PEOPLE_OU, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False try: topology_st.standalone.add_s(Entry((GROUP_OU, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False # # Add the template entry # try: topology_st.standalone.add_s(Entry((TEMPLATE_DN, { 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), 'cn': 'MEP Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], 'mepMappedAttr': ['cn: $uid', 'uid: 
$cn', 'gidNumber: $uidNumber'] }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to add template entry: error ' + e.args[0]['desc']) assert False # # Add the definition entry # try: topology_st.standalone.add_s(Entry((CONFIG_DN, { 'objectclass': 'top extensibleObject'.split(), 'cn': 'config', 'originScope': PEOPLE_OU, 'originFilter': 'objectclass=posixAccount', 'managedBase': GROUP_OU, 'managedTemplate': TEMPLATE_DN }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to add config entry: error ' + e.args[0]['desc']) assert False # # Add an entry that meets the MEP scope # try: topology_st.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top posixAccount extensibleObject'.split(), 'uid': 'user1', 'cn': 'user1', 'uidNumber': '1', 'gidNumber': '1', 'homeDirectory': '/home/user1', 'description': 'uiser description' }))) except ldap.LDAPError as e: log.fatal('test_mep: Failed to user1: error ' + e.args[0]['desc']) assert False # # Perform a modrdn on USER_DN # try: topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) except ldap.LDAPError as e: log.error('Failed to modrdn: error ' + e.args[0]['desc']) assert False log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48325_test.py000066400000000000000000000121201421664411400260000ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.tasks import * from lib389.topologies import topology_m1h1c1 from lib389.replica import ReplicationManager from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = 
logging.getLogger(__name__) def checkFirstElement(ds, rid): """ Return True if the first RUV element is for the specified rid """ try: entry = ds.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER, ['nsds50ruv']) assert entry entry = entry[0] except ldap.LDAPError as e: log.fatal('Failed to retrieve RUV entry: %s' % str(e)) assert False ruv_elements = entry.getValues('nsds50ruv') if ('replica %s ' % rid) in ensure_str(ruv_elements[1]): return True else: return False def test_ticket48325(topology_m1h1c1): """ Test that the RUV element order is correctly maintained when promoting a hub or consumer. """ # # Promote consumer to supplier # C1 = topology_m1h1c1.cs["consumer1"] M1 = topology_m1h1c1.ms["supplier1"] H1 = topology_m1h1c1.hs["hub1"] repl = ReplicationManager(DEFAULT_SUFFIX) repl._ensure_changelog(C1) DN = topology_m1h1c1.cs["consumer1"].replica._get_mt_entry(DEFAULT_SUFFIX) topology_m1h1c1.cs["consumer1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'), (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'1234'), (ldap.MOD_REPLACE, 'nsDS5Flags', b'1')]) time.sleep(1) # # Check ruv has been reordered # if not checkFirstElement(topology_m1h1c1.cs["consumer1"], '1234'): log.fatal('RUV was not reordered') assert False topology_m1h1c1.ms["supplier1"].add_s(Entry((defaultProperties[REPLICATION_BIND_DN], {'objectclass': 'top netscapeServer'.split(), 'cn': 'replication manager', 'userPassword': 'password'}))) DN = topology_m1h1c1.ms["supplier1"].replica._get_mt_entry(DEFAULT_SUFFIX) topology_m1h1c1.ms["supplier1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))]) # # Create repl agreement from the newly promoted supplier to supplier1 properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m1h1c1.ms["supplier1"].host, str(topology_m1h1c1.ms["supplier1"].port)), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} new_agmt = topology_m1h1c1.cs["consumer1"].agreement.create(suffix=SUFFIX, host=topology_m1h1c1.ms["supplier1"].host, port=topology_m1h1c1.ms["supplier1"].port, properties=properties) if not new_agmt: log.fatal("Fail to create new agmt from old consumer to the supplier") assert False # Test replication is working repl.test_replication(C1, M1) # # Promote hub to supplier # DN = topology_m1h1c1.hs["hub1"].replica._get_mt_entry(DEFAULT_SUFFIX) topology_m1h1c1.hs["hub1"].modify_s(DN, [(ldap.MOD_REPLACE, 'nsDS5ReplicaType', b'3'), (ldap.MOD_REPLACE, 'nsDS5ReplicaID', b'5678')]) time.sleep(1) # # Check ruv has been reordered # if not checkFirstElement(topology_m1h1c1.hs["hub1"], '5678'): log.fatal('RUV was not reordered') assert False # Test replication is working repl.test_replication(M1, H1) # Done log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48342_test.py000066400000000000000000000116431421664411400260100ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m3 from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) PEOPLE_OU = 'people' PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) MAX_ACCOUNTS = 5 def _dna_config(server, nextValue=500, maxValue=510): log.info("Add dna plugin config entry...%s" % server) try: server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { 'objectclass': 'top dnaPluginConfig'.split(), 'dnaType': 'description', 'dnaMagicRegen': '-1', 'dnaFilter': '(objectclass=posixAccount)', 'dnaScope': 'ou=people,%s' % SUFFIX, 
'dnaNextValue': str(nextValue), 'dnaMaxValue': str(nextValue + maxValue), 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX }))) except ldap.LDAPError as e: log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) assert False log.info("Enable the DNA plugin...") try: server.plugins.enable(name=PLUGIN_DNA) except e: log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) assert False log.info("Restarting the server...") server.stop(timeout=120) time.sleep(1) server.start(timeout=120) time.sleep(3) def test_ticket4026(topology_m3): """Write your replication testcase here. To access each DirSrv instance use: topology_m3.ms["supplier1"], topology_m3.ms["supplier2"], ..., topology_m3.hub1, ..., topology_m3.consumer1, ... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ try: topology_m3.ms["supplier1"].add_s(Entry((PEOPLE_DN, { 'objectclass': "top extensibleObject".split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass topology_m3.ms["supplier1"].add_s(Entry(('ou=ranges,' + SUFFIX, { 'objectclass': 'top organizationalunit'.split(), 'ou': 'ranges' }))) for cpt in range(MAX_ACCOUNTS): name = "user%d" % (cpt) topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { 'objectclass': 'top posixAccount extensibleObject'.split(), 'uid': name, 'cn': name, 'uidNumber': '1', 'gidNumber': '1', 'homeDirectory': '/home/%s' % name }))) # make supplier3 having more free slots that supplier2 # so supplier1 will contact supplier3 _dna_config(topology_m3.ms["supplier1"], nextValue=100, maxValue=10) _dna_config(topology_m3.ms["supplier2"], nextValue=200, maxValue=10) _dna_config(topology_m3.ms["supplier3"], nextValue=300, maxValue=3000) # Turn on lots of error logging now. 
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] topology_m3.ms["supplier1"].modify_s('cn=config', mod) topology_m3.ms["supplier2"].modify_s('cn=config', mod) topology_m3.ms["supplier3"].modify_s('cn=config', mod) # We need to wait for the event in dna.c to fire to start the servers # see dna.c line 899 time.sleep(60) # add on supplier1 users with description DNA for cpt in range(10): name = "user_with_desc1_%d" % (cpt) topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { 'objectclass': 'top posixAccount extensibleObject'.split(), 'uid': name, 'cn': name, 'description': '-1', 'uidNumber': '1', 'gidNumber': '1', 'homeDirectory': '/home/%s' % name }))) # give time to negociate supplier1 <--> supplier3 time.sleep(10) # add on supplier1 users with description DNA for cpt in range(11, 20): name = "user_with_desc1_%d" % (cpt) topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { 'objectclass': 'top posixAccount extensibleObject'.split(), 'uid': name, 'cn': name, 'description': '-1', 'uidNumber': '1', 'gidNumber': '1', 'homeDirectory': '/home/%s' % name }))) log.info('Test complete') # add on supplier1 users with description DNA mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] topology_m3.ms["supplier1"].modify_s('cn=config', mod) topology_m3.ms["supplier2"].modify_s('cn=config', mod) topology_m3.ms["supplier3"].modify_s('cn=config', mod) log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48354_test.py000066400000000000000000000025441421664411400260130ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, 
HOST_STANDALONE, PORT_STANDALONE pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _attr_present(conn, name): results = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(%s=*)' % name, [name, ]) if DEBUGGING: print(results) if len(results) > 0: return True return False def test_ticket48354(topology_st): """ Test that we cannot view ACIs, userPassword, or certain other attributes as anonymous. """ if DEBUGGING: # Add debugging steps(if any)... pass # Do an anonymous bind conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) conn.simple_bind_s() # Make sure that we cannot see: # * userPassword assert (not _attr_present(conn, 'userPassword')) # * aci assert (not _attr_present(conn, 'aci')) # * anything else? conn.unbind_s() log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48362_test.py000066400000000000000000000155401421664411400260120ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) PEOPLE_OU = 'people' PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) MAX_ACCOUNTS = 5 BINDMETHOD_ATTR = 'dnaRemoteBindMethod' BINDMETHOD_VALUE = b'SASL/GSSAPI' PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' PROTOCOLE_VALUE = b'LDAP' SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX def _dna_config(server, nextValue=500, maxValue=510): log.info("Add dna plugin config 
entry...%s" % server) cfg_base_dn = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config' try: server.add_s(Entry((cfg_base_dn, { 'objectclass': 'top dnaPluginConfig'.split(), 'dnaType': 'description', 'dnaMagicRegen': '-1', 'dnaFilter': '(objectclass=posixAccount)', 'dnaScope': 'ou=people,%s' % SUFFIX, 'dnaNextValue': str(nextValue), 'dnaMaxValue': str(nextValue + maxValue), 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX }))) except ldap.LDAPError as e: log.error('Failed to add DNA config entry: error ' + e.message['desc']) assert False log.info("Enable the DNA plugin...") try: server.plugins.enable(name=PLUGIN_DNA) except e: log.error("Failed to enable DNA Plugin: error " + e.message['desc']) assert False log.info("Restarting the server...") server.stop(timeout=120) time.sleep(1) server.start(timeout=120) time.sleep(3) def _wait_shared_cfg_servers(server, expected): attempts = 0 ents = [] try: ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") except ldap.NO_SUCH_OBJECT: pass except lib389.NoSuchEntryError: pass while (len(ents) != expected): assert attempts < 10 time.sleep(5) try: ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") except ldap.NO_SUCH_OBJECT: pass except lib389.NoSuchEntryError: pass def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE): log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port) try: ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, ensure_bytes(method)), (ldap.MOD_REPLACE, PROTOCOLE_ATTR, ensure_bytes(transport))] server.modify_s(ent.dn, mod) log.info('\n======================== Update done\n') ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) except ldap.NO_SUCH_OBJECT: log.fatal("Unknown host") assert False def test_ticket48362(topology_m2): 
"""Write your replication testcase here. To access each DirSrv instance use: topology_m2.ms["supplier1"], topology_m2.ms["supplier2"], ..., topology_m2.hub1, ..., topology_m2.consumer1, ... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ try: topology_m2.ms["supplier1"].add_s(Entry((PEOPLE_DN, { 'objectclass': "top extensibleObject".split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass topology_m2.ms["supplier1"].add_s(Entry((SHARE_CFG_BASE, { 'objectclass': 'top organizationalunit'.split(), 'ou': 'ranges' }))) # supplier 1 will have a valid remaining range (i.e. 101) # supplier 2 will not have a valid remaining range (i.e. 0) so dna servers list on supplier2 # will not contain supplier 2. So at restart, supplier 2 is recreated without the method/protocol attribute _dna_config(topology_m2.ms["supplier1"], nextValue=1000, maxValue=100) _dna_config(topology_m2.ms["supplier2"], nextValue=2000, maxValue=-1) # check we have all the servers available _wait_shared_cfg_servers(topology_m2.ms["supplier1"], 2) _wait_shared_cfg_servers(topology_m2.ms["supplier2"], 2) # now force the method/transport on the servers entry _shared_cfg_server_update(topology_m2.ms["supplier1"]) _shared_cfg_server_update(topology_m2.ms["supplier2"]) log.info('\n======================== BEFORE RESTART ============================\n') ent = topology_m2.ms["supplier1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology_m2.ms["supplier1"].port) log.info('\n======================== BEFORE RESTART ============================\n') assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) ent = topology_m2.ms["supplier2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology_m2.ms["supplier2"].port) log.info('\n======================== BEFORE RESTART 
============================\n') assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) topology_m2.ms["supplier1"].restart(10) topology_m2.ms["supplier2"].restart(10) # to allow DNA plugin to recreate the local host entry time.sleep(40) log.info('\n=================== AFTER RESTART =================================\n') ent = topology_m2.ms["supplier1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology_m2.ms["supplier1"].port) log.info('\n=================== AFTER RESTART =================================\n') assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) ent = topology_m2.ms["supplier2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology_m2.ms["supplier2"].port) log.info('\n=================== AFTER RESTART =================================\n') assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48366_test.py000066400000000000000000000134401421664411400260130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import ldap import pytest from ldap.controls.simple import ProxyAuthzControl from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st log = logging.getLogger(__name__) from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX USER_PW = 'password' # subtrees used in test SUBTREE_GREEN = "ou=green,%s" % SUFFIX SUBTREE_RED = "ou=red,%s" % SUFFIX SUBTREES = (SUBTREE_GREEN, SUBTREE_RED) def test_ticket48366_init(topology_st): """ It creates identical entries in 3 subtrees It creates aci which allow access to a set of attrs in two of these subtrees for bound users It creates a user to be used for test """ topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN) topology_st.standalone.add_s(Entry((SUBTREE_GREEN, { 'objectclass': "top organizationalunit".split(), 'ou': "green_one"}))) topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_RED) topology_st.standalone.add_s(Entry((SUBTREE_RED, { 'objectclass': "top organizationalunit".split(), 'ou': "red"}))) # add proxy user and test user topology_st.standalone.log.info("Add %s" % TEST_USER_DN) topology_st.standalone.add_s(Entry((TEST_USER_DN, { 'objectclass': "top person".split(), 'sn': 'test', 'cn': 'test', 'userpassword': USER_PW}))) topology_st.standalone.log.info("Add %s" % PROXY_USER_DN) topology_st.standalone.add_s(Entry((PROXY_USER_DN, { 'objectclass': "top person".split(), 'sn': 'proxy', 'cn': 'proxy', 'userpassword': USER_PW}))) # enable acl error logging # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] # topology_st.standalone.modify_s(DN_CONFIG, mod) # get rid of default ACIs mod = [(ldap.MOD_DELETE, 'aci', None)] topology_st.standalone.modify_s(SUFFIX, mod) # Ok Now add the proper ACIs ACI_TARGET = "(target = 
\"ldap:///%s\")" % SUBTREE_GREEN ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")" ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) log.info("Adding %d test entries...") for id in range(2): name = "%s%d" % ('test', id) mail = "%s@example.com" % name for subtree in SUBTREES: topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), { 'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'sn': name, 'cn': name, 'uid': name, 'givenname': 'test', 'mail': mail, 'description': 'description', 'employeenumber': "%d" % id, 'telephonenumber': "%d%d%d" % (id, id, id), 'mobile': "%d%d%d" % (id, id, id), 'l': 'MV', 'title': 'Engineer'}))) def test_ticket48366_search_user(topology_st): proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) # searching as test user should return one entry from the green subtree topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') assert (len(ents) == 1) # searching as proxy user should return no entry topology_st.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') assert (len(ents) == 0) # serching as proxy user, authorizing as test user should return 1 entry ents = topology_st.standalone.search_ext_s(SUFFIX, 
ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) assert (len(ents) == 1) def test_ticket48366_search_dm(topology_st): # searching as directory manager should return one entries from both subtrees topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') assert (len(ents) == 2) # searching as directory manager proxying test user should return one entry proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) assert (len(ents) == 1) # searching as directory manager proxying proxy user should return no entry proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + PROXY_USER_DN)) ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) assert (len(ents) == 0) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48370_test.py000066400000000000000000000153151421664411400260110ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48370(topology_st): """ Deleting attirbute values and readding a value does not properly update the pres index. 
The values are not actually deleted from the index """ DN = 'uid=user0099,' + DEFAULT_SUFFIX # # Add an entry # topology_st.standalone.add_s(Entry((DN, { 'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson', 'posixAccount'], 'givenname': 'test', 'sn': 'user', 'loginshell': '/bin/bash', 'uidNumber': '10099', 'gidNumber': '10099', 'gecos': 'Test User', 'mail': ['user0099@dev.null', 'alias@dev.null', 'user0099@redhat.com'], 'cn': 'Test User', 'homeDirectory': '/home/user0099', 'uid': 'admin2', 'userpassword': 'password'}))) # # Perform modify (delete & add mail attributes) # try: topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, 'mail', b'user0099@dev.null'), (ldap.MOD_DELETE, 'mail', b'alias@dev.null'), (ldap.MOD_ADD, 'mail', b'user0099@dev.null')]) except ldap.LDAPError as e: log.fatal('Failedto modify user: ' + str(e)) assert False # # Search using deleted attribute value- no entries should be returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'mail=alias@dev.null') if entry: log.fatal('Entry incorrectly returned') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False # # Search using existing attribute value - the entry should be returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'mail=user0099@dev.null') if entry is None: log.fatal('Entry not found, but it should have been') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False # # Delete the last values # try: topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, 'mail', b'user0099@dev.null'), (ldap.MOD_DELETE, 'mail', b'user0099@redhat.com') ]) except ldap.LDAPError as e: log.fatal('Failed to modify user: ' + str(e)) assert False # # Search using deleted attribute value - no entries should be returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 
'mail=user0099@redhat.com') if entry: log.fatal('Entry incorrectly returned') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False # # Make sure presence index is correctly updated - no entries should be # returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'mail=*') if entry: log.fatal('Entry incorrectly returned') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False # # Now add the attributes back, and lets run a different set of tests with # a different number of attributes # try: topology_st.standalone.modify_s(DN, [(ldap.MOD_ADD, 'mail', [b'user0099@dev.null', b'alias@dev.null'])]) except ldap.LDAPError as e: log.fatal('Failedto modify user: ' + str(e)) assert False # # Remove and readd some attibutes # try: topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, 'mail', b'alias@dev.null'), (ldap.MOD_DELETE, 'mail', b'user0099@dev.null'), (ldap.MOD_ADD, 'mail', b'user0099@dev.null')]) except ldap.LDAPError as e: log.fatal('Failedto modify user: ' + str(e)) assert False # # Search using deleted attribute value - no entries should be returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'mail=alias@dev.null') if entry: log.fatal('Entry incorrectly returned') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False # # Search using existing attribute value - the entry should be returned # try: entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'mail=user0099@dev.null') if entry is None: log.fatal('Entry not found, but it should have been') assert False except ldap.LDAPError as e: log.fatal('Failed to search for user: ' + str(e)) assert False log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48383_test.py000066400000000000000000000062051421664411400260130ustar00rootroot00000000000000import random import string import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, SERVERID_STANDALONE pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48383(topology_st): """ This test case will check that we re-alloc buffer sizes on import.c We achieve this by setting the servers dbcachesize to a stupid small value and adding huge objects to ds. Then when we run db2index, either: * If we are not using the re-alloc code, it will FAIL (Bad) * If we re-alloc properly, it all works regardless. """ topology_st.standalone.config.set('nsslapd-maxbersize', '200000000') topology_st.standalone.restart() # Create some stupid huge objects / attributes in DS. # seeAlso is indexed by default. Lets do that! # This will take a while ... data = [random.choice(string.ascii_letters) for x in range(10000000)] s = "".join(data) # This was here for an iteration test. i = 1 USER_DN = 'uid=user%s,ou=people,%s' % (i, DEFAULT_SUFFIX) padding = ['%s' % n for n in range(400)] user = Entry((USER_DN, { 'objectclass': 'top posixAccount person extensibleObject'.split(), 'uid': 'user%s' % (i), 'cn': 'user%s' % (i), 'uidNumber': '%s' % (i), 'gidNumber': '%s' % (i), 'homeDirectory': '/home/user%s' % (i), 'description': 'user description', 'sn': s, 'padding': padding, })) topology_st.standalone.add_s(user) # Set the dbsize really low. try: topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')]) except ldap.LDAPError as e: log.fatal('Failed to change nsslapd-cachememsize {}'.format(e.args[0]['desc'])) ## Does ds try and set a minimum possible value for this? 
## Yes: [16/Feb/2016:16:39:18 +1000] - WARNING: cache too small, increasing to 500K bytes # Given the formula, by default, this means DS will make the buffsize 400k # So an object with a 1MB attribute should break indexing ldifpath = os.path.join(topology_st.standalone.get_ldif_dir(), "%s.ldif" % SERVERID_STANDALONE) # stop the server topology_st.standalone.stop() # Now export and import the DB. It's easier than db2index ... topology_st.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, repl_data=True, outputfile=ldifpath) result = topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, False, ldifpath) assert (result) topology_st.standalone.start() # see if user1 exists at all .... result_user = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)') assert (len(result_user) > 0) log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48497_test.py000066400000000000000000000106301421664411400260160ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, SUFFIX pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 MIXED_VALUE = "/home/mYhOmEdIrEcToRy" LOWER_VALUE = "/home/myhomedirectory" HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' HOMEDIRECTORY_CN = "homedirectory" MATCHINGRULE = 'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UIDNUMBER_CN = "uidnumber" def test_ticket48497_init(topology_st): log.info("Initialization: add dummy entries for the tests") for cpt in 
range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { 'objectclass': "top posixAccount".split(), 'uid': name, 'cn': name, 'uidnumber': str(111), 'gidnumber': str(222), 'homedirectory': "/home/tb_%d" % cpt}))) def test_ticket48497_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) def test_ticket48497_extensible_search(topology_st): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) # check with the exact stored value log.info("Default: can retrieve an entry filter syntax with exact stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) # check with a lower case value that is different from the stored value log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") try: ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") try: ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) def test_ticket48497_homeDirectory_index_cfg(topology_st): log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and 
caseExactIA5Match") try: ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) IGNORE_MR_NAME = b'caseIgnoreIA5Match' EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) def test_ticket48497_homeDirectory_index_run(topology_st): args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with a specified matching rule") file_obj = open(topology_st.standalone.errlog, "r") # Check if the MR configuration failure occurs regex = re.compile("unknown or invalid matching rule") while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("The configuration of a specific MR fails") log.info(line) assert 0 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48637_test.py000066400000000000000000000107461421664411400260220ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = "uid=test,ou=people,dc=example,dc=com" GROUP_DN = "cn=group,dc=example,dc=com" GROUP_OU = "ou=groups,dc=example,dc=com" PEOPLE_OU = "ou=people,dc=example,dc=com" MEP_OU = "ou=mep,dc=example,dc=com" MEP_TEMPLATE = "cn=mep template,dc=example,dc=com" AUTO_DN = 
"cn=All Users,cn=Auto Membership Plugin,cn=plugins,cn=config" MEP_DN = "cn=MEP Definition,cn=Managed Entries,cn=plugins,cn=config" def test_ticket48637(topology_st): """Test for entry cache corruption This requires automember and managed entry plugins to be configured. Then remove the group that automember would use to trigger a failure when adding a new entry. Automember fails, and then managed entry also fails. Make sure a base search on the entry returns error 32 """ if DEBUGGING: # Add debugging steps(if any)... pass # # Add our setup entries # try: topology_st.standalone.add_s(Entry((PEOPLE_OU, { 'objectclass': 'top organizationalunit'.split(), 'ou': 'people'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('Failed to add people ou: ' + str(e)) assert False try: topology_st.standalone.add_s(Entry((GROUP_OU, { 'objectclass': 'top organizationalunit'.split(), 'ou': 'groups'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.fatal('Failed to add groups ou: ' + str(e)) assert False try: topology_st.standalone.add_s(Entry((MEP_OU, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'mep'}))) except ldap.LDAPError as e: log.fatal('Failed to add MEP ou: ' + str(e)) assert False try: topology_st.standalone.add_s(Entry((MEP_TEMPLATE, { 'objectclass': 'top mepTemplateEntry'.split(), 'cn': 'mep template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: groupofuniquenames', 'mepMappedAttr': 'cn: $uid'}))) except ldap.LDAPError as e: log.fatal('Failed to add MEP ou: ' + str(e)) assert False # # Configure automember # try: topology_st.standalone.add_s(Entry((AUTO_DN, { 'cn': 'All Users', 'objectclass': ['top', 'autoMemberDefinition'], 'autoMemberScope': 'dc=example,dc=com', 'autoMemberFilter': 'objectclass=person', 'autoMemberDefaultGroup': GROUP_DN, 'autoMemberGroupingAttr': 'uniquemember:dn'}))) except ldap.LDAPError as e: log.fatal('Failed to configure automember plugin : ' + str(e)) assert False # # Configure managed 
entry plugin # try: topology_st.standalone.add_s(Entry((MEP_DN, { 'cn': 'MEP Definition', 'objectclass': ['top', 'extensibleObject'], 'originScope': 'ou=people,dc=example,dc=com', 'originFilter': 'objectclass=person', 'managedBase': 'ou=groups,dc=example,dc=com', 'managedTemplate': MEP_TEMPLATE}))) except ldap.LDAPError as e: log.fatal('Failed to configure managed entry plugin : ' + str(e)) assert False # # Restart DS # topology_st.standalone.restart(timeout=30) # # Add entry that should fail since the automember group does not exist # try: topology_st.standalone.add_s(Entry((USER_DN, { 'uid': 'test', 'objectclass': ['top', 'person', 'extensibleObject'], 'sn': 'test', 'cn': 'test'}))) except ldap.LDAPError as e: pass # # Search for the entry - it should not be returned # try: entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*') if entry: log.fatal('Entry was incorrectly returned') assert False except ldap.NO_SUCH_OBJECT: pass log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48665_test.py000066400000000000000000000053421421664411400260170ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def test_ticket48665(topology_st): """ This tests deletion of certain cn=config values. First, it should be able to delete, and not crash the server. Second, we might be able to delete then add to replace values. We should also still be able to mod replace the values and keep the server alive. 
""" # topology_st.standalone.config.enable_log('audit') # topology_st.standalone.config.enable_log('auditfail') # This will trigger a mod delete then add. topology_st.standalone.modify_s('cn=config,cn=ldbm database,cn=plugins,cn=config', [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')]) try: modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', b'1')] topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, modlist) except: pass # Check the server has not commited seppuku. entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') assert len(entries) > 0 log.info('{} entries are returned from the server.'.format(len(entries))) # This has a magic hack to determine if we are in cn=config. try: topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')]) except ldap.LDAPError as e: log.fatal('Failed to change nsslapd-cachememsize ' + e.args[0]['desc']) # Check the server has not commited seppuku. entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') assert len(entries) > 0 log.info('{} entries are returned from the server.'.format(len(entries))) # Now try with mod_replace. This should be okay. modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')] topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, modlist) # Check the server has not commited seppuku. 
entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') assert len(entries) > 0 log.info('{} entries are returned from the server.'.format(len(entries))) log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48745_test.py000066400000000000000000000121241421664411400260120ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 MIXED_VALUE = "/home/mYhOmEdIrEcToRy" LOWER_VALUE = "/home/myhomedirectory" HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' HOMEDIRECTORY_CN = "homedirectory" MATCHINGRULE = 'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UIDNUMBER_CN = "uidnumber" def test_ticket48745_init(topology_st): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { 'objectclass': "top posixAccount".split(), 'uid': name, 'cn': name, 'uidnumber': str(111), 'gidnumber': str(222), 'homedirectory': "/home/tbordaz_%d" % cpt}))) def test_ticket48745_homeDirectory_indexed_cis(topology_st): log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") try: ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 
'nsIndexType': 'eq'}))) # log.info("attach debugger") # time.sleep(60) IGNORE_MR_NAME = b'caseIgnoreIA5Match' EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) # topology_st.standalone.stop(timeout=10) log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) # topology_st.standalone.start(timeout=10) args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with a specified matching rule") file_obj = open(topology_st.standalone.errlog, "r") # Check if the MR configuration failure occurs regex = re.compile("unknown or invalid matching rule") while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("The configuration of a specific MR fails") log.info(line) assert 0 def test_ticket48745_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) def test_ticket48745_extensible_search_after_index(topology_st): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) # check with the exact stored value log.info("Default: can retrieve an entry filter syntax with exact stored value") ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % MIXED_VALUE) # log.info("attach debugger") # time.sleep(60) # This search will fail because a # subtree search with caseExactIA5Match will find a key # where the value has been lowercase log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") ent = topology_st.standalone.getEntry(SUFFIX, 
ldap.SCOPE_SUBTREE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) assert ent # But do additional searches.. just for more tests # check with a lower case value that is different from the stored value log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") try: ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") try: ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) assert ent is None except ldap.NO_SUCH_OBJECT: pass log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48746_test.py000066400000000000000000000133241421664411400260160ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import SUFFIX, DEFAULT_SUFFIX, DEFAULT_BENAME pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 20 MIXED_VALUE = "/home/mYhOmEdIrEcToRy" LOWER_VALUE = "/home/myhomedirectory" HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' HOMEDIRECTORY_CN = "homedirectory" MATCHINGRULE = 'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UIDNUMBER_CN = "uidnumber" def test_ticket48746_init(topology_st): 
log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { 'objectclass': "top posixAccount".split(), 'uid': name, 'cn': name, 'uidnumber': str(111), 'gidnumber': str(222), 'homedirectory': "/home/tbordaz_%d" % cpt}))) def test_ticket48746_homeDirectory_indexed_cis(topology_st): log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") try: ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) # log.info("attach debugger") # time.sleep(60) IGNORE_MR_NAME = b'caseIgnoreIA5Match' EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) # topology_st.standalone.stop(timeout=10) log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) # topology_st.standalone.start(timeout=10) args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with a specified matching rule") file_obj = open(topology_st.standalone.errlog, "r") # Check if the MR configuration failure occurs regex = re.compile("unknown or invalid matching rule") while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("The configuration of a specific MR fails") log.info(line) assert not found def test_ticket48746_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, 
SUFFIX) mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) def test_ticket48746_extensible_search_after_index(topology_st): name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) # check with the exact stored value # log.info("Default: can retrieve an entry filter syntax with exact stored value") # ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) # log.info("attach debugger") # time.sleep(60) # This search is enought to trigger the crash # because it loads a registered filter MR plugin that has no indexer create function # following index will trigger the crash log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) def test_ticket48746_homeDirectory_indexed_ces(topology_st): log.info("\n\nindex homeDirectory in caseExactIA5Match, this would trigger the crash") try: ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) # log.info("attach debugger") # time.sleep(60) EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) # topology_st.standalone.stop(timeout=10) log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) # topology_st.standalone.start(timeout=10) args = {TASK_WAIT: True} topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with a specified matching rule") file_obj = 
open(topology_st.standalone.errlog, "r") # Check if the MR configuration failure occurs regex = re.compile("unknown or invalid matching rule") while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("The configuration of a specific MR fails") log.info(line) assert not found if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48759_test.py000066400000000000000000000177031421664411400260270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.replica import ReplicationManager,Replicas from lib389._constants import (PLUGIN_MEMBER_OF, DEFAULT_SUFFIX, ReplicaRole, REPLICAID_SUPPLIER_1, PLUGIN_RETRO_CHANGELOG, REPLICA_PRECISE_PURGING, REPLICA_PURGE_DELAY, REPLICA_PURGE_INTERVAL) pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) MEMBER_DN_COMP = "uid=member" def _add_group_with_members(topology_st): # Create group try: topology_st.standalone.add_s(Entry((GROUP_DN, {'objectclass': 'top groupofnames'.split(), 'cn': 'group'}))) except ldap.LDAPError as e: log.fatal('Failed to add group: error ' + e.args[0]['desc']) assert False # Add members to the group - set timeout log.info('Adding members to the group...') for idx in range(1, 5): try: MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topology_st.standalone.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(MEMBER_VAL))]) except ldap.LDAPError as e: 
log.fatal('Failed to update group: member (%s) - error: %s' % (MEMBER_VAL, e.args[0]['desc'])) assert False def _find_retrocl_changes(topology_st, user_dn=None): ents = topology_st.standalone.search_s('cn=changelog', ldap.SCOPE_SUBTREE, '(targetDn=%s)' % user_dn) return len(ents) def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) found = False if ent.hasAttr('memberof'): for val in ent.getValues('memberof'): topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) if ensure_str(val) == group_dn: found = True break if find_result: assert (found) else: assert (not found) def test_ticket48759(topology_st): """ The fix for ticket 48759 has to prevent plugin calls for tombstone purging The test uses the memberof and retrocl plugins to verify this. In tombstone purging without the fix the mmeberof plugin is called, if the tombstone entry is a group, it modifies the user entries for the group and if retrocl is enabled this mod is written to the retrocl The test sequence is: - enable replication - enable memberof and retro cl plugin - add user entries - add a group and add the users as members - verify memberof is set to users - delete the group - verify memberof is removed from users - add group again - verify memberof is set to users - get number of changes in retro cl for one user - configure tombstone purging - wait for purge interval to pass - add a dummy entry to increase maxcsn - wait for purge interval to pass two times - get number of changes in retro cl for user again - assert there was no additional change """ log.info('Testing Ticket 48759 - no plugin calls for tombstone purging') # # Setup Replication # log.info('Setting up replication...') repl = ReplicationManager(DEFAULT_SUFFIX) repl.create_first_supplier(topology_st.standalone) # # enable dynamic plugins, memberof and retro cl plugin # log.info('Enable 
plugins...') try: topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') except ldap.LDAPError as e: ldap.error('Failed to enable dynamic plugins! ' + e.args[0]['desc']) assert False topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) # Configure memberOf group attribute try: topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', b'member')]) except ldap.LDAPError as e: log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc']) assert False # # create some users and a group # log.info('create users and group...') for idx in range(1, 5): try: USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False _add_group_with_members(topology_st) MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX) time.sleep(1) _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) # delete group log.info('delete group...') try: topology_st.standalone.delete_s(GROUP_DN) except ldap.LDAPError as e: log.error('Failed to delete entry: ' + e.args[0]['desc']) assert False time.sleep(1) _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False) # add group again log.info('add group again') _add_group_with_members(topology_st) time.sleep(1) _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) # # get number of changelog records for one user entry log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL) # configure tombstone purging args = {REPLICA_PRECISE_PURGING: 'on', REPLICA_PURGE_DELAY: '5', REPLICA_PURGE_INTERVAL: '5'} try: Repl_DN = 'cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config' 
topology_st.standalone.modify_s(Repl_DN, [(ldap.MOD_ADD, 'nsDS5ReplicaPreciseTombstonePurging', b'on'), (ldap.MOD_ADD, 'nsDS5ReplicaPurgeDelay', b'5'), (ldap.MOD_ADD, 'nsDS5ReplicaTombstonePurgeInterval', b'5')]) except: log.fatal('Failed to configure replica') assert False # Wait for the interval to pass log.info('Wait for tombstone purge interval to pass ...') time.sleep(6) # Add an entry to trigger replication log.info('add dummy entry') try: topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', { 'objectclass': 'top person'.split(), 'sn': 'user', 'cn': 'entry1'}))) except ldap.LDAPError as e: log.error('Failed to add entry: ' + e.args[0]['desc']) assert False # check memberof is still correct time.sleep(1) _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) # Wait for the interval to pass again log.info('Wait for tombstone purge interval to pass again...') time.sleep(10) # # get number of changelog records for one user entry log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL) assert (changes_pre == changes_post) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48784_test.py000066400000000000000000000125151421664411400260210ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] from lib389.topologies import topology_m2 from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN RSA = 'RSA' RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) ISSUER = 'cn=CAcert' CACERT = 'CAcertificate' SERVERCERT = 'Server-Cert' @pytest.fixture(scope="module") def add_entry(server, name, rdntmpl, start, num): log.info("\n######################### Adding %d entries to %s ######################" % (num, name)) for i in range(num): ii = start + i dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) try: server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), 'uid': '%s%d' % (rdntmpl, ii), 'cn': '%s user%d' % (name, ii), 'sn': 'user%d' % (ii)}))) except ldap.LDAPError as e: log.error('Failed to add %s ' % dn + e.message['desc']) assert False def config_tls_agreements(topology_m2): log.info("######################### Configure SSL/TLS agreements ######################") log.info("######################## supplier1 <-- startTLS -> supplier2 #####################") log.info("##### Update the agreement of supplier1") m1 = topology_m2.ms["supplier1"] m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn topology_m2.ms["supplier1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) log.info("##### Update the agreement of supplier2") m2 = topology_m2.ms["supplier2"] m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn topology_m2.ms["supplier2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) time.sleep(1) topology_m2.ms["supplier1"].restart(10) topology_m2.ms["supplier2"].restart(10) 
log.info("\n######################### Configure SSL/TLS agreements Done ######################\n") def set_ssl_Version(server, name, version): log.info("\n######################### Set %s on %s ######################\n" % (version, name)) server.simple_bind_s(DN_DM, PASSWORD) server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), (ldap.MOD_REPLACE, 'nsTLS1', b'on'), (ldap.MOD_REPLACE, 'sslVersionMin', ensure_bytes(version)), (ldap.MOD_REPLACE, 'sslVersionMax', ensure_bytes(version))]) def test_ticket48784(topology_m2): """ Set up 2way MMR: supplier_1 <----- startTLS -----> supplier_2 Make sure the replication is working. Then, stop the servers and set only TLS1.0 on supplier_1 while TLS1.2 on supplier_2 Replication is supposed to fail. """ log.info("Ticket 48784 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") #create_keys_certs(topology_m2) [i.enable_tls() for i in topology_m2] config_tls_agreements(topology_m2) add_entry(topology_m2.ms["supplier1"], 'supplier1', 'uid=m1user', 0, 5) add_entry(topology_m2.ms["supplier2"], 'supplier2', 'uid=m2user', 0, 5) time.sleep(10) log.info('##### Searching for entries on supplier1...') entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 10 == len(entries) log.info('##### Searching for entries on supplier2...') entries = topology_m2.ms["supplier2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 10 == len(entries) log.info("##### openldap client just accepts sslVersionMin not Max.") set_ssl_Version(topology_m2.ms["supplier1"], 'supplier1', 'TLS1.0') set_ssl_Version(topology_m2.ms["supplier2"], 'supplier2', 'TLS1.2') log.info("##### restart supplier[12]") topology_m2.ms["supplier1"].restart(timeout=10) topology_m2.ms["supplier2"].restart(timeout=10) log.info("##### replication from supplier_1 to supplier_2 should be ok.") add_entry(topology_m2.ms["supplier1"], 'supplier1', 'uid=m1user', 10, 1) log.info("##### replication from 
supplier_2 to supplier_1 should fail.") add_entry(topology_m2.ms["supplier2"], 'supplier2', 'uid=m2user', 10, 1) time.sleep(10) log.info('##### Searching for entries on supplier1...') entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 11 == len(entries) # This is supposed to be "1" less than supplier 2's entry count log.info('##### Searching for entries on supplier2...') entries = topology_m2.ms["supplier2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') assert 12 == len(entries) log.info("Ticket 48784 - PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48798_test.py000066400000000000000000000037201421664411400260240ustar00rootroot00000000000000from subprocess import check_output import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389.config import Encryption from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def check_socket_dh_param_size(hostname, port): ### You know why we have to do this? # Because TLS and SSL suck. Hard. They are impossible. It's all terrible, burn it all down. cmd = "echo quit | openssl s_client -connect {HOSTNAME}:{PORT} -msg -cipher DH | grep -A 1 ServerKeyExchange".format( HOSTNAME=hostname, PORT=port) output = check_output(cmd, shell=True) dhheader = output.split(b'\n')[1] # Get rid of all the other whitespace. dhheader = dhheader.replace(b' ', b'') # Example is 0c00040b0100ffffffffffffffffadf8 # We need the bits 0100 here. Which means 256 bytes aka 256 * 8, for 2048 bit. 
dhheader = dhheader[8:12] # make it an int, and times 8 i = int(dhheader, 16) * 8 return i def test_ticket48798(topology_st): """ Test DH param sizes offered by DS. """ topology_st.standalone.enable_tls() # Confirm that we have a connection, and that it has DH # Open a socket to the port. # Check the security settings. size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) assert size == 2048 # Now toggle the settings. enc = Encryption(topology_st.standalone) enc.set('allowWeakDHParam', 'on') topology_st.standalone.restart() # Check the DH params are less than 1024. size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) assert size == 1024 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48799_test.py000066400000000000000000000060241421664411400260250ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m1c1 pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def _add_custom_schema(server): attr_value = b"( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)] server.modify_s('cn=schema', mod) oc_value = b"( 1.3.6.1.4.1.4843.2.1 NAME 'customPerson' SUP inetorgperson STRUCTURAL MAY (customManager) X-ORIGIN 'user defined' )" mod = [(ldap.MOD_ADD, 'objectclasses', oc_value)] server.modify_s('cn=schema', mod) def _create_user(server): server.add_s(Entry(( "uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, { 'objectClass': "top account posixaccount".split(), 'uid': 'testuser', 'gecos': 'Test User', 'cn': 'testuser', 'homedirectory': '/home/testuser', 
'passwordexpirationtime': '20160710184141Z', 'userpassword': '!', 'uidnumber': '1111212', 'gidnumber': '1111212', 'loginshell': '/bin/bash' } ))) def _modify_user(server): mod = [ (ldap.MOD_ADD, 'objectClass', [b'customPerson']), (ldap.MOD_ADD, 'sn', [b'User']), (ldap.MOD_ADD, 'customManager', [b'cn=manager']), ] server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod) def test_ticket48799(topology_m1c1): """Write your replication testcase here. To access each DirSrv instance use: topology_m1c1.ms["supplier1"], topology_m1c1.ms["supplier1"]2, ..., topology_m1c1.hub1, ..., topology_m1c1.cs["consumer1"],... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ # Add the new schema element. _add_custom_schema(topology_m1c1.ms["supplier1"]) _add_custom_schema(topology_m1c1.cs["consumer1"]) # Add a new user on the supplier. _create_user(topology_m1c1.ms["supplier1"]) # Modify the user on the supplier. _modify_user(topology_m1c1.ms["supplier1"]) # We need to wait for replication here. time.sleep(15) # Now compare the supplier vs consumer, and see if the objectClass was dropped. 
supplier_entry = topology_m1c1.ms["supplier1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass']) consumer_entry = topology_m1c1.cs["consumer1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass']) assert (supplier_entry == consumer_entry) log.info('Test complete') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48808_test.py000066400000000000000000000267431421664411400260260ustar00rootroot00000000000000from random import sample import pytest from ldap.controls import SimplePagedResultsControl from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_USER_NAME = 'simplepaged_test' TEST_USER_DN = 'uid=%s,%s' % (TEST_USER_NAME, DEFAULT_SUFFIX) TEST_USER_PWD = 'simplepaged_test' @pytest.fixture(scope="module") def create_user(topology_st): """User for binding operation""" try: topology_st.standalone.add_s(Entry((TEST_USER_DN, { 'objectclass': 'top person'.split(), 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'cn': TEST_USER_NAME, 'sn': TEST_USER_NAME, 'userpassword': TEST_USER_PWD, 'mail': '%s@redhat.com' % TEST_USER_NAME, 'uid': TEST_USER_NAME }))) except ldap.LDAPError as e: log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN, e.args[0]['desc'])) raise e def add_users(topology_st, users_num): """Add users to the default suffix and return a list of added user DNs. 
""" users_list = [] log.info('Adding %d users' % users_num) for num in sample(range(1000), users_num): num_ran = int(round(num)) USER_NAME = 'test%05d' % num_ran USER_DN = 'uid=%s,%s' % (USER_NAME, DEFAULT_SUFFIX) users_list.append(USER_DN) try: topology_st.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top person'.split(), 'objectclass': 'organizationalPerson', 'objectclass': 'inetorgperson', 'cn': USER_NAME, 'sn': USER_NAME, 'userpassword': 'pass%s' % num_ran, 'mail': '%s@redhat.com' % USER_NAME, 'uid': USER_NAME }))) except ldap.LDAPError as e: log.error('Failed to add user (%s): error (%s)' % (USER_DN, e.args[0]['desc'])) raise e return users_list def del_users(topology_st, users_list): """Delete users with DNs from given list""" log.info('Deleting %d users' % len(users_list)) for user_dn in users_list: try: topology_st.standalone.delete_s(user_dn) except ldap.LDAPError as e: log.error('Failed to delete user (%s): error (%s)' % (user_dn, e.args[0]['desc'])) raise e def change_conf_attr(topology_st, suffix, attr_name, attr_value): """Change configurational attribute in the given suffix. Funtion returns previous attribute value. """ try: entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE, 'objectclass=top', [attr_name]) attr_value_bck = entries[0].data.get(attr_name) log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % ( attr_name, attr_value, attr_value_bck, suffix)) if attr_value is None: topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE, attr_name, attr_value)]) else: topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE, attr_name, attr_value)]) except ldap.LDAPError as e: log.error('Failed to change attr value (%s): error (%s)' % (attr_name, e.args[0]['desc'])) raise e return attr_value_bck def paged_search(topology_st, controls, search_flt, searchreq_attrlist): """Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE using Simple Paged Control(should the first item in the list controls. 
Return the list with results summarized from all pages """ pages = 0 pctrls = [] all_results = [] req_ctrl = controls[0] msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) while True: log.info('Getting page %d' % (pages,)) rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) all_results.extend(rdata) pages += 1 pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] if pctrls: if pctrls[0].cookie: # Copy cookie from response control to request control req_ctrl.cookie = pctrls[0].cookie msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) else: break # no more pages available else: break assert not pctrls[0].cookie return all_results def test_ticket48808(topology_st, create_user): log.info('Run multiple paging controls on a single connection') users_num = 100 page_size = 30 users_list = add_users(topology_st, users_num) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] log.info('Set user bind') topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] for ii in range(3): log.info('Iteration %d' % ii) msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] req_ctrl.cookie = pctrls[0].cookie msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) log.info('Set Directory Manager bind back') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) del_users(topology_st, users_list) 
log.info('Abandon the search') users_num = 10 page_size = 0 users_list = add_users(topology_st, users_num) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] log.info('Set user bind') topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) pctrls = [ c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType ] assert not pctrls[0].cookie log.info('Set Directory Manager bind back') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) del_users(topology_st, users_list) log.info("Search should fail with 'nsPagedSizeLimit = 5'" "and 'nsslapd-pagedsizelimit = 15' with 10 users") conf_attr = b'15' user_attr = b'5' expected_rs = ldap.SIZELIMIT_EXCEEDED users_num = 10 page_size = 10 users_list = add_users(topology_st, users_num) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr) user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN, 'nsPagedSizeLimit', user_attr) log.info('Set user bind') topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] log.info('Expect to fail with SIZELIMIT_EXCEEDED') with pytest.raises(expected_rs): all_results = paged_search(topology_st, controls, search_flt, searchreq_attrlist) log.info('Set Directory Manager bind back') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) del_users(topology_st, users_list) change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_bck) 
change_conf_attr(topology_st, TEST_USER_DN, 'nsPagedSizeLimit', user_attr_bck) log.info("Search should pass with 'nsPagedSizeLimit = 15'" "and 'nsslapd-pagedsizelimit = 5' with 10 users") conf_attr = b'5' user_attr = b'15' users_num = 10 page_size = 10 users_list = add_users(topology_st, users_num) search_flt = r'(uid=test*)' searchreq_attrlist = ['dn', 'sn'] conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr) user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN, 'nsPagedSizeLimit', user_attr) log.info('Set user bind') topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) log.info('Create simple paged results control instance') req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] log.info('Search should PASS') all_results = paged_search(topology_st, controls, search_flt, searchreq_attrlist) log.info('%d results' % len(all_results)) assert len(all_results) == len(users_list) log.info('Set Directory Manager bind back') topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) del_users(topology_st, users_list) change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_bck) change_conf_attr(topology_st, TEST_USER_DN, 'nsPagedSizeLimit', user_attr_bck) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48844_test.py000066400000000000000000000154321421664411400260170ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) PLUGIN_BITWISE = 'Bitwise Plugin' TESTBASEDN = "dc=bitwise,dc=com" TESTBACKEND_NAME = "TestBitw" F1 = 'objectclass=testperson' BITWISE_F2 
= '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=514))' % F1 BITWISE_F3 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=513))' % F1 BITWISE_F6 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=16777216))' % F1 def _addBitwiseEntries(topology_st): users = [ ('testuser2', '65536', 'PasswordNeverExpired'), ('testuser3', '8388608', 'PasswordExpired'), ('testuser4', '256', 'TempDuplicateAccount'), ('testuser5', '16777216', 'TrustedAuthDelegation'), ('testuser6', '528', 'AccountLocked'), ('testuser7', '513', 'AccountActive'), ('testuser8', '98536 99512 99528'.split(), 'AccountActive PasswordExxpired AccountLocked'.split()), ('testuser9', '87536 912'.split(), 'AccountActive PasswordNeverExpired'.split()), ('testuser10', '89536 97546 96579'.split(), 'TestVerify1 TestVerify2 TestVerify3'.split()), ('testuser11', '655236', 'TestStatus1'), ('testuser12', '665522', 'TestStatus2'), ('testuser13', '266552', 'TestStatus3')] try: topology_st.standalone.add_s(Entry((TESTBASEDN, {'objectclass': "top dcobject".split(), 'dc': 'bitwise', 'aci': '(target =\"ldap:///dc=bitwise,dc=com\")' + \ '(targetattr != \"userPassword\")' + \ '(version 3.0;acl \"Anonymous read-search access\";' + \ 'allow (read, search, compare)(userdn = \"ldap:///anyone\");)'}))) topology_st.standalone.add_s(Entry(('uid=btestuser1,%s' % TESTBASEDN, {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), 'mail': 'btestuser1@redhat.com', 'uid': 'btestuser1', 'givenName': 'bit', 'sn': 'testuser1', 'userPassword': 'testuser1', 'testUserAccountControl': '514', 'testUserStatus': 'Disabled', 'cn': 'bit tetsuser1'}))) for (userid, accCtl, accStatus) in users: topology_st.standalone.add_s(Entry(('uid=b%s,%s' % (userid, TESTBASEDN), { 'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), 'mail': '%s@redhat.com' % userid, 'uid': 'b%s' % userid, 'givenName': 'bit', 'sn': userid, 'userPassword': userid, 'testUserAccountControl': accCtl, 'testUserStatus': 
accStatus, 'cn': 'bit %s' % userid}))) except ValueError: topology_st.standalone.log.fatal("add_s failed: %s", ValueError) def test_ticket48844_init(topology_st): # create a suffix where test entries will be stored BITW_SCHEMA_AT_1 = '( NAME \'testUserAccountControl\' DESC \'Attribute Bitwise filteri-Multi-Valued\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )' BITW_SCHEMA_AT_2 = '( NAME \'testUserStatus\' DESC \'State of User account active/disabled\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ testUserStatus )' + \ ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )' topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(BITW_SCHEMA_AT_1), ensure_bytes(BITW_SCHEMA_AT_2)]) topology_st.standalone.schema.add_schema('objectClasses', ensure_bytes(BITW_SCHEMA_OC_1)) topology_st.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME}) topology_st.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None) _addBitwiseEntries(topology_st) def test_ticket48844_bitwise_on(topology_st): """ Check that bitwise plugin (old style MR plugin) that defines Its own indexer create function, is selected to evaluate the filter """ topology_st.standalone.plugins.enable(name=PLUGIN_BITWISE) topology_st.standalone.restart(timeout=10) ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, 'objectclass=*') assert (ents[0].hasValue('nsslapd-pluginEnabled', 'on')) expect = 2 ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) assert (len(ents) == expect) expect = 1 ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F3) assert (len(ents) == expect) assert (ents[0].hasAttr('testUserAccountControl')) expect = 1 ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F6) assert (len(ents) 
== expect) assert (ents[0].hasAttr('testUserAccountControl')) def test_ticket48844_bitwise_off(topology_st): """ Check that when bitwise plugin is not enabled, no plugin is identified to evaluate the filter -> ldap.UNAVAILABLE_CRITICAL_EXTENSION: """ topology_st.standalone.plugins.disable(name=PLUGIN_BITWISE) topology_st.standalone.restart(timeout=10) ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, 'objectclass=*') assert (ents[0].hasValue('nsslapd-pluginEnabled', 'off')) res = 0 try: ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) except ldap.UNAVAILABLE_CRITICAL_EXTENSION: res = 12 assert (res == 12) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48891_test.py000066400000000000000000000066671421664411400260330ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import fnmatch import logging import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, BACKEND_NAME, SUFFIX pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' RDN_VAL_SUFFIX = 'ticket48891.org' MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX MYSUFFIXBE = 'ticket48891' SEARCHFILTER = '(objectclass=person)' OTHER_NAME = 'other_entry' MAX_OTHERS = 10 def test_ticket48891_setup(topology_st): """ Check there is no core Create a second backend stop DS (that should trigger the core) check there is no core """ log.info('Testing Ticket 48891 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # check there is no core path = topology_st.standalone.config.get_attr_val_utf8('nsslapd-errorlog').replace('errors', '') log.debug('Looking for a core file in: ' + path) cores = fnmatch.filter(os.listdir(path), 'core.*') assert len(cores) == 0 topology_st.standalone.log.info( "\n\n######################### SETUP SUFFIX o=ticket48891.org ######################\n") topology_st.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) topology_st.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) topology_st.standalone.add_s(Entry((MYSUFFIX, { 'objectclass': "top domain".split(), 'dc': RDN_VAL_SUFFIX}))) topology_st.standalone.log.info("\n\n######################### Generate Test data ######################\n") # add dummy entries on both backends for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, 
MYSUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entries = topology_st.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) assert MAX_OTHERS == len(entries) topology_st.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), MYSUFFIX)) topology_st.standalone.stop(timeout=1) cores = fnmatch.filter(os.listdir(path), 'core.*') for core in cores: core = os.path.join(path, core) topology_st.standalone.log.info('cores are %s' % core) assert not os.path.isfile(core) log.info('Testcase PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48893_test.py000066400000000000000000000023251421664411400260200ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _attr_present(conn): results = conn.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectClass=*)') if DEBUGGING: print(results) if len(results) > 0: return True return False def test_ticket48893(topology_st): """ Test that anonymous has NO VIEW to cn=config """ if DEBUGGING: # Add debugging steps(if any)... 
pass # Do an anonymous bind conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) conn.simple_bind_s() # Make sure that we cannot see what's in cn=config as anonymous assert (not _attr_present(conn)) conn.unbind_s() log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48896_test.py000066400000000000000000000104761421664411400260310ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' UID = 'buser123' TESTDN = 'uid=%s,' % UID + DEFAULT_SUFFIX def check_attr_val(topology_st, dn, attr, expected): try: centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*') if centry: val = centry[0].getValue(attr) if val == expected: log.info('Default value of %s is %s' % (attr, expected)) else: log.info('Default value of %s is not %s, but %s' % (attr, expected, val)) assert False else: log.fatal('Failed to get %s' % dn) assert False except ldap.LDAPError as e: log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) assert False def replace_pw(server, curpw, newpw, expstr, rc): log.info('Binding as {%s, %s}' % (TESTDN, curpw)) server.simple_bind_s(TESTDN, curpw) hit = 0 log.info('Replacing password: %s -> %s, which should %s' % (curpw, newpw, expstr)) try: server.modify_s(TESTDN, [(ldap.MOD_REPLACE, 
'userPassword', ensure_bytes(newpw))]) except Exception as e: log.info("Exception (expected): %s" % type(e).__name__) hit = 1 assert isinstance(e, rc) if (0 != rc) and (0 == hit): log.info('Expected to fail with %s, but passed' % rc.__name__) assert False log.info('PASSED') def test_ticket48896(topology_st): """ """ log.info('Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work') log.info("Setting global password policy with password syntax.") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', b'on'), (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*') mintokenlen = config[0].getValue('passwordMinTokenLength') history = config[0].getValue('passwordInHistory') log.info('Default passwordMinTokenLength == %s' % mintokenlen) log.info('Default passwordInHistory == %s' % history) log.info('Adding a user.') curpw = 'password' topology_st.standalone.add_s(Entry((TESTDN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': 'test user', 'sn': 'user', 'userPassword': curpw}))) newpw = 'Abcd012+' exp = 'be ok' rc = 0 replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 'Abcd012+' newpw = 'user' exp = 'fail' rc = ldap.CONSTRAINT_VIOLATION replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 'Abcd012+' newpw = UID exp = 'fail' rc = ldap.CONSTRAINT_VIOLATION replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 'Abcd012+' newpw = 'Tuse!1234' exp = 'fail' rc = ldap.CONSTRAINT_VIOLATION replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 'Abcd012+' newpw = 'Tuse!0987' exp = 'fail' rc = ldap.CONSTRAINT_VIOLATION replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 'Abcd012+' newpw = 'Tabc!1234' exp = 'fail' rc = ldap.CONSTRAINT_VIOLATION replace_pw(topology_st.standalone, curpw, newpw, exp, rc) curpw = 
'Abcd012+' newpw = 'Direc+ory389' exp = 'be ok' rc = 0 replace_pw(topology_st.standalone, curpw, newpw, exp, rc) log.info('SUCCESS') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48906_test.py000066400000000000000000000314741421664411400260220ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import fnmatch import logging import shutil import pytest from lib389.tasks import * from lib389.topologies import topology_st from lib389.utils import * from lib389._constants import DEFAULT_SUFFIX, DN_LDBM, DN_DM, PASSWORD, SUFFIX # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] log = logging.getLogger(__name__) CONFIG_DN = 'cn=config' RDN_VAL_SUFFIX = 'ticket48906.org' MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX MYSUFFIXBE = 'ticket48906' SEARCHFILTER = '(objectclass=person)' OTHER_NAME = 'other_entry' MAX_OTHERS = 10 DBLOCK_DEFAULT = "10000" DBLOCK_LDAP_UPDATE = "20000" DBLOCK_EDIT_UPDATE = "40000" DBLOCK_MIN_UPDATE = DBLOCK_DEFAULT DBLOCK_ATTR_CONFIG = "nsslapd-db-locks" DBLOCK_ATTR_MONITOR = "nsslapd-db-configured-locks" DBLOCK_ATTR_GUARDIAN = "locks" DBCACHE_LDAP_UPDATE = "20000000" DBCACHE_EDIT_UPDATE = "40000000" DBCACHE_ATTR_CONFIG = "nsslapd-dbcachesize" DBCACHE_ATTR_GUARDIAN = "cachesize" ldbm_config = "cn=config,%s" % (DN_LDBM) bdb_ldbm_config = "cn=bdb,cn=config,%s" % (DN_LDBM) ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM) def test_ticket48906_setup(topology_st): """ Check there is no core Create a second backend stop DS (that should trigger the core) check there is no core """ log.info('Testing Ticket 48906 - ns-slapd crashes during the shutdown after 
adding attribute with a matching rule') # bind as directory manager topology_st.standalone.log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # check there is no core entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-workingdir']) assert entry assert entry[0] assert entry[0].hasAttr('nsslapd-workingdir') path = entry[0].getValue('nsslapd-workingdir') cores = fnmatch.filter(os.listdir(path), b'core.*') assert len(cores) == 0 # add dummy entries on backend for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) assert MAX_OTHERS == len(entries) topology_st.standalone.log.info('%d person entries are successfully created under %s.' 
% (len(entries), SUFFIX)) def _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False): entries = topology_st.standalone.search_s(bdb_ldbm_config, ldap.SCOPE_BASE, 'cn=bdb') if required: assert (entries[0].hasValue(attr)) elif entries[0].hasValue(attr): assert (entries[0].getValue(attr) == ensure_bytes(expected_value)) def _check_monitored_value(topology_st, expected_value): entries = topology_st.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)') assert (entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == ensure_bytes(expected_value)) def _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE): dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' dse_ref = open(dse_ref_ldif, "r") # Check the DBLOCK in dse.ldif value = None while True: line = dse_ref.readline() if (line == ''): break elif attr in line.lower(): value = line.split()[1] assert (value == expected_value) break assert (value) def _check_guardian_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None): guardian_file = os.path.join(topology_st.standalone.dbdir, 'guardian') assert (os.path.exists(guardian_file)) guardian = open(guardian_file, "r") value = None while True: line = guardian.readline() if (line == ''): break elif attr in line.lower(): value = line.split(':')[1].replace("\n", "") print("line") print(line) print("expected_value") print(expected_value) print("value") print(value) assert (str(value) == str(expected_value)) break assert (value) def test_ticket48906_dblock_default(topology_st): topology_st.standalone.log.info('###################################') topology_st.standalone.log.info('###') topology_st.standalone.log.info('### Check that before any change config/monitor') topology_st.standalone.log.info('### contains the default value') topology_st.standalone.log.info('###') 
topology_st.standalone.log.info('###################################') _check_monitored_value(topology_st, DBLOCK_DEFAULT) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False) def test_ticket48906_dblock_ldap_update(topology_st): topology_st.standalone.log.info('###################################') topology_st.standalone.log.info('###') topology_st.standalone.log.info('### Check that after ldap update') topology_st.standalone.log.info('### - monitor contains DEFAULT') topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('### - After stop guardian contains DEFAULT') topology_st.standalone.log.info('### In fact guardian should differ from config to recreate the env') topology_st.standalone.log.info('### Check that after restart (DBenv recreated)') topology_st.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ') topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('###') topology_st.standalone.log.info('###################################') topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_LDAP_UPDATE))]) _check_monitored_value(topology_st, DBLOCK_DEFAULT) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) topology_st.standalone.stop(timeout=10) _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT) # Check that the value is the same after restart and recreate topology_st.standalone.start(timeout=10) _check_monitored_value(topology_st, DBLOCK_LDAP_UPDATE) 
_check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) def test_ticket48906_dblock_edit_update(topology_st): topology_st.standalone.log.info('###################################') topology_st.standalone.log.info('###') topology_st.standalone.log.info('### Check that after stop') topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE') topology_st.standalone.log.info('### Check that edit dse+restart') topology_st.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE') topology_st.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE') topology_st.standalone.log.info('### Check that after stop') topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE') topology_st.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE') topology_st.standalone.log.info('###') topology_st.standalone.log.info('###################################') topology_st.standalone.stop(timeout=10) _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE) dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' dse_new_ldif = topology_st.standalone.confdir + '/dse.ldif.new' dse_ref = open(dse_ref_ldif, "r") dse_new = open(dse_new_ldif, "w") # Change the DBLOCK in dse.ldif value = None while True: line = dse_ref.readline() if (line == ''): break elif DBLOCK_ATTR_CONFIG in line.lower(): value = line.split()[1] assert (value == DBLOCK_LDAP_UPDATE) new_value = [line.split()[0], DBLOCK_EDIT_UPDATE, ] new_line = "%s\n" % " ".join(new_value) else: new_line = line dse_new.write(new_line) assert (value) dse_ref.close() dse_new.close() shutil.move(dse_new_ldif, dse_ref_ldif) # 
Check that the value is the same after restart topology_st.standalone.start(timeout=10) _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) topology_st.standalone.stop(timeout=10) _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE) _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) def test_ticket48906_dblock_robust(topology_st): topology_st.standalone.log.info('###################################') topology_st.standalone.log.info('###') topology_st.standalone.log.info('### Check that the following values are rejected') topology_st.standalone.log.info('### - negative value') topology_st.standalone.log.info('### - insuffisant value') topology_st.standalone.log.info('### - invalid value') topology_st.standalone.log.info('### Check that minimum value is accepted') topology_st.standalone.log.info('###') topology_st.standalone.log.info('###################################') topology_st.standalone.start(timeout=10) _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) # Check negative value try: topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"-1")]) except ldap.UNWILLING_TO_PERFORM: pass _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) # Check insuffisant value too_small = int(DBLOCK_MIN_UPDATE) - 1 try: topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(str(too_small)))]) except ldap.UNWILLING_TO_PERFORM: pass _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, 
expected_value=DBLOCK_LDAP_UPDATE, required=True) # Check invalid value try: topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"dummy")]) except ldap.UNWILLING_TO_PERFORM: pass _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) # now check the minimal value topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_MIN_UPDATE))]) _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) topology_st.standalone.stop(timeout=10) _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE) _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) topology_st.standalone.start(timeout=10) _check_monitored_value(topology_st, DBLOCK_MIN_UPDATE) _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48916_test.py000066400000000000000000000100161421664411400260100ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] def _create_user(inst, idnum): inst.add_s(Entry( ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), { 'objectClass': 'top account 
posixAccount'.split(' '), 'cn': 'user', 'uid': 'user%s' % idnum, 'homeDirectory': '/home/user%s' % idnum, 'loginShell': '/bin/nologin', 'gidNumber': '-1', 'uidNumber': '-1', }) )) def test_ticket48916(topology_m2): """ https://bugzilla.redhat.com/show_bug.cgi?id=1353629 This is an issue with ID exhaustion in DNA causing a crash. To access each DirSrv instance use: topology_m2.ms["supplier1"], topology_m2.ms["supplier2"], ..., topology_m2.hub1, ..., topology_m2.consumer1,... """ if DEBUGGING: # Add debugging steps(if any)... pass # Enable the plugin on both servers dna_m1 = topology_m2.ms["supplier1"].plugins.get('Distributed Numeric Assignment Plugin') dna_m2 = topology_m2.ms["supplier2"].plugins.get('Distributed Numeric Assignment Plugin') # Configure it # Create the container for the ranges to go into. topology_m2.ms["supplier1"].add_s(Entry( ('ou=Ranges,%s' % DEFAULT_SUFFIX, { 'objectClass': 'top organizationalUnit'.split(' '), 'ou': 'Ranges', }) )) # Create the dnaAdmin? # For now we just pinch the dn from the dna_m* types, and add the relevant child config # but in the future, this could be a better plugin template type from lib389 config_dn = dna_m1.dn topology_m2.ms["supplier1"].add_s(Entry( ('cn=uids,%s' % config_dn, { 'objectClass': 'top dnaPluginConfig'.split(' '), 'cn': 'uids', 'dnatype': 'uidNumber gidNumber'.split(' '), 'dnafilter': '(objectclass=posixAccount)', 'dnascope': '%s' % DEFAULT_SUFFIX, 'dnaNextValue': '1', 'dnaMaxValue': '50', 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, 'dnaThreshold': '0', 'dnaRangeRequestTimeout': '60', 'dnaMagicRegen': '-1', 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, 'dnaRemoteBindCred': 'secret123', 'dnaNextRange': '80-90' }) )) topology_m2.ms["supplier2"].add_s(Entry( ('cn=uids,%s' % config_dn, { 'objectClass': 'top dnaPluginConfig'.split(' '), 'cn': 'uids', 'dnatype': 'uidNumber gidNumber'.split(' '), 'dnafilter': '(objectclass=posixAccount)', 'dnascope': '%s' % DEFAULT_SUFFIX, 
'dnaNextValue': '61', 'dnaMaxValue': '70', 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, 'dnaThreshold': '2', 'dnaRangeRequestTimeout': '60', 'dnaMagicRegen': '-1', 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, 'dnaRemoteBindCred': 'secret123', }) )) # Enable the plugins dna_m1.enable() dna_m2.enable() # Restart the instances topology_m2.ms["supplier1"].restart(60) topology_m2.ms["supplier2"].restart(60) # Wait for a replication ..... time.sleep(40) # Allocate the 10 members to exhaust for i in range(1, 11): _create_user(topology_m2.ms["supplier2"], i) # Allocate the 11th _create_user(topology_m2.ms["supplier2"], 11) log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48944_test.py000066400000000000000000000250041421664411400260140ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2c2 as topo from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX, SUFFIX) pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) USER_PW = 'Secret123' def _last_login_time(topo, userdn, inst_name, last_login): """Find lastLoginTime attribute value for a given supplier/consumer""" if 'supplier' in inst_name: if (last_login == 'bind_n_check'): topo.ms[inst_name].simple_bind_s(userdn, USER_PW) topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) entry = topo.ms[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) else: if (last_login == 'bind_n_check'): topo.cs[inst_name].simple_bind_s(userdn, USER_PW) topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) entry = topo.cs[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) lastLogin = entry[0].lastLoginTime time.sleep(1) return lastLogin def _enable_plugin(topo, inst_name): """Enable account policy plugin and configure required attributes""" log.info('Enable account policy plugin and configure required attributes') if 'supplier' in inst_name: log.info('Configure Account policy plugin on {}'.format(inst_name)) topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) try: topo.ms[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) topo.ms[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', 
b'lastLoginTime')]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) except ldap.LDAPError as e: log.error('Failed to configure {} plugin for inst-{} error: {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) topo.ms[inst_name].restart(timeout=10) else: log.info('Configure Account policy plugin on {}'.format(inst_name)) topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) try: topo.cs[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) topo.cs[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) except ldap.LDAPError as e: log.error('Failed to configure {} plugin for inst-{} error {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) topo.cs[inst_name].restart(timeout=10) def test_ticket48944(topo): """On a read only replica invalid state info can accumulate :id: 833be131-f3bf-493e-97c6-3121438a07b1 :feature: Account Policy Plugin :setup: Two supplier and two consumer setup :steps: 1. Configure Account policy plugin with alwaysrecordlogin set to yes 2. 
Check if entries are synced across suppliers and consumers 3. Stop all suppliers and consumers 4. Start supplier1 and bind as user1 to create lastLoginTime attribute 5. Start supplier2 and wait for the sync of lastLoginTime attribute 6. Stop supplier1 and bind as user1 from supplier2 7. Check if lastLoginTime attribute is updated and greater than supplier1 8. Stop supplier2, start consumer1, consumer2 and then supplier2 9. Check if lastLoginTime attribute is updated on both consumers 10. Bind as user1 to both consumers and check the value is updated 11. Check if lastLoginTime attribute is not updated from consumers 12. Start supplier1 and make sure the lastLoginTime attribute is not updated on consumers 13. Bind as user1 from supplier1 and check if all suppliers and consumers have the same value 14. Check error logs of consumers for "deletedattribute;deleted" message :expectedresults: No accumulation of replica invalid state info on consumers """ log.info("Ticket 48944 - On a read only replica invalid state info can accumulate") user_name = 'newbzusr' tuserdn = 'uid={}1,ou=people,{}'.format(user_name, SUFFIX) inst_list = ['supplier1', 'supplier2', 'consumer1', 'consumer2'] for inst_name in inst_list: _enable_plugin(topo, inst_name) log.info('Sleep for 10secs for the server to come up') time.sleep(10) log.info('Add few entries to server and check if entries are replicated') for nos in range(10): userdn = 'uid={}{},ou=people,{}'.format(user_name, nos, SUFFIX) try: topo.ms['supplier1'].add_s(Entry((userdn, { 'objectclass': 'top person'.split(), 'objectclass': 'inetorgperson', 'cn': user_name, 'sn': user_name, 'userpassword': USER_PW, 'mail': '{}@redhat.com'.format(user_name)}))) except ldap.LDAPError as e: log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) raise e log.info('Checking if entries are synced across suppliers and consumers') entries_m1 = topo.ms['supplier1'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), 
['uid=*']) exp_entries = str(entries_m1).count('dn: uid={}*'.format(user_name)) entries_m2 = topo.ms['supplier2'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) act_entries = str(entries_m2).count('dn: uid={}*'.format(user_name)) assert act_entries == exp_entries inst_list = ['consumer1', 'consumer2'] for inst in inst_list: entries_other = topo.cs[inst].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) act_entries = str(entries_other).count('dn: uid={}*'.format(user_name)) assert act_entries == exp_entries topo.ms['supplier2'].stop(timeout=10) topo.ms['supplier1'].stop(timeout=10) topo.cs['consumer1'].stop(timeout=10) topo.cs['consumer2'].stop(timeout=10) topo.ms['supplier1'].start(timeout=10) lastLogin_m1_1 = _last_login_time(topo, tuserdn, 'supplier1', 'bind_n_check') log.info('Start supplier2 to sync lastLoginTime attribute from supplier1') topo.ms['supplier2'].start(timeout=10) time.sleep(5) log.info('Stop supplier1') topo.ms['supplier1'].stop(timeout=10) log.info('Bind as user1 to supplier2 and check if lastLoginTime attribute is greater than supplier1') lastLogin_m2_1 = _last_login_time(topo, tuserdn, 'supplier2', 'bind_n_check') assert lastLogin_m2_1 > lastLogin_m1_1 log.info('Start all servers except supplier1') topo.ms['supplier2'].stop(timeout=10) topo.cs['consumer1'].start(timeout=10) topo.cs['consumer2'].start(timeout=10) topo.ms['supplier2'].start(timeout=10) time.sleep(10) log.info('Check if consumers are updated with lastLoginTime attribute value from supplier2') lastLogin_c1_1 = _last_login_time(topo, tuserdn, 'consumer1', 'check') assert lastLogin_c1_1 == lastLogin_m2_1 lastLogin_c2_1 = _last_login_time(topo, tuserdn, 'consumer2', 'check') assert lastLogin_c2_1 == lastLogin_m2_1 log.info('Check if lastLoginTime update in consumers not synced to supplier2') lastLogin_c1_2 = _last_login_time(topo, tuserdn, 'consumer1', 'bind_n_check') assert lastLogin_c1_2 > lastLogin_m2_1 lastLogin_c2_2 = 
_last_login_time(topo, tuserdn, 'consumer2', 'bind_n_check') assert lastLogin_c2_2 > lastLogin_m2_1 time.sleep(10) # Allow replication to kick in lastLogin_m2_2 = _last_login_time(topo, tuserdn, 'supplier2', 'check') assert lastLogin_m2_2 == lastLogin_m2_1 log.info('Start supplier1 and check if its updating its older lastLoginTime attribute to consumers') topo.ms['supplier1'].start(timeout=10) time.sleep(10) lastLogin_c1_3 = _last_login_time(topo, tuserdn, 'consumer1', 'check') assert lastLogin_c1_3 == lastLogin_c1_2 lastLogin_c2_3 = _last_login_time(topo, tuserdn, 'consumer2', 'check') assert lastLogin_c2_3 == lastLogin_c2_2 log.info('Check if lastLoginTime update from supplier2 is synced to all suppliers and consumers') lastLogin_m2_3 = _last_login_time(topo, tuserdn, 'supplier2', 'bind_n_check') time.sleep(10) # Allow replication to kick in lastLogin_m1_2 = _last_login_time(topo, tuserdn, 'supplier1', 'check') lastLogin_c1_4 = _last_login_time(topo, tuserdn, 'consumer1', 'check') lastLogin_c2_4 = _last_login_time(topo, tuserdn, 'consumer2', 'check') assert lastLogin_m2_3 == lastLogin_m1_2 == lastLogin_c2_4 == lastLogin_c1_4 log.info('Checking consumer error logs for replica invalid state info') assert not topo.cs['consumer2'].ds_error_log.match('.*deletedattribute;deleted.*') assert not topo.cs['consumer1'].ds_error_log.match('.*deletedattribute;deleted.*') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48956_test.py000066400000000000000000000116501421664411400260210ustar00rootroot00000000000000import pytest import subprocess from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import (PLUGIN_ACCT_POLICY, DEFAULT_SUFFIX, DN_DM, PASSWORD, SUFFIX, BACKEND_NAME) pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv('DEBUGGING', False) 
RDN_LONG_SUFFIX = 'this' LONG_SUFFIX = "dc=%s,dc=is,dc=a,dc=very,dc=long,dc=suffix,dc=so,dc=long,dc=suffix,dc=extremely,dc=long,dc=suffix" % RDN_LONG_SUFFIX LONG_SUFFIX_BE = 'ticket48956' ACCT_POLICY_PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY ACCT_POLICY_CONFIG_DN = 'cn=config,%s' % ACCT_POLICY_PLUGIN_DN INACTIVITY_LIMIT = '9' SEARCHFILTER = '(objectclass=*)' TEST_USER = 'ticket48956user' TEST_USER_PW = '%s' % TEST_USER if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _check_status(topology_st, user, expected): nsaccountstatus = os.path.join(topology_st.standalone.ds_paths.sbin_dir, "ns-accountstatus.pl") try: output = subprocess.check_output([nsaccountstatus, '-Z', topology_st.standalone.serverid, '-D', DN_DM, '-w', PASSWORD, '-p', str(topology_st.standalone.port), '-I', user]) except subprocess.CalledProcessError as err: output = err.output log.info("output: %s" % output) if expected in output: return True return False def _check_inactivity(topology_st, mysuffix): ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % mysuffix log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) topology_st.standalone.add_s( Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), 'accountInactivityLimit': INACTIVITY_LIMIT}))) time.sleep(1) TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, mysuffix) log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) topology_st.standalone.add_s( Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': TEST_USER, 'sn': TEST_USER, 'givenname': TEST_USER, 'userPassword': TEST_USER_PW, 'acctPolicySubentry': ACCT_POLICY_DN}))) time.sleep(1) # Setting the lastLoginTime try: topology_st.standalone.simple_bind_s(TEST_USER_DN, 
TEST_USER_PW) except ldap.CONSTRAINT_VIOLATION as e: log.error('CONSTRAINT VIOLATION ' + e.message['desc']) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) assert (_check_status(topology_st, TEST_USER_DN, b'- activated')) time.sleep(int(INACTIVITY_LIMIT) + 5) assert (_check_status(topology_st, TEST_USER_DN, b'- inactivated (inactivity limit exceeded')) def test_ticket48956(topology_st): """Write your testcase here... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCT_POLICY_CONFIG_DN))]) topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), (ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) # Enable the plugins topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) topology_st.standalone.restart(timeout=10) # Check inactivity on standard suffix (short) _check_inactivity(topology_st, SUFFIX) # Check inactivity on a long suffix topology_st.standalone.backend.create(LONG_SUFFIX, {BACKEND_NAME: LONG_SUFFIX_BE}) topology_st.standalone.mappingtree.create(LONG_SUFFIX, bename=LONG_SUFFIX_BE) topology_st.standalone.add_s(Entry((LONG_SUFFIX, { 'objectclass': "top domain".split(), 'dc': RDN_LONG_SUFFIX}))) _check_inactivity(topology_st, LONG_SUFFIX) if DEBUGGING: # Add debugging steps(if any)... 
pass log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket48973_test.py000066400000000000000000000256551421664411400260320ustar00rootroot00000000000000import os import sys import time import ldap import logging import pytest from lib389 import DirSrv, Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) installation1_prefix = None NEW_ACCOUNT = "new_account" MAX_ACCOUNTS = 100 HOMEHEAD = "/home/xyz_" MIXED_VALUE="/home/mYhOmEdIrEcToRy" LOWER_VALUE="/home/myhomedirectory" HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' HOMEDIRECTORY_CN="homedirectory" MATCHINGRULE = 'nsMatchingRule' UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UIDNUMBER_CN="uidnumber" class TopologyStandalone(object): def __init__(self, standalone): standalone.open() self.standalone = standalone @pytest.fixture(scope="module") def topology(request): # Creating standalone instance ... 
standalone = DirSrv(verbose=False) args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX args_standalone = args_instance.copy() standalone.allocate(args_standalone) instance_standalone = standalone.exists() if instance_standalone: standalone.delete() standalone.create() standalone.open() # Delete each instance in the end def fin(): #standalone.delete() pass request.addfinalizer(fin) return TopologyStandalone(standalone) def _find_notes_accesslog(file, log_pattern): try: _find_notes_accesslog.last_pos += 1 except AttributeError: _find_notes_accesslog.last_pos = 0 #position to the where we were last time found = None file.seek(_find_notes_accesslog.last_pos) while True: line = file.readline() found = log_pattern.search(line) if ((line == '') or (found)): break if found: # assuming that the result is the next line of the search line = file.readline() _find_notes_accesslog.last_pos = file.tell() return line else: _find_notes_accesslog.last_pos = file.tell() return None def _find_next_notes(topology, Filter): topology.standalone.stop(timeout=10) file_path = topology.standalone.accesslog file_obj = open(file_path, "r") regex = re.compile("filter=\"\(%s" % Filter) result = _find_notes_accesslog(file_obj, regex) file_obj.close() topology.standalone.start(timeout=10) return result # # find the next message showing an indexing failure # (starting at the specified posistion) # and return the position in the error log # If there is not such message -> return None def _find_next_indexing_failure(topology, pattern, position): file_path = topology.standalone.errlog file_obj = open(file_path, "r") try: file_obj.seek(position + 1) except: file_obj.close() return None # Check if the MR configuration failure occurs regex = re.compile(pattern) while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if 
(found): log.info("The configuration of a specific MR fails") log.info(line) result = file_obj.tell() file_obj.close() return result else: file_obj.close() result = None return result # # find the first message showing an indexing failure # and return the position in the error log # If there is not such message -> return None def _find_first_indexing_failure(topology, pattern): file_path = topology.standalone.errlog file_obj = open(file_path, "r") # Check if the MR configuration failure occurs regex = re.compile(pattern) while True: line = file_obj.readline() found = regex.search(line) if ((line == '') or (found)): break if (found): log.info("pattern is found: \"%s\"") log.info(line) result = file_obj.tell() file_obj.close() else: result = None return result def _check_entry(topology, filterHead=None, filterValueUpper=False, entry_ext=None, found=False, indexed=False): # Search with CES with exact value -> find an entry + indexed if filterValueUpper: homehead = HOMEHEAD.upper() else: homehead = HOMEHEAD searchedHome = "%s%d" % (homehead, entry_ext) Filter = "(%s=%s)" % (filterHead, searchedHome) log.info("Search %s" % Filter) ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) if found: assert len(ents) == 1 assert ents[0].hasAttr('homedirectory') valueHome = ensure_bytes("%s%d" % (HOMEHEAD, entry_ext)) assert valueHome in ents[0].getValues('homedirectory') else: assert len(ents) == 0 result = _find_next_notes(topology, Filter) log.info("result=%s" % result) if indexed: assert not "notes=U" in result else: assert "notes=U" in result def test_ticket48973_init(topology): log.info("Initialization: add dummy entries for the tests") for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { 'objectclass': "top posixAccount".split(), 'uid': name, 'cn': name, 'uidnumber': str(111), 'gidnumber': str(222), 'homedirectory': "%s%d" % (HOMEHEAD, cpt)}))) def 
test_ticket48973_ces_not_indexed(topology): """ Check that homedirectory is not indexed - do a search unindexed """ entry_ext = 0 searchedHome = "%s%d" % (HOMEHEAD, entry_ext) Filter = "(homeDirectory=%s)" % searchedHome log.info("Search %s" % Filter) ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) assert len(ents) == 1 assert ents[0].hasAttr('homedirectory') assert ensure_bytes(searchedHome) in ents[0].getValues('homedirectory') result = _find_next_notes(topology, Filter) log.info("result=%s" % result) assert "notes=U" in result def test_ticket48973_homeDirectory_indexing(topology): """ Check that homedirectory is indexed with syntax (ces) - triggers index - no failure on index - do a search indexed with exact value (ces) and no default_mr_indexer_create warning - do a search indexed with uppercase value (ces) and no default_mr_indexer_create warning """ entry_ext = 1 try: ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { 'objectclass': "top nsIndex".split(), 'cn': HOMEDIRECTORY_CN, 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) args = {TASK_WAIT: True} topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with no specified matching rule") assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle") _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext,found=True, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False) _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=False, 
entry_ext=entry_ext, found=True, indexed=False) _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=False) _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=False) def test_ticket48973_homeDirectory_caseExactIA5Match_caseIgnoreIA5Match_indexing(topology): """ Check that homedirectory is indexed with syntax (ces && cis) - triggers index - no failure on index - do a search indexed (ces) and no default_mr_indexer_create warning - do a search indexed (cis) and no default_mr_indexer_create warning """ entry_ext = 4 log.info("\n\nindex homeDirectory in caseExactIA5Match and caseIgnoreIA5Match") EXACTIA5_MR_NAME=b'caseExactIA5Match' IGNOREIA5_MR_NAME=b'caseIgnoreIA5Match' EXACT_MR_NAME=b'caseExactMatch' IGNORE_MR_NAME=b'caseIgnoreMatch' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME, IGNORE_MR_NAME, EXACTIA5_MR_NAME, IGNOREIA5_MR_NAME))] topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) args = {TASK_WAIT: True} topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) log.info("Check indexing succeeded with no specified matching rule") assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle") _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", 
filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=True) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49008_test.py000066400000000000000000000110011421664411400257740ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m3 as T from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_ticket49008(T): A = T.ms['supplier1'] B = T.ms['supplier2'] C = T.ms['supplier3'] A.enableReplLogging() B.enableReplLogging() C.enableReplLogging() AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn CtoA = C.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn # we want replication in a line A <==> B <==> C A.agreement.pause(AtoC) C.agreement.pause(CtoA) # Enable memberOf on Supplier B B.plugins.enable(name=PLUGIN_MEMBER_OF) # Set the auto OC to an objectclass that does NOT allow memberOf B.modify_s('cn=MemberOf Plugin,cn=plugins,cn=config', [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'referral')]) 
B.restart(timeout=10) # add a few entries allowing memberof for i in range(1, 6): name = "userX{}".format(i) dn = "cn={},{}".format(name, DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top person inetuser".split(), 'sn': name, 'cn': name}))) # add a few entries not allowing memberof for i in range(1, 6): name = "userY{}".format(i) dn = "cn={},{}".format(name, DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top person".split(), 'sn': name, 'cn': name}))) time.sleep(15) A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') log.debug("A contains: %s", A_entries) log.debug("B contains: %s", B_entries) log.debug("C contains: %s", C_entries) assert len(A_entries) == len(B_entries) assert len(B_entries) == len(C_entries) # add a group with members allowing memberof dn = "cn=g1,{}".format(DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), 'description': "Test Owned Group {}".format(name), 'member': "cn=userX1,{}".format(DEFAULT_SUFFIX), 'cn': "g1"}))) # check ruv on m2 before applying failing op time.sleep(10) B_RUV = B.search_s("cn=config", ldap.SCOPE_SUBTREE, "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), ['nsds50ruv']) elements = B_RUV[0].getValues('nsds50ruv') ruv_before = 'ruv_before' for ruv in elements: if b'replica 2' in ruv: ruv_before = ruv # add a group with members allowing memberof and members which don't # the op will fail on M2 dn = "cn=g2,{}".format(DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), 'description': "Test Owned Group {}".format(name), 'member': ["cn=userX1,{}".format(DEFAULT_SUFFIX), "cn=userX2,{}".format(DEFAULT_SUFFIX), "cn=userY1,{}".format(DEFAULT_SUFFIX)], 'cn': "g2"}))) # check ruv on m2 after applying failing op time.sleep(10) B_RUV = 
B.search_s("cn=config", ldap.SCOPE_SUBTREE, "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), ['nsds50ruv']) elements = B_RUV[0].getValues('nsds50ruv') ruv_after = 'ruv_after' for ruv in elements: if b'replica 2' in ruv: ruv_after = ruv log.info('ruv before fail: {}'.format(ruv_before)) log.info('ruv after fail: {}'.format(ruv_after)) # the ruv should not have changed assert ruv_before == ruv_after if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s {}".format(CURRENT_FILE)) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49020_test.py000066400000000000000000000043311421664411400257760ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m3 as T import socket # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_ticket49020(T): A = T.ms['supplier1'] B = T.ms['supplier2'] C = T.ms['supplier3'] A.enableReplLogging() B.enableReplLogging() C.enableReplLogging() AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn A.agreement.pause(AtoB) C.agreement.pause(CtoB) time.sleep(5) name = "userX" dn = "cn={},{}".format(name, DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top person".split(), 'sn': name,'cn': name}))) A.agreement.init(DEFAULT_SUFFIX, socket.gethostname(), PORT_SUPPLIER_3) time.sleep(5) for i in 
range(1,11): name = "userY{}".format(i) dn = "cn={},{}".format(name, DEFAULT_SUFFIX) A.add_s(Entry((dn, {'objectclass': "top person".split(), 'sn': name,'cn': name}))) time.sleep(5) C.agreement.resume(CtoB) time.sleep(5) A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectClass=person)') assert len(A_entries) == len(C_entries) assert len(B_entries) == len(A_entries) - 11 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49039_test.py000066400000000000000000000073351421664411400260170ustar00rootroot00000000000000import time import ldap import logging import pytest import os from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo from lib389.pwpolicy import PwPolicyManager pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=user,dc=example,dc=com' def test_ticket49039(topo): """Test "password must change" verses "password min age". Min age should not block password update if the password was reset. 
""" # Setup SSL (for ldappasswd test) topo.standalone.enable_tls() # Configure password policy try: policy = PwPolicyManager(topo.standalone) policy.set_global_policy(properties={'nsslapd-pwpolicy-local': 'on', 'passwordMustChange': 'on', 'passwordExp': 'on', 'passwordMaxAge': '86400000', 'passwordMinAge': '8640000', 'passwordChange': 'on'}) except ldap.LDAPError as e: log.fatal('Failed to set password policy: ' + str(e)) # Add user, bind, and set password try: topo.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user1', 'userpassword': PASSWORD }))) except ldap.LDAPError as e: log.fatal('Failed to add user: error ' + e.args[0]['desc']) assert False # Reset password as RootDN try: topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) except ldap.LDAPError as e: log.fatal('Failed to bind: error ' + e.args[0]['desc']) assert False time.sleep(1) # Reset password as user try: topo.standalone.simple_bind_s(USER_DN, PASSWORD) except ldap.LDAPError as e: log.fatal('Failed to bind: error ' + e.args[0]['desc']) assert False try: topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) except ldap.LDAPError as e: log.fatal('Failed to change password: error ' + e.args[0]['desc']) assert False ################################### # Make sure ldappasswd also works ################################### # Reset password as RootDN try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: log.fatal('Failed to bind as rootdn: error ' + e.args[0]['desc']) assert False try: topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) except ldap.LDAPError as e: log.fatal('Failed to bind: error ' + e.args[0]['desc']) assert False time.sleep(1) # Run ldappasswd as the User. 
os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_cert_dir() cmd = ('ldappasswd' + ' -h ' + topo.standalone.host + ' -Z -p 38901 -D ' + USER_DN + ' -w password -a password -s password2 ' + USER_DN) os.system(cmd) time.sleep(1) try: topo.standalone.simple_bind_s(USER_DN, "password2") except ldap.LDAPError as e: log.fatal('Failed to bind: error ' + e.args[0]['desc']) assert False log.info('Test Passed') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49072_test.py000066400000000000000000000106371421664411400260130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import subprocess from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo from lib389._constants import (DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_DM, PASSWORD, SERVERID_STANDALONE, SUFFIX) pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_FILTER = '(objectClass=person' TEST_BASEDN = 'dc=testdb,dc=com' FILTER = '(objectClass=person)' FIXUP_MEMOF = 'fixup-memberof.pl' def test_ticket49072_basedn(topo): """memberOf fixup task does not validate args :id: dce9b898-119d-42b8-a236-1130e59bfe18 :feature: memberOf :setup: Standalone instance, with memberOf plugin :steps: 1. Run fixup-memberOf.pl with invalid DN entry 2. Check if error log reports "Failed to get be backend" :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. 
""" log.info("Ticket 49072 memberof fixup task with invalid basedn...") topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topo.standalone.restart(timeout=10) if ds_is_older('1.3'): inst_dir = topo.standalone.get_inst_dir() memof_task = os.path.join(inst_dir, FIXUP_MEMOF) try: output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-f', FILTER]) except subprocess.CalledProcessError as err: output = err.output else: sbin_dir = topo.standalone.get_sbin_dir() memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) try: output = subprocess.check_output( [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-Z', SERVERID_STANDALONE, '-f', FILTER]) except subprocess.CalledProcessError as err: output = err.output log.info('output: {}'.format(output)) expected = b"Successfully added task entry" assert expected in output log_entry = topo.standalone.ds_error_log.match('.*Failed to get be backend.*') log.info('Error log out: {}'.format(log_entry)) assert topo.standalone.ds_error_log.match('.*Failed to get be backend.*') def test_ticket49072_filter(topo): """memberOf fixup task does not validate args :id: dde9e893-119d-42c8-a236-1190e56bfe98 :feature: memberOf :setup: Standalone instance, with memberOf plugin :steps: 1. Run fixup-memberOf.pl with invalid filter 2. Check if error log reports "Bad search filter" :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. 
""" log.info("Ticket 49072 memberof fixup task with invalid filter...") log.info('Wait for 10 secs and check if task is completed') time.sleep(10) task_memof = 'cn=memberOf task,cn=tasks,cn=config' if topo.standalone.search_s(task_memof, ldap.SCOPE_SUBTREE, 'cn=memberOf_fixup*', ['dn:']): log.info('memberof task is still running, wait for +10 secs') time.sleep(10) if ds_is_older('1.3'): inst_dir = topo.standalone.get_inst_dir() memof_task = os.path.join(inst_dir, FIXUP_MEMOF) try: output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-f', TEST_FILTER]) except subprocess.CalledProcessError as err: output = err.output else: sbin_dir = topo.standalone.get_sbin_dir() memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) try: output = subprocess.check_output( [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-Z', SERVERID_STANDALONE, '-f', TEST_FILTER]) except subprocess.CalledProcessError as err: output = err.output log.info('output: {}'.format(output)) expected = b"Successfully added task entry" assert expected in output log_entry = topo.standalone.ds_error_log.match('.*Bad search filter.*') log.info('Error log out: {}'.format(log_entry)) assert topo.standalone.ds_error_log.match('.*Bad search filter.*') log.info("Ticket 49072 complete: memberOf fixup task does not validate args") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49073_test.py000066400000000000000000000133061421664411400260100ustar00rootroot00000000000000import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389._constants import (PLUGIN_MEMBER_OF, DEFAULT_SUFFIX, SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] DEBUGGING = 
os.getenv('DEBUGGING', False) GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _add_group_with_members(topology_m2): # Create group try: topology_m2.ms["supplier1"].add_s(Entry((GROUP_DN, {'objectclass': 'top groupofnames'.split(), 'cn': 'group'}))) except ldap.LDAPError as e: log.fatal('Failed to add group: error ' + e.message['desc']) assert False # Add members to the group - set timeout log.info('Adding members to the group...') for idx in range(1, 5): try: MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topology_m2.ms["supplier1"].modify_s(GROUP_DN, [(ldap.MOD_ADD, 'member', MEMBER_VAL)]) except ldap.LDAPError as e: log.fatal('Failed to update group: member (%s) - error: %s' % (MEMBER_VAL, e.message['desc'])) assert False def _check_memberof(supplier, presence_flag): # Check that members have memberof attribute on M1 for idx in range(1, 5): try: USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ent = supplier.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") if presence_flag: assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN else: assert not ent.hasAttr('memberof') except ldap.LDAPError as e: log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc'])) assert False def _check_entry_exist(supplier, dn): attempt = 0 while attempt <= 10: try: dn ent = supplier.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: attempt = attempt + 1 time.sleep(1) except ldap.LDAPError as e: log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc'])) assert False assert attempt != 10 def test_ticket49073(topology_m2): """Write your replication test here. To access each DirSrv instance use: topology_m2.ms["supplier1"], topology_m2.ms["supplier2"], ..., topology_m2.hub1, ..., topology_m2.consumer1,... 
Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ topology_m2.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF) topology_m2.ms["supplier1"].restart(timeout=10) topology_m2.ms["supplier2"].plugins.enable(name=PLUGIN_MEMBER_OF) topology_m2.ms["supplier2"].restart(timeout=10) # Configure fractional to prevent total init to send memberof ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn) topology_m2.ms["supplier1"].modify_s(ents[0].dn, [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '), (ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')]) topology_m2.ms["supplier1"].restart(timeout=10) # # create some users and a group # log.info('create users and group...') for idx in range(1, 5): try: USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topology_m2.ms["supplier1"].add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) assert False _check_entry_exist(topology_m2.ms["supplier2"], "uid=member4,%s" % (DEFAULT_SUFFIX)) _add_group_with_members(topology_m2) _check_entry_exist(topology_m2.ms["supplier2"], GROUP_DN) # Check that for regular update memberof was on both side (because plugin is enabled both) time.sleep(5) _check_memberof(topology_m2.ms["supplier1"], True) _check_memberof(topology_m2.ms["supplier2"], True) # reinit with fractional definition ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology_m2.ms["supplier1"].agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) topology_m2.ms["supplier1"].waitForReplInit(ents[0].dn) # Check that for total update memberof was on both side # because memberof is NOT excluded from total 
init time.sleep(5) _check_memberof(topology_m2.ms["supplier1"], True) _check_memberof(topology_m2.ms["supplier2"], True) if DEBUGGING: # Add debugging steps(if any)... pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49076_test.py000066400000000000000000000071501421664411400260130ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ldbm_config = "cn=config,%s" % (DN_LDBM) txn_begin_flag = "nsslapd-db-transaction-wait" TEST_USER_DN = 'cn=test,%s' % SUFFIX TEST_USER = "test" def _check_configured_value(topology_st, attr=txn_begin_flag, expected_value=None, required=False): entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config') if required: assert (entries[0].hasValue(attr)) if entries[0].hasValue(attr): topology_st.standalone.log.info('Current value is %s' % entries[0].getValue(attr)) assert (entries[0].getValue(attr) == ensure_bytes(expected_value)) def _update_db(topology_st): topology_st.standalone.add_s( Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), 'cn': TEST_USER, 'sn': TEST_USER, 'givenname': TEST_USER}))) topology_st.standalone.delete_s(TEST_USER_DN) def test_ticket49076(topo): """Write your testcase here... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). 
""" # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="off") # tests we are able to update DB _update_db(topo) # switch to wait mode topo.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="on") _update_db(topo) # switch back to "normal mode" topo.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, txn_begin_flag, b"off")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="off") # tests we are able to update DB _update_db(topo) # check that settings are not reset by restart topo.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="on") _update_db(topo) topo.standalone.restart(timeout=10) _check_configured_value(topo, expected_value="on") _update_db(topo) # switch default value topo.standalone.modify_s(ldbm_config, [(ldap.MOD_DELETE, txn_begin_flag, None)]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="off") # tests we are able to update DB _update_db(topo) topo.standalone.restart(timeout=10) _check_configured_value(topo, expected_value="off") # tests we are able to update DB _update_db(topo) if DEBUGGING: # Add debugging steps(if any)... 
pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49095_test.py000066400000000000000000000057311421664411400260170ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=testuser,dc=example,dc=com' acis = ['(targetattr != "tele*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', '(targetattr != "TELE*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', '(targetattr != "telephonenum*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', '(targetattr != "TELEPHONENUM*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)'] def test_ticket49095(topo): """Check that target attrbiutes with wildcards are case insensitive """ # Add an entry try: topo.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'testuser', 'telephonenumber': '555-555-5555' }))) except ldap.LDAPError as e: log.fatal('Failed to add test user: ' + e.args[0]['desc']) assert False for aci in acis: # Add ACI try: topo.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ensure_bytes(aci))]) except ldap.LDAPError as e: log.fatal('Failed to set aci: ' + aci + ': ' + e.args[0]['desc']) assert False # Set Anonymous Bind to test aci try: 
topo.standalone.simple_bind_s("", "") except ldap.LDAPError as e: log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) assert False # Search for entry - should not get any results try: entry = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, 'telephonenumber=*') if entry: log.fatal('The entry was incorrectly returned') assert False except ldap.LDAPError as e: log.fatal('Failed to search anonymously: ' + e.args[0]['desc']) assert False # Set root DN Bind so we can update aci's try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) assert False log.info("Test Passed") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49104_test.py000066400000000000000000000057661421664411400260160ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import subprocess import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] log = logging.getLogger(__name__) def test_ticket49104_setup(topology_st): """ Generate an ldif file having 10K entries and import it. 
""" # Generate a test ldif (100k entries) ldif_dir = topology_st.standalone.get_ldif_dir() import_ldif = ldif_dir + '/49104.ldif' try: topology_st.standalone.buildLDIF(100000, import_ldif) except OSError as e: log.fatal('ticket 49104: failed to create test ldif,\ error: %s - %s' % (e.errno, e.strerror)) assert False # Online try: topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, args={TASK_WAIT: True}) except ValueError: log.fatal('ticket 49104: Online import failed') assert False def test_ticket49104(topology_st): """ Run dbscan with valgrind changing the truncate size. If there is no Invalid report, we can claim the test has passed. """ log.info("Test ticket 49104 -- dbscan crashes by memory corruption") myvallog = '/tmp/val49104.out' if os.path.exists(myvallog): os.remove(myvallog) prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin') valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog if topology_st.standalone.has_asan(): valcmd = '' id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db') for i in range(20, 30): cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i) log.info('Running script: %s' % cmd) proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) outs = '' try: outs = proc.communicate() except OSError as e: log.exception('dbscan: error executing (%s): error %d - %s' % (cmd, e.errno, e.strerror)) raise e # If we have asan, this fails in other spectacular ways instead if not topology_st.standalone.has_asan(): grep = 'egrep "Invalid read|Invalid write" %s' % myvallog p = os.popen(grep, "r") l = p.readline() if 'Invalid' in l: log.fatal('ERROR: valgrind reported invalid read/write: %s' % l) assert False log.info('ticket 49104 - PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49121_test.py000066400000000000000000000317421421664411400260060ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import codecs from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 from lib389._constants import DATA_DIR, DEFAULT_SUFFIX, VALGRIND_INVALID_STR # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) ds_paths = Paths() @pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") def test_ticket49121(topology_m2): """ Creating some users. Deleting quite a number of attributes which may or may not be in the entry. The attribute type names are to be long. Under the conditions, it did not estimate the size of string format entry shorter than the real size and caused the Invalid write / server crash. 
""" utf8file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), "ticket49121/utf8str.txt") utf8obj = codecs.open(utf8file, 'r', 'utf-8') utf8strorig = utf8obj.readline() utf8str = ensure_bytes(utf8strorig).rstrip(b'\n') utf8obj.close() assert (utf8str) # Get the sbin directory so we know where to replace 'ns-slapd' sbin_dir = topology_m2.ms["supplier1"].get_sbin_dir() log.info('sbin_dir: %s' % sbin_dir) # stop M1 to do the next updates topology_m2.ms["supplier1"].stop(30) topology_m2.ms["supplier2"].stop(30) # wait for the servers shutdown time.sleep(5) # start M1 to do the next updates topology_m2.ms["supplier1"].start() topology_m2.ms["supplier2"].start() for idx in range(1, 10): try: USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) log.info('adding user %s...' % (USER_DN)) topology_m2.ms["supplier1"].add_s(Entry((USER_DN, {'objectclass': 'top person extensibleObject'.split(' '), 'cn': 'user%d' % idx, 'sn': 'SN%d-%s' % (idx, utf8str)}))) except ldap.LDAPError as e: log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False for i in range(1, 3): time.sleep(3) for idx in range(1, 10): try: USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) log.info('[%d] modify user %s - replacing attrs...' 
% (i, USER_DN)) topology_m2.ms["supplier1"].modify_s( USER_DN, [(ldap.MOD_REPLACE, 'cn', b'user%d' % idx), (ldap.MOD_REPLACE, 'ABCDEFGH_ID', [b'239001ad-06dd-e011-80fa-c00000ad5174', b'240f0878-c552-e411-b0f3-000006040037']), (ldap.MOD_REPLACE, 'attr1', b'NEW_ATTR'), (ldap.MOD_REPLACE, 'attr20000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr30000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr40000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr50000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr7000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr8000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr900000000000000000', None), (ldap.MOD_REPLACE, 'attr1000000000000000000000', None), (ldap.MOD_REPLACE, 'attr110000000000000', None), (ldap.MOD_REPLACE, 'attr120000000000000', None), (ldap.MOD_REPLACE, 'attr130000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr140000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr150000000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr1600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr17000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr18000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr1900000000000000000', None), (ldap.MOD_REPLACE, 'attr2000000000000000000000', None), (ldap.MOD_REPLACE, 'attr210000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr220000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr230000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr240000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr25000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr260000000000000000000000000000000000000000', None), 
(ldap.MOD_REPLACE, 'attr270000000000000000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr280000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr29000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr3000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr310000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr320000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr330000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr340000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr350000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr360000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr370000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr380000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr390000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr4000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr410000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr420000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr430000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr440000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr4500000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr460000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr470000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr480000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr49000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr5000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr510000000000000', None), (ldap.MOD_REPLACE, 'attr520000000000000', None), (ldap.MOD_REPLACE, 'attr530000000000000', None), (ldap.MOD_REPLACE, 
'attr540000000000000', None), (ldap.MOD_REPLACE, 'attr550000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr5600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr57000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr58000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr5900000000000000000', None), (ldap.MOD_REPLACE, 'attr6000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6100000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6200000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6300000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6400000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr65000000000000000000000000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6700000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr6800000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr690000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr7000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr71000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr72000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr73000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr74000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr750000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr7600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr77000000000000000000000000000000', None), ( ldap.MOD_REPLACE, 'attr78000000000000000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr79000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr800000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 
'attr81000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr82000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr83000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr84000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr85000000000000000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr8600000000000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr87000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr88000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr89000000000000000000000000000000000', None), (ldap.MOD_REPLACE, 'attr9000000000000000000000000000000000000000000000000000', None)]) except ldap.LDAPError as e: log.fatal('Failed to modify user - deleting attrs (%s): error %s' % (USER_DN, e.args[0]['desc'])) # Stop supplier2 topology_m2.ms["supplier1"].stop(30) topology_m2.ms["supplier2"].stop(30) # start M1 to do the next updates topology_m2.ms["supplier1"].start() topology_m2.ms["supplier2"].start() log.info('Testcase PASSED') if DEBUGGING: # Add debugging steps(if any)... 
pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49122_test.py000066400000000000000000000063451421664411400260100ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_DN = 'uid=user,' + DEFAULT_SUFFIX ROLE_DN = 'cn=Filtered_Role_That_Includes_Empty_Role,' + DEFAULT_SUFFIX filters = ['nsrole=cn=empty,dc=example,dc=com', '(nsrole=cn=empty,dc=example,dc=com)', '(&(nsrole=cn=empty,dc=example,dc=com))', '(!(nsrole=cn=empty,dc=example,dc=com))', '(&(|(objectclass=person)(sn=app*))(userpassword=*))', '(&(|(objectclass=person)(nsrole=cn=empty,dc=example,dc=com))(userpassword=*))', '(&(|(nsrole=cn=empty,dc=example,dc=com)(sn=app*))(userpassword=*))', '(&(|(objectclass=person)(sn=app*))(nsrole=cn=empty,dc=example,dc=com))', '(&(|(&(cn=*)(objectclass=person)(nsrole=cn=empty,dc=example,dc=com)))(uid=*))'] def test_ticket49122(topo): """Search for non-existant role and make sure the server does not crash """ # Enable roles plugin topo.standalone.plugins.enable(name=PLUGIN_ROLES) topo.standalone.restart() # Add test user try: topo.standalone.add_s(Entry(( USER_DN, {'objectclass': "top extensibleObject".split(), 'uid': 'user'}))) except ldap.LDAPError as e: topo.standalone.log.fatal('Failed to add test user: error ' + str(e)) assert False if DEBUGGING: print("Attach gdb") time.sleep(20) # Loop over filters for role_filter in filters: log.info('Testing filter: ' + 
role_filter) # Add invalid role try: topo.standalone.add_s(Entry(( ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition', 'nscomplexroledefinition', 'nsfilteredroledefinition'], 'cn': 'Filtered_Role_That_Includes_Empty_Role', 'nsRoleFilter': role_filter, 'description': 'A filtered role with filter that will crash the server'}))) except ldap.LDAPError as e: topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc']) assert False # Search for the role try: topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole']) except ldap.LDAPError as e: topo.standalone.log.fatal('Search failed: error ' + str(e)) assert False # Cleanup try: topo.standalone.delete_s(ROLE_DN) except ldap.LDAPError as e: topo.standalone.log.fatal('delete failed: error ' + str(e)) assert False time.sleep(1) topo.standalone.log.info('Test Passed') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49180_test.py000066400000000000000000000106371421664411400260130ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import threading import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m4 from lib389.replica import ReplicationManager from lib389._constants import (DEFAULT_SUFFIX, SUFFIX) from lib389 import DirSrv pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) def remove_supplier4_agmts(msg, topology_m4): """Remove all the repl agmts to supplier4. """ log.info('%s: remove all the agreements to supplier 4...' 
% msg) for num in range(1, 4): try: topology_m4.ms["supplier{}".format(num)].agreement.delete(DEFAULT_SUFFIX, topology_m4.ms["supplier4"].host, topology_m4.ms["supplier4"].port) except ldap.LDAPError as e: log.fatal('{}: Failed to delete agmt(m{} -> m4), error: {}'.format(msg, num, str(e))) assert False def restore_supplier4(topology_m4): """In our tests will always be removing supplier 4, so we need a common way to restore it for another test """ log.info('Restoring supplier 4...') # Enable replication on supplier 4 M4 = topology_m4.ms["supplier4"] M1 = topology_m4.ms["supplier1"] repl = ReplicationManager(SUFFIX) repl.join_supplier(M1, M4) repl.ensure_agreement(M4, M1) repl.ensure_agreement(M1, M4) # Test Replication is working for num in range(2, 5): if topology_m4.ms["supplier1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier{}".format(num)]): log.info('Replication is working m1 -> m{}.'.format(num)) else: log.fatal('restore_supplier4: Replication is not working from m1 -> m{}.'.format(num)) assert False time.sleep(1) # Check replication is working from supplier 4 to supplier1... 
if topology_m4.ms["supplier4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier1"]): log.info('Replication is working m4 -> m1.') else: log.fatal('restore_supplier4: Replication is not working from m4 -> 1.') assert False time.sleep(5) log.info('Supplier 4 has been successfully restored.') def test_ticket49180(topology_m4): log.info('Running test_ticket49180...') log.info('Check that replication works properly on all suppliers') agmt_nums = {"supplier1": ("2", "3", "4"), "supplier2": ("1", "3", "4"), "supplier3": ("1", "2", "4"), "supplier4": ("1", "2", "3")} for inst_name, agmts in agmt_nums.items(): for num in agmts: if not topology_m4.ms[inst_name].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier{}".format(num)]): log.fatal( 'test_replication: Replication is not working between {} and supplier {}.'.format(inst_name, num)) assert False # Disable supplier 4 log.info('test_clean: disable supplier 4...') topology_m4.ms["supplier4"].replica.disableReplication(DEFAULT_SUFFIX) # Remove the agreements from the other suppliers that point to supplier 4 remove_supplier4_agmts("test_clean", topology_m4) # Cleanup - restore supplier 4 restore_supplier4(topology_m4) attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["supplier1"].errlog) ecount = int(attr_errors.readline().rstrip()) log.info("Errors found on m1: %d" % ecount) assert (ecount == 0) attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["supplier2"].errlog) ecount = int(attr_errors.readline().rstrip()) log.info("Errors found on m2: %d" % ecount) assert (ecount == 0) attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["supplier3"].errlog) ecount = int(attr_errors.readline().rstrip()) log.info("Errors found on m3: %d" % ecount) assert (ecount == 0) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 
389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49184_test.py000066400000000000000000000135711421664411400260170ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) GROUP_DN_1 = ("cn=group1," + DEFAULT_SUFFIX) GROUP_DN_2 = ("cn=group2," + DEFAULT_SUFFIX) SUPER_GRP1 = ("cn=super_grp1," + DEFAULT_SUFFIX) SUPER_GRP2 = ("cn=super_grp2," + DEFAULT_SUFFIX) SUPER_GRP3 = ("cn=super_grp3," + DEFAULT_SUFFIX) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _add_group_with_members(topo, group_dn): # Create group try: topo.standalone.add_s(Entry((group_dn, {'objectclass': 'top groupofnames extensibleObject'.split(), 'cn': 'group'}))) except ldap.LDAPError as e: log.fatal('Failed to add group: error ' + e.args[0]['desc']) assert False # Add members to the group - set timeout log.info('Adding members to the group...') for idx in range(1, 5): try: MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topo.standalone.modify_s(group_dn, [(ldap.MOD_ADD, 'member', ensure_bytes(MEMBER_VAL))]) except ldap.LDAPError as e: log.fatal('Failed to update group: member (%s) - error: %s' % (MEMBER_VAL, e.args[0]['desc'])) assert False def _check_memberof(topo, member=None, memberof=True, group_dn=None): # Check that members have memberof attribute on M1 for idx in range(1, 5): try: USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ent = topo.standalone.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") if presence_flag: assert ent.hasAttr('memberof') and ent.getValue('memberof') == ensure_bytes(group_dn) 
else: assert not ent.hasAttr('memberof') except ldap.LDAPError as e: log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False def _check_memberof(topo, member=None, memberof=True, group_dn=None): ent = topo.standalone.getEntry(member, ldap.SCOPE_BASE, "(objectclass=*)") if memberof: assert group_dn assert ent.hasAttr('memberof') and ensure_bytes(group_dn) in ent.getValues('memberof') else: if ent.hasAttr('memberof'): assert ensure_bytes(group_dn) not in ent.getValues('memberof') def test_ticket49184(topo): """Write your testcase here... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). """ topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) topo.standalone.restart(timeout=10) # # create some users and a group # log.info('create users and group...') for idx in range(1, 5): try: USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) topo.standalone.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False # add all users in GROUP_DN_1 and checks each users is memberof GROUP_DN_1 _add_group_with_members(topo, GROUP_DN_1) for idx in range(1, 5): USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) # add all users in GROUP_DN_2 and checks each users is memberof GROUP_DN_2 _add_group_with_members(topo, GROUP_DN_2) for idx in range(1, 5): USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_2 ) # add the level 2, 3 and 4 group for super_grp in (SUPER_GRP1, SUPER_GRP2, SUPER_GRP3): topo.standalone.add_s(Entry((super_grp, {'objectclass': 'top groupofnames extensibleObject'.split(), 'cn': 'super_grp'}))) topo.standalone.modify_s(SUPER_GRP1, [(ldap.MOD_ADD, 'member', 
ensure_bytes(GROUP_DN_1)), (ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_2))]) topo.standalone.modify_s(SUPER_GRP2, [(ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_1)), (ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_2))]) return topo.standalone.delete_s(GROUP_DN_2) for idx in range(1, 5): USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) _check_memberof(topo, member=USER_DN, memberof=False, group_dn=GROUP_DN_2 ) if DEBUGGING: # Add debugging steps(if any)... pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49192_test.py000066400000000000000000000136611421664411400260160ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import Entry from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) INDEX_DN = 'cn=index,cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' SUFFIX_DN = 'cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' MY_SUFFIX = "o=hang.com" USER_DN = 'uid=user,' + MY_SUFFIX def test_ticket49192(topo): """Trigger deadlock when removing suffix """ # # Create a second suffix/backend # log.info('Creating second backend...') topo.standalone.backends.create(None, properties={ BACKEND_NAME: "Second_Backend", 'suffix': "o=hang.com", }) try: topo.standalone.add_s(Entry(("o=hang.com", { 'objectclass': 'top organization'.split(), 'o': 'hang.com'}))) except ldap.LDAPError as e: log.fatal('Failed to create 2nd suffix: error ' + 
e.args[0]['desc']) assert False # # Add roles # log.info('Adding roles...') try: topo.standalone.add_s(Entry(('cn=nsManagedDisabledRole,' + MY_SUFFIX, { 'objectclass': ['top', 'LdapSubEntry', 'nsRoleDefinition', 'nsSimpleRoleDefinition', 'nsManagedRoleDefinition'], 'cn': 'nsManagedDisabledRole'}))) except ldap.LDAPError as e: log.fatal('Failed to add managed role: error ' + e.args[0]['desc']) assert False try: topo.standalone.add_s(Entry(('cn=nsDisabledRole,' + MY_SUFFIX, { 'objectclass': ['top', 'LdapSubEntry', 'nsRoleDefinition', 'nsComplexRoleDefinition', 'nsNestedRoleDefinition'], 'cn': 'nsDisabledRole', 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX}))) except ldap.LDAPError as e: log.fatal('Failed to add nested role: error ' + e.args[0]['desc']) assert False try: topo.standalone.add_s(Entry(('cn=nsAccountInactivationTmp,' + MY_SUFFIX, { 'objectclass': ['top', 'nsContainer'], 'cn': 'nsAccountInactivationTmp'}))) except ldap.LDAPError as e: log.fatal('Failed to add container: error ' + e.args[0]['desc']) assert False try: topo.standalone.add_s(Entry(('cn=\"cn=nsDisabledRole,' + MY_SUFFIX + '\",cn=nsAccountInactivationTmp,' + MY_SUFFIX, { 'objectclass': ['top', 'extensibleObject', 'costemplate', 'ldapsubentry'], 'nsAccountLock': 'true'}))) except ldap.LDAPError as e: log.fatal('Failed to add cos1: error ' + e.args[0]['desc']) assert False try: topo.standalone.add_s(Entry(('cn=nsAccountInactivation_cos,' + MY_SUFFIX, { 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', 'cosClassicDefinition'], 'cn': 'nsAccountInactivation_cos', 'cosTemplateDn': 'cn=nsAccountInactivationTmp,' + MY_SUFFIX, 'cosSpecifier': 'nsRole', 'cosAttribute': 'nsAccountLock operational'}))) except ldap.LDAPError as e: log.fatal('Failed to add cos2 : error ' + e.args[0]['desc']) assert False # # Add test entry # try: topo.standalone.add_s(Entry((USER_DN, { 'objectclass': 'top extensibleObject'.split(), 'uid': 'user', 'userpassword': 'password', }))) except ldap.LDAPError as e: 
log.fatal('Failed to add user: error ' + e.args[0]['desc']) assert False # # Inactivate the user account # try: topo.standalone.modify_s(USER_DN, [(ldap.MOD_ADD, 'nsRoleDN', ensure_bytes('cn=nsManagedDisabledRole,' + MY_SUFFIX))]) except ldap.LDAPError as e: log.fatal('Failed to disable user: error ' + e.args[0]['desc']) assert False time.sleep(1) # Bind as user (should fail) try: topo.standalone.simple_bind_s(USER_DN, 'password') log.error("Bind incorrectly worked") assert False except ldap.UNWILLING_TO_PERFORM: log.info('Got error 53 as expected') except ldap.LDAPError as e: log.fatal('Bind has unexpected error ' + e.args[0]['desc']) assert False # Bind as root DN try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: log.fatal('RootDN Bind has unexpected error ' + e.args[0]['desc']) assert False # # Delete suffix # log.info('Delete the suffix and children...') try: index_entries = topo.standalone.search_s( SUFFIX_DN, ldap.SCOPE_SUBTREE, 'objectclass=top') except ldap.LDAPError as e: log.error('Failed to search: %s - error %s' % (SUFFIX_DN, str(e))) for entry in reversed(index_entries): try: log.info("Deleting: " + entry.dn) if entry.dn != SUFFIX_DN and entry.dn != INDEX_DN: topo.standalone.search_s(entry.dn, ldap.SCOPE_ONELEVEL, 'objectclass=top') topo.standalone.delete_s(entry.dn) except ldap.LDAPError as e: log.fatal('Failed to delete entry: %s - error %s' % (entry.dn, str(e))) assert False log.info("Test Passed") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49227_test.py000066400000000000000000000107511421664411400260120ustar00rootroot00000000000000import os import time import ldap import logging import pytest from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo 
pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) DEFAULT_LEVEL = b"16384" COMB_LEVEL = b"73864" # 65536+8192+128+8 = 73864 COMB_DEFAULT_LEVEL = b"90248" # 65536+8192+128+8+16384 = 90248 def set_level(topo, level): ''' Set the error log level ''' try: topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(level))]) time.sleep(1) except ldap.LDAPError as e: log.fatal('Failed to set loglevel to %s - error: %s' % (level, str(e))) assert False def get_level(topo): ''' Set the error log level ''' try: config = topo.standalone.search_s("cn=config", ldap.SCOPE_BASE, "objectclass=top") time.sleep(1) return config[0].getValue('nsslapd-errorlog-level') except ldap.LDAPError as e: log.fatal('Failed to get loglevel - error: %s' % (str(e))) assert False def get_log_size(topo): ''' Get the errors log size ''' statinfo = os.stat(topo.standalone.errlog) return statinfo.st_size def test_ticket49227(topo): """Set the error log to varying levels, and make sure a search for that value reflects the expected value (not the bitmasked value. 
""" log_size = get_log_size(topo) # Check the default level level = get_level(topo) if level != DEFAULT_LEVEL: log.fatal('Incorrect default logging level: %s' % (level)) assert False # Set connection logging set_level(topo, '8') level = get_level(topo) if level != b'8': log.fatal('Incorrect connection logging level: %s' % (level)) assert False # Check the actual log new_size = get_log_size(topo) if new_size == log_size: # Size should be different log.fatal('Connection logging is not working') assert False # Set default logging using zero set_level(topo, '0') log_size = get_log_size(topo) level = get_level(topo) if level != DEFAULT_LEVEL: log.fatal('Incorrect default logging level: %s' % (level)) assert False # Check the actual log new_size = get_log_size(topo) if new_size != log_size: # Size should be the size log.fatal('Connection logging is still on') assert False # Set default logging using the default value set_level(topo, DEFAULT_LEVEL) level = get_level(topo) if level != DEFAULT_LEVEL: log.fatal('Incorrect default logging level: %s' % (level)) assert False # Check the actual log new_size = get_log_size(topo) if new_size != log_size: # Size should be the size log.fatal('Connection logging is still on') assert False # Set a combined level that includes the default level set_level(topo, COMB_DEFAULT_LEVEL) level = get_level(topo) if level != COMB_DEFAULT_LEVEL: log.fatal('Incorrect combined logging level with default level: %s expected %s' % (level, COMB_DEFAULT_LEVEL)) assert False # Set a combined level that does not includes the default level set_level(topo, COMB_LEVEL) level = get_level(topo) if level != COMB_LEVEL: log.fatal('Incorrect combined logging level without default level: %s expected %s' % (level, COMB_LEVEL)) assert False # Check our level is present after a restart - previous level was COMB_LEVEL topo.standalone.restart() log_size = get_log_size(topo) # Grab the log size for our next check level = get_level(topo) # This should trigger connection 
logging if level != COMB_LEVEL: log.fatal('Incorrect combined logging level with default level: %s expected %s' % (level, COMB_LEVEL)) assert False # Now check the actual levels are still working new_size = get_log_size(topo) if new_size == log_size: # Size should be different log.fatal('Combined logging is not working') assert False if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49249_test.py000066400000000000000000000116521421664411400260170ustar00rootroot00000000000000import time import ldap import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) COS_BRANCH = 'ou=cos_scope,' + DEFAULT_SUFFIX COS_DEF = 'cn=cos_definition,' + COS_BRANCH COS_TEMPLATE = 'cn=cos_template,' + COS_BRANCH INVALID_USER_WITH_COS = 'cn=cos_user_no_mail,' + COS_BRANCH VALID_USER_WITH_COS = 'cn=cos_user_with_mail,' + COS_BRANCH NO_COS_BRANCH = 'ou=no_cos_scope,' + DEFAULT_SUFFIX INVALID_USER_WITHOUT_COS = 'cn=no_cos_user_no_mail,' + NO_COS_BRANCH VALID_USER_WITHOUT_COS = 'cn=no_cos_user_with_mail,' + NO_COS_BRANCH def test_ticket49249(topo): """Write your testcase here... Also, if you need any testcase initialization, please, write additional fixture for that(include finalizer). 
""" # Add the branches try: topo.standalone.add_s(Entry((COS_BRANCH, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'cos_scope' }))) except ldap.LDAPError as e: log.error('Failed to add cos_scope: error ' + e.message['desc']) assert False try: topo.standalone.add_s(Entry((NO_COS_BRANCH, { 'objectclass': 'top extensibleObject'.split(), 'ou': 'no_cos_scope' }))) except ldap.LDAPError as e: log.error('Failed to add no_cos_scope: error ' + e.message['desc']) assert False try: topo.standalone.add_s(Entry((COS_TEMPLATE, { 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': 'cos_template', 'cosPriority': '1', 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', 'mailAlternateAddress': 'hello@world' }))) except ldap.LDAPError as e: log.error('Failed to add cos_template: error ' + e.message['desc']) assert False try: topo.standalone.add_s(Entry((COS_DEF, { 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': 'cos_definition', 'costemplatedn': COS_TEMPLATE, 'cosAttribute': 'mailAlternateAddress default' }))) except ldap.LDAPError as e: log.error('Failed to add cos_definition: error ' + e.message['desc']) assert False try: # This entry is not allowed to have mailAlternateAddress topo.standalone.add_s(Entry((INVALID_USER_WITH_COS, { 'objectclass': 'top person'.split(), 'cn': 'cos_user_no_mail', 'sn': 'cos_user_no_mail' }))) except ldap.LDAPError as e: log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) assert False try: # This entry is allowed to have mailAlternateAddress topo.standalone.add_s(Entry((VALID_USER_WITH_COS, { 'objectclass': 'top mailGroup'.split(), 'cn': 'cos_user_with_mail' }))) except ldap.LDAPError as e: log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) assert False try: # This entry is not allowed to have mailAlternateAddress topo.standalone.add_s(Entry((INVALID_USER_WITHOUT_COS, { 'objectclass': 'top person'.split(), 'cn': 
'no_cos_user_no_mail', 'sn': 'no_cos_user_no_mail' }))) except ldap.LDAPError as e: log.error('Failed to add no_cos_user_no_mail: error ' + e.message['desc']) assert False try: # This entry is allowed to have mailAlternateAddress topo.standalone.add_s(Entry((VALID_USER_WITHOUT_COS, { 'objectclass': 'top mailGroup'.split(), 'cn': 'no_cos_user_with_mail' }))) except ldap.LDAPError as e: log.error('Failed to add no_cos_user_with_mail: error ' + e.message['desc']) assert False try: entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(mailAlternateAddress=*)') assert len(entries) == 1 assert entries[0].hasValue('mailAlternateAddress', 'hello@world') except ldap.LDAPError as e: log.fatal('Unable to retrieve cos_user_with_mail (only entry with mailAlternateAddress) : error %s' % (USER1_DN, e.message['desc'])) assert False assert not topo.standalone.ds_error_log.match(".*cos attribute mailAlternateAddress failed schema.*") if DEBUGGING: # Add debugging steps(if any)... pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49273_test.py000066400000000000000000000026651421664411400260200ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.topologies import topology_st # This pulls in logging I think from lib389.utils import * from lib389.sasl import PlainSASL from lib389.idm.services import ServiceAccounts pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) def test_49273_corrupt_dbversion(topology_st): """ ticket 49273 was caused by a disk space full, which corrupted the users DBVERSION files. We can't prevent this, but we can handle the error better than "crash". 
""" standalone = topology_st.standalone # Stop the instance standalone.stop() # Corrupt userRoot dbversion dbvf = os.path.join(standalone.ds_paths.db_dir, 'userRoot/DBVERSION') with open(dbvf, 'w') as f: # This will trunc the file f.write('') # Start up try: # post_open false, means ds state is OFFLINE, which allows # dspaths below to use defaults rather than ldap check. standalone.start(timeout=20, post_open=False) except: pass # Trigger an update of the running server state, to move it OFFLINE. standalone.status() # CHeck error log? error_lines = standalone.ds_error_log.match('.*Could not parse file.*') assert(len(error_lines) > 0) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49287_test.py000066400000000000000000000263751421664411400260310ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.properties import RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, BACKEND_NAME from lib389.topologies import topology_m2 from lib389._constants import * from lib389.replica import ReplicationManager pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv('DEBUGGING', False) GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def _add_repl_backend(s1, s2, be): suffix = 'ou=%s,dc=test,dc=com' % be create_backend(s1, s2, suffix, be) add_ou(s1, suffix) replicate_backend(s1, s2, suffix) def _wait_for_sync(s1, s2, testbase, final_db): now = time.time() cn1 = 'sync-%s-%d' % (now, 1) cn2 = 'sync-%s-%d' % (now, 2) add_user(s1, cn1, testbase, 'add on m1', sleep=False) add_user(s2, cn2, testbase, 'add on m2', sleep=False) dn1 = 'cn=%s,%s' % (cn1, testbase) dn2 = 
'cn=%s,%s' % (cn2, testbase) if final_db: final_db.append(dn1) final_db.append(dn2) _check_entry_exist(s2, dn1, 10, 5) _check_entry_exist(s1, dn2, 10, 5) def _check_entry_exist(supplier, dn, loops=10, wait=1): attempt = 0 while attempt <= loops: try: dn ent = supplier.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") break except ldap.NO_SUCH_OBJECT: attempt = attempt + 1 time.sleep(wait) except ldap.LDAPError as e: log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc'])) assert False assert attempt <= loops def config_memberof(server): server.plugins.enable(name=PLUGIN_MEMBER_OF) MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', b'on')]) # Configure fractional to prevent total init to send memberof ents = server.agreement.list(suffix=DEFAULT_SUFFIX) log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn) for ent in ents: server.modify_s(ent.dn, [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeListTotal', b'(objectclass=*) $ EXCLUDE '), (ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', b'(objectclass=*) $ EXCLUDE memberOf')]) def _disable_auto_oc_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) def _enable_auto_oc_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) def add_dc(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'domain']}))) def add_ou(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit']}))) def add_container(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'nscontainer']}))) def add_user(server, cn, testbase, desc, sleep=True): dn = 'cn=%s,%s' % (cn, testbase) log.fatal('Adding user (%s): ' % 
dn) server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], 'sn': 'user_%s' % cn, 'description': desc}))) if sleep: time.sleep(2) def add_person(server, cn, testbase, desc, sleep=True): dn = 'cn=%s,%s' % (cn, testbase) log.fatal('Adding user (%s): ' % dn) server.add_s(Entry((dn, {'objectclass': ['top', 'person'], 'sn': 'user_%s' % cn, 'description': desc}))) if sleep: time.sleep(2) def add_multi_member(server, cn, mem_id, mem_usr, testbase, sleep=True): dn = 'cn=%s,ou=groups,%s' % (cn, testbase) members = [] for usr in mem_usr: members.append('cn=a%d,ou=be_%d,%s' % (mem_id, usr, testbase)) for mem in members: mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem))] try: server.modify_s(dn, mod) except ldap.OBJECT_CLASS_VIOLATION: log.info('objectclass violation') if sleep: time.sleep(2) def add_member(server, cn, mem, testbase, sleep=True): dn = 'cn=%s,ou=groups,%s' % (cn, testbase) mem_dn = 'cn=%s,ou=people,%s' % (mem, testbase) mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem_dn))] server.modify_s(dn, mod) if sleep: time.sleep(2) def add_group(server, testbase, nr, sleep=True): dn = 'cn=g%d,ou=groups,%s' % (nr, testbase) server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], 'member': [ 'cn=m1_%d,%s' % (nr, testbase), 'cn=m2_%d,%s' % (nr, testbase), 'cn=m3_%d,%s' % (nr, testbase) ], 'description': 'group %d' % nr}))) if sleep: time.sleep(2) def del_group(server, testbase, nr, sleep=True): dn = 'cn=g%d,%s' % (nr, testbase) server.delete_s(dn) if sleep: time.sleep(2) def mod_entry(server, cn, testbase, desc): dn = 'cn=%s,%s' % (cn, testbase) mod = [(ldap.MOD_ADD, 'description', ensure_bytes(desc))] server.modify_s(dn, mod) time.sleep(2) def del_entry(server, testbase, cn): dn = 'cn=%s,%s' % (cn, testbase) server.delete_s(dn) time.sleep(2) def _disable_nunc_stans(server): server.config.set('nsslapd-enable-nunc-stans', 'off') def _enable_spec_logging(server): server.config.replace_many(('nsslapd-accesslog-level', '260'), ('nsslapd-errorlog-level', 
str(8192 + 65536)), ('nsslapd-plugin-logging', 'on'), ('nsslapd-auditlog-logging-enabled', 'on')) def create_backend(s1, s2, beSuffix, beName): s1.mappingtree.create(beSuffix, beName) s1.backend.create(beSuffix, {BACKEND_NAME: beName}) s2.mappingtree.create(beSuffix, beName) s2.backend.create(beSuffix, {BACKEND_NAME: beName}) def replicate_backend(s1, s2, beSuffix): repl = ReplicationManager(beSuffix) repl.create_first_supplier(s1) repl.join_supplier(s1, s2) repl.ensure_agreement(s1, s2) repl.ensure_agreement(s2, s2) # agreement m2_m1_agmt is not needed... :p # def check_group_mods(server1, server2, group, testbase): # add members to group add_multi_member(server1, group, 1, [1,2,3,4,5], testbase, sleep=False) add_multi_member(server1, group, 2, [3,4,5], testbase, sleep=False) add_multi_member(server1, group, 3, [0], testbase, sleep=False) add_multi_member(server1, group, 4, [1,3,5], testbase, sleep=False) add_multi_member(server1, group, 5, [2,0], testbase, sleep=False) add_multi_member(server1, group, 6, [2,3,4], testbase, sleep=False) # check that replication is working # for main backend and some member backends _wait_for_sync(server1, server2, testbase, None) for i in range(6): be = "be_%d" % i _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) def check_multi_group_mods(server1, server2, group1, group2, testbase): # add members to group add_multi_member(server2, group1, 1, [1,2,3,4,5], testbase, sleep=False) add_multi_member(server1, group2, 1, [1,2,3,4,5], testbase, sleep=False) add_multi_member(server2, group1, 2, [3,4,5], testbase, sleep=False) add_multi_member(server1, group2, 2, [3,4,5], testbase, sleep=False) add_multi_member(server2, group1, 3, [0], testbase, sleep=False) add_multi_member(server1, group2, 3, [0], testbase, sleep=False) add_multi_member(server2, group1, 4, [1,3,5], testbase, sleep=False) add_multi_member(server1, group2, 4, [1,3,5], testbase, sleep=False) add_multi_member(server2, group1, 5, [2,0], testbase, 
sleep=False) add_multi_member(server1, group2, 5, [2,0], testbase, sleep=False) add_multi_member(server2, group1, 6, [2,3,4], testbase, sleep=False) add_multi_member(server1, group2, 6, [2,3,4], testbase, sleep=False) # check that replication is working # for main backend and some member backends _wait_for_sync(server1, server2, testbase, None) for i in range(6): be = "be_%d" % i _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) def test_ticket49287(topology_m2): """ test case for memberof and conflict entries """ # return M1 = topology_m2.ms["supplier1"] M2 = topology_m2.ms["supplier2"] config_memberof(M1) config_memberof(M2) _enable_spec_logging(M1) _enable_spec_logging(M2) _disable_nunc_stans(M1) _disable_nunc_stans(M2) M1.restart(timeout=10) M2.restart(timeout=10) testbase = 'dc=test,dc=com' bename = 'test' create_backend(M1, M2, testbase, bename) add_dc(M1, testbase) add_ou(M1, 'ou=groups,%s' % testbase) replicate_backend(M1, M2, testbase) peoplebase = 'ou=people,dc=test,dc=com' peoplebe = 'people' create_backend(M1, M2, peoplebase, peoplebe) add_ou(M1, peoplebase) replicate_backend(M1, M2, peoplebase) for i in range(10): cn = 'a%d' % i add_user(M1, cn, peoplebase, 'add on m1', sleep=False) time.sleep(2) add_group(M1, testbase, 1) for i in range(10): cn = 'a%d' % i add_member(M1, 'g1', cn, testbase, sleep=False) cn = 'b%d' % i add_user(M1, cn, peoplebase, 'add on m1', sleep=False) time.sleep(2) _wait_for_sync(M1, M2, testbase, None) _wait_for_sync(M1, M2, peoplebase, None) # test group with members in multiple backends for i in range(7): be = "be_%d" % i _add_repl_backend(M1, M2, be) # add entries akllowing meberof for i in range(1, 7): be = "be_%d" % i for i in range(10): cn = 'a%d' % i add_user(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) # add entries not allowing memberof be = 'be_0' for i in range(10): cn = 'a%d' % i add_person(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) _disable_auto_oc_memberof(M1) 
_disable_auto_oc_memberof(M2) add_group(M1, testbase, 2) check_group_mods(M1, M2, 'g2', testbase) _enable_auto_oc_memberof(M1) add_group(M1, testbase, 3) check_group_mods(M1, M2, 'g3', testbase) _enable_auto_oc_memberof(M2) add_group(M1, testbase, 4) check_group_mods(M1, M2, 'g4', testbase) add_group(M1, testbase, 5) add_group(M1, testbase, 6) check_multi_group_mods(M1, M2, 'g5', 'g6', testbase) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49290_test.py000066400000000000000000000047171421664411400260170ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest import ldap from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME from lib389.backend import Backends pytestmark = pytest.mark.tier2 def test_49290_range_unindexed_notes(topology_st): """ Ticket 49290 had a small collection of issues - the primary issue is that range requests on an attribute that is unindexed was not reporting notes=U. This asserts that: * When unindexed, the attr shows notes=U * when indexed, the attr does not """ # First, assert that modifyTimestamp does not have an index. If it does, # delete it. topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') backends = Backends(topology_st.standalone) backend = backends.get(DEFAULT_BENAME) indexes = backend.get_indexes() for i in indexes.list(): i_cn = i.get_attr_val_utf8('cn') if i_cn.lower() == 'modifytimestamp': i.delete() topology_st.standalone.restart() # Now restart the server, and perform a modifyTimestamp range operation. 
# in access, we should see notes=U (or notes=A) results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) access_lines_unindexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') assert len(access_lines_unindexed) == 1 # Now add the modifyTimestamp index and run db2index. This will restart # the server indexes.create(properties={ 'cn': 'modifytimestamp', 'nsSystemIndex': 'false', 'nsIndexType' : 'eq', }) topology_st.standalone.stop() assert topology_st.standalone.db2index(DEFAULT_BENAME, attrs=['modifytimestamp'] ) topology_st.standalone.start() # Now run the modifyTimestamp range query again. Assert that there is no # notes=U/A in the log results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) access_lines_indexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') # Remove the old lines too. access_lines_final = set(access_lines_unindexed) - set(access_lines_indexed) # Make sure we have no unindexed notes in the log. assert len(access_lines_final) == 0 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49303_test.py000066400000000000000000000062461421664411400260110ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import time import logging import os import subprocess import pytest from lib389.topologies import topology_st as topo from lib389.nss_ssl import NssSsl from lib389._constants import SECUREPORT_STANDALONE1, HOST_STANDALONE1 pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def try_reneg(host, port): """ Connect to the specified host and port with openssl, and attempt to initiate a renegotiation. Returns true if successful, false if not. """ cmd = [ '/usr/bin/openssl', 's_client', '-connect', '%s:%d' % (host, port), ] try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) except ValueError as e: log.info("openssl failed: %s", e) proc.kill() # This 'R' command is intercepted by openssl and triggers a renegotiation proc.communicate(b'R\n') # We rely on openssl returning 0 if no errors occured, and 1 if any did # (for example, the server rejecting renegotiation and terminating the # connection) return proc.returncode == 0 def enable_ssl(server, ldapsport): server.stop() nss_ssl = NssSsl(dbpath=server.get_cert_dir()) nss_ssl.reinit() nss_ssl.create_rsa_ca() nss_ssl.create_rsa_key_and_cert() server.start() server.config.set('nsslapd-secureport', '%s' % ldapsport) server.config.set('nsslapd-security', 'on') server.sslport = SECUREPORT_STANDALONE1 server.restart() def set_reneg(server, state): server.encryption.set('nsTLSAllowClientRenegotiation', state) time.sleep(1) server.restart() def test_ticket49303(topo): """ Test the nsTLSAllowClientRenegotiation setting. 
""" sslport = SECUREPORT_STANDALONE1 log.info("Ticket 49303 - Allow disabling of SSL renegotiation") # No value set, defaults to reneg allowed enable_ssl(topo.standalone, sslport) assert try_reneg(HOST_STANDALONE1, sslport) is True log.info("Renegotiation allowed by default - OK") # Turn reneg off set_reneg(topo.standalone, 'off') assert try_reneg(HOST_STANDALONE1, sslport) is False log.info("Renegotiation disallowed - OK") # Explicitly enable set_reneg(topo.standalone, 'on') assert try_reneg(HOST_STANDALONE1, sslport) is True log.info("Renegotiation explicitly allowed - OK") # Set to an invalid value, defaults to allowed set_reneg(topo.standalone, 'invalid') assert try_reneg(HOST_STANDALONE1, sslport) is True log.info("Renegotiation allowed when option is invalid - OK") log.info("Ticket 49303 - PASSED") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49386_test.py000066400000000000000000000113741421664411400260220ustar00rootroot00000000000000import logging import pytest import os import ldap import time from lib389.utils import * from lib389.topologies import topology_st as topo from lib389._constants import * from lib389.config import Config from lib389 import Entry pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] USER_CN='user_' GROUP_CN='group_' DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def add_user(server, no, desc='dummy', sleep=True): cn = '%s%d' % (USER_CN, no) dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) log.fatal('Adding user (%s): ' % dn) server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], 'sn': ['_%s' % cn], 'description': [desc]}))) if sleep: time.sleep(2) def 
add_group(server, nr, sleep=True): cn = '%s%d' % (GROUP_CN, nr) dn = 'cn=%s,ou=groups,%s' % (cn, SUFFIX) server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], 'description': 'group %d' % nr}))) if sleep: time.sleep(2) def update_member(server, member_dn, group_dn, op, sleep=True): mod = [(op, 'member', ensure_bytes(member_dn))] server.modify_s(group_dn, mod) if sleep: time.sleep(2) def config_memberof(server): server.plugins.enable(name=PLUGIN_MEMBER_OF) MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', b'on'), (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) def _find_memberof(server, member_dn, group_dn, find_result=True): ent = server.getEntry(member_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) found = False if ent.hasAttr('memberof'): for val in ent.getValues('memberof'): server.log.info("!!!!!!! %s: memberof->%s" % (member_dn, val)) server.log.info("!!!!!!! %s" % (val)) server.log.info("!!!!!!! %s" % (group_dn)) if val.lower() == ensure_bytes(group_dn.lower()): found = True break if find_result: assert (found) else: assert (not found) def test_ticket49386(topo): """Specify a test case purpose or name here :id: ceb1e2b7-42cb-49f9-8ddd-bc752aa4a589 :setup: Fill in set up configuration here :steps: 1. Configure memberof 2. Add users (user_1) 3. Add groups (group_1) 4. Make user_1 member of group_1 5. Check that user_1 has the memberof attribute to group_1 6. Enable plugin log to capture memberof modrdn callback notification 7. Rename group_1 in itself 8. Check that the operation was skipped by memberof :expectedresults: 1. 
memberof modrdn callbackk to log notfication that the update is skipped """ S1 = topo.standalone # Step 1 config_memberof(S1) S1.restart() # Step 2 for i in range(10): add_user(S1, i, desc='add on S1') # Step 3 for i in range(3): add_group(S1, i) # Step 4 member_dn = 'cn=%s%d,ou=people,%s' % (USER_CN, 1, SUFFIX) group_parent_dn = 'ou=groups,%s' % (SUFFIX) group_rdn = 'cn=%s%d' % (GROUP_CN, 1) group_dn = '%s,%s' % (group_rdn, group_parent_dn) update_member(S1, member_dn, group_dn, ldap.MOD_ADD, sleep=False) # Step 5 _find_memberof(S1, member_dn, group_dn, find_result=True) # Step 6 S1.config.loglevel(vals=[LOG_PLUGIN, LOG_DEFAULT], service='error') # Step 7 S1.rename_s(group_dn, group_rdn, newsuperior=group_parent_dn, delold=0) # Step 8 time.sleep(2) # should not be useful.. found = False for i in S1.ds_error_log.match('.*Skip modrdn operation because src/dst identical.*'): log.info('memberof log found: %s' % i) found = True assert(found) # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49412_test.py000066400000000000000000000036431421664411400260100ustar00rootroot00000000000000import logging import pytest import os import ldap import time from lib389._constants import * from lib389.topologies import topology_m1c1 as topo from lib389._constants import * from lib389 import Entry pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) CHANGELOG = 'cn=changelog5,cn=config' MAXAGE_ATTR = 'nsslapd-changelogmaxage' TRIMINTERVAL = 'nsslapd-changelogtrim-interval' def test_ticket49412(topo): """Specify a test case purpose or name here :id: 4c7681ff-0511-4256-9589-bdcad84c13e6 :setup: Fill in set up configuration here :steps: 1. Fill in test case steps here 2. And indent them like this (RST format requirement) :expectedresults: 1. Fill in the result that is expected 2. For each test step """ M1 = topo.ms["supplier1"] # wrong call with invalid value (should be str(60) # that create replace with NULL value # it should fail with UNWILLING_TO_PERFORM try: M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, 60), (ldap.MOD_REPLACE, TRIMINTERVAL, 10)]) assert(False) except ldap.UNWILLING_TO_PERFORM: pass # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49441_test.py000066400000000000000000000046171421664411400260140ustar00rootroot00000000000000import logging import pytest import os import ldap from lib389._constants import * from lib389.topologies import topology_st as topo from lib389.utils import * pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) def test_ticket49441(topo): """Import ldif with large indexed binary attributes, the server should not crash :id: 4e5df145-cbd1-4955-8f77-6a7eaa14beba :setup: standalone topology :steps: 1. Add indexes for binary attributes 2. Perform online import 3. Verify server is still running :expectedresults: 1. Indexes are successfully added 2. Import succeeds 3. 
Server is still running """ log.info('Position ldif files, and add indexes...') ldif_dir = topo.standalone.get_ldif_dir() + "binary.ldif" ldif_file = (topo.standalone.getDir(__file__, DATA_DIR) + "ticket49441/binary.ldif") shutil.copyfile(ldif_file, ldif_dir) args = {INDEX_TYPE: ['eq', 'pres']} for attr in ('usercertificate', 'authorityrevocationlist', 'certificaterevocationlist', 'crosscertificatepair', 'cacertificate'): try: topo.standalone.index.create(suffix=DEFAULT_SUFFIX, be_name='userroot', attr=attr, args=args) except ldap.LDAPError as e: log.fatal("Failed to add index '{}' error: {}".format(attr, str(e))) raise e log.info('Import LDIF with large indexed binary attributes...') try: topo.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=ldif_dir, args={TASK_WAIT: True}) except: log.fatal('Import failed!') assert False log.info('Verify server is still running...') try: topo.standalone.search_s("", ldap.SCOPE_BASE, "objectclass=*") except ldap.LDAPError as e: log.fatal('Server is not alive: ' + str(e)) assert False log.info('Test PASSED') if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49460_test.py000066400000000000000000000065461421664411400260200ustar00rootroot00000000000000import time import ldap import logging import pytest import os import re from lib389._constants import * from lib389.config import Config from lib389 import DirSrv, Entry from lib389.topologies import topology_m3 as topo pytestmark = pytest.mark.tier2 DEBUGGING = os.getenv("DEBUGGING", default=False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) USER_CN="user" def add_user(server, no, desc='dummy', sleep=True): cn = '%s%d' % (USER_CN, no) dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) log.fatal('Adding user (%s): ' % dn) 
server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'], 'sn': ['_%s' % cn], 'description': [desc]}))) time.sleep(1) def check_user(server, no, timeout=10): cn = '%s%d' % (USER_CN, no) dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) found = False cpt = 0 while cpt < timeout: try: server.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") found = True break except ldap.NO_SUCH_OBJECT: time.sleep(1) cpt += 1 return found def pattern_errorlog(server, log_pattern): file_obj = open(server.errlog, "r") found = None # Use a while true iteration because 'for line in file: hit a while True: line = file_obj.readline() found = log_pattern.search(line) if ((line == '') or (found)): break return found def test_ticket_49460(topo): """Specify a test case purpose or name here :id: d1aa2e8b-e6ab-4fc6-9c63-c6f622544f2d :setup: Fill in set up configuration here :steps: 1. Enable replication logging 2. Do few updates to generatat RUV update :expectedresults: 1. No report of failure when the RUV is updated """ M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] for i in (M1, M2, M3): i.config.loglevel(vals=[256 + 4], service='access') i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') add_user(M1, 11, desc="add to M1") add_user(M2, 21, desc="add to M2") add_user(M3, 31, desc="add to M3") for i in (M1, M2, M3): assert check_user(i, 11) assert check_user(i, 21) assert check_user(i, 31) time.sleep(10) #M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', # force=False, args={TASK_WAIT: True}) #time.sleep(10) regex = re.compile(".*Failed to update RUV tombstone.*LDAP error - 0") assert not pattern_errorlog(M1, regex) assert not pattern_errorlog(M2, regex) assert not pattern_errorlog(M3, regex) # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. 
# If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49463_test.py000066400000000000000000000204221421664411400260100ustar00rootroot00000000000000import time import ldap import logging import pytest import os import re from lib389._constants import DEFAULT_SUFFIX, SUFFIX, LOG_REPLICA, LOG_DEFAULT from lib389.config import Config from lib389 import DirSrv, Entry from lib389.topologies import topology_m4 as topo from lib389.replica import Replicas, ReplicationManager from lib389.idm.user import UserAccounts, UserAccount from lib389.tasks import * from lib389.utils import * pytestmark = pytest.mark.tier2 USER_CN = "test_user" def add_user(server, no, desc='dummy'): user = UserAccounts(server, DEFAULT_SUFFIX) users = user.create_test_user(uid=no) users.add('description', [desc]) users.add('objectclass', 'userSecurityInformation') def pattern_errorlog(server, log_pattern): for i in range(10): time.sleep(5) found = server.ds_error_log.match(log_pattern) if found == '' or found: return found break def fractional_server_to_replica(server, replica): repl = ReplicationManager(DEFAULT_SUFFIX) repl.ensure_agreement(server, replica) replica_server = Replicas(server).get(DEFAULT_SUFFIX) agmt_server = replica_server.get_agreements().list()[0] agmt_server.replace_many( ('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE telephoneNumber'), ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE telephoneNumber'), ('nsds5ReplicaStripAttrs', 'modifiersname modifytimestamp'), ) def count_pattern_accesslog(server, log_pattern): count = 0 server.config.set('nsslapd-accesslog-logbuffering', 'off') if 
server.ds_access_log.match(log_pattern): count = count + 1 return count def test_ticket_49463(topo): """Specify a test case purpose or name here :id: 2a68e8be-387d-4ac7-9452-1439e8483c13 :setup: Fill in set up configuration here :steps: 1. Enable fractional replication 2. Enable replication logging 3. Check that replication is working fine 4. Generate skipped updates to create keep alive entries 5. Remove M3 from the topology 6. issue cleanAllRuv FORCE that will run on M1 then propagated M2 and M4 7. Check that Number DEL keep alive '3' is <= 1 8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones 9. Check replication M1,M2 and M4 can recover 10. Remove M4 from the topology 11. Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) 12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1) 13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart 14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0) 15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation :expectedresults: 1. 
No report of failure when the RUV is updated """ # Step 1 - Configure fractional (skip telephonenumber) replication M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] M4 = topo.ms["supplier4"] repl = ReplicationManager(DEFAULT_SUFFIX) fractional_server_to_replica(M1, M2) fractional_server_to_replica(M1, M3) fractional_server_to_replica(M1, M4) fractional_server_to_replica(M2, M1) fractional_server_to_replica(M2, M3) fractional_server_to_replica(M2, M4) fractional_server_to_replica(M3, M1) fractional_server_to_replica(M3, M2) fractional_server_to_replica(M3, M4) fractional_server_to_replica(M4, M1) fractional_server_to_replica(M4, M2) fractional_server_to_replica(M4, M3) # Step 2 - enable internal op logging and replication debug for i in (M1, M2, M3, M4): i.config.loglevel(vals=[256 + 4], service='access') i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') # Step 3 - Check that replication is working fine add_user(M1, 11, desc="add to M1") add_user(M2, 21, desc="add to M2") add_user(M3, 31, desc="add to M3") add_user(M4, 41, desc="add to M4") for i in (M1, M2, M3, M4): for j in (M1, M2, M3, M4): if i == j: continue repl.wait_for_replication(i, j) # Step 4 - Generate skipped updates to create keep alive entries for i in (M1, M2, M3, M4): cn = '%s_%d' % (USER_CN, 11) dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX) users = UserAccount(i, dn) for j in range(110): users.set('telephoneNumber', str(j)) # Step 5 - Remove M3 from the topology M3.stop() M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) # Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4 M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', force=True, args={TASK_WAIT: True}) # Step 7 - Count the number of received DEL of the keep alive 3 for i in (M1, M2, M4): 
i.restart() regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*") for i in (M1, M2, M4): count = count_pattern_accesslog(M1, regex) log.debug("count on %s = %d" % (i, count)) # check that DEL is replicated once (If DEL is kept in the fix) # check that DEL is is not replicated (If DEL is finally no long done in the fix) assert ((count == 1) or (count == 0)) # Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation regex = re.compile(".*Original task deletes Keep alive entry .3.*") assert pattern_errorlog(M1, regex) regex = re.compile(".*Propagated task does not delete Keep alive entry .3.*") assert pattern_errorlog(M2, regex) assert pattern_errorlog(M4, regex) # Step 9 - Check replication M1,M2 and M4 can recover add_user(M1, 12, desc="add to M1") add_user(M2, 22, desc="add to M2") for i in (M1, M2, M4): for j in (M1, M2, M4): if i == j: continue repl.wait_for_replication(i, j) # Step 10 - Remove M4 from the topology M4.stop() M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) # Step 11 - Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) M2.stop() M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4', force=False, args={TASK_WAIT: False}) # Step 12 # CleanAllRuv is hanging waiting for M2 to restart # Check that nsds5ReplicaCleanRUV is correctly encoded on M1 replicas = Replicas(M1) replica = replicas.list()[0] time.sleep(0.5) replica.present('nsds5ReplicaCleanRUV') log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv')) regex = re.compile("^4:.*:no:1$") assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) # Step 13 # Check that it encoding survives restart M1.restart() assert replica.present('nsds5ReplicaCleanRUV') assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) # Step 14 - Check that nsds5ReplicaCleanRUV encoding is valid on M2 M1.stop() 
def _user_get_dn(no):
    """Return the (cn, dn) pair for test user number *no* under ou=people,SUFFIX."""
    cn = '%s%d' % (USER_CN, no)
    dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX)
    return (cn, dn)


def add_user(server, no, desc='dummy', sleep=True):
    """Add a test user entry to *server*.

    :param server: DirSrv connection used to issue the ADD
    :param no: numeric suffix appended to USER_CN to build cn/dn
    :param desc: effectively unused, kept for backward compatibility.
        The original dict literal listed the 'description' key twice, so
        this value was always silently overwritten by 'add on that host'
        — which the extensible-match filter in test_ticket49471 relies on.
    :param sleep: when True, pause 2 seconds after the ADD
    """
    (cn, dn) = _user_get_dn(no)
    # was log.fatal — clearly the wrong level for a progress message
    log.info('Adding user (%s): ' % dn)
    # Keep 'add on that host' as the single description value; the test's
    # filter "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*on\*)" matches it.
    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'],
                             'cn': [cn],
                             'sn': [cn],
                             'description': ['add on that host']})))
    if sleep:
        time.sleep(2)
457ab172-9455-4eb2-89a0-150e3de5993f :setup: Fill in set up configuration here :steps: 1. Fill in test case steps here 2. And indent them like this (RST format requirement) :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) S1 = topo.standalone add_user(S1, 1) Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*on\*)" ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) assert len(ents) == 1 # # The following is for the test 49491 # skipped here else it crashes in ASAN #Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*host)" #ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) #assert len(ents) == 1 if DEBUGGING: # Add debugging steps(if any)... 
# DN of the homeDirectory index configuration entry and related constants
HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
HOMEDIRECTORY_CN = "homedirectory"
MATCHINGRULE = 'nsMatchingRule'
USER_CN = 'user_'


def create_index_entry(topo):
    """Create an eq/sub/pres index on homeDirectory (idempotent).

    If the index entry already exists, leave it untouched.
    """
    log.info("\n\nindex homeDirectory")
    try:
        topo.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
    except ldap.NO_SUCH_OBJECT:
        topo.add_s(Entry((HOMEDIRECTORY_INDEX, {
            'objectclass': "top nsIndex".split(),
            'cn': HOMEDIRECTORY_CN,
            'nsSystemIndex': 'false',
            MATCHINGRULE: ['caseIgnoreIA5Match',
                           'caseExactIA5Match'],
            'nsIndexType': ['eq', 'sub', 'pres']})))


def provision_users(topo):
    """Create 100 users carrying a 32KB homeDirectory value.

    The oversized attribute value deliberately slows indexing down so the
    reindex task is observably "in progress" during the test.

    :returns: list of created UserAccount objects
    """
    test_users = []
    homeValue = b'x' * (32 * 1024)  # just to slow down indexing
    # UserAccounts is loop-invariant: build it once instead of 100 times
    users = UserAccounts(topo, SUFFIX)
    for i in range(100):
        CN = '%s%d' % (USER_CN, i)
        user_props = TEST_USER_PROPERTIES.copy()
        user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN, HOMEDIRECTORY_CN: homeValue})
        test_users.append(users.create(properties=user_props))
    return test_users


def start_start_status(server):
    """Launch a homeDirectory reindex task WITHOUT waiting for completion.

    :returns: the Tasks object tracking the reindex task
    """
    args = {TASK_WAIT: False}
    indexTask = Tasks(server)
    indexTask.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
    return indexTask


def check_task_status(server, indexTask, test_entry):
    """Poll the reindex task and verify update behavior while it runs.

    While the task status does not yet contain 'Finished indexing', a MOD
    on *test_entry* is expected to fail with UNWILLING_TO_PERFORM; once it
    is finished, the MOD must succeed.

    :param server: DirSrv connection to poll and modify through
    :param indexTask: Tasks object returned by start_start_status()
    :param test_entry: DN of an entry to modify on each poll iteration
    """
    finish_pattern = re.compile(".*Finished indexing.*")
    mod = [(ldap.MOD_REPLACE, 'sn', b'foo')]
    for i in range(10):
        log.info("check_task_status =========> %d th loop" % i)
        try:
            ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE)
            if ent.hasAttr('nsTaskStatus'):
                value = str(ent.getValue('nsTaskStatus'))
                finish = finish_pattern.search(value)
                log.info("%s ---> %s" % (indexTask.dn, value))
            else:
                finish = None
                log.info("%s ---> NO STATUS" % (indexTask.dn))
            if not finish:
                # This is not yet finished try an update
                try:
                    server.modify_s(test_entry, mod)
                    # weird, may be indexing just complete
                    ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE, ['nsTaskStatus'])
                    # BUG FIX: the original referenced an undefined name
                    # 'regex' (NameError) and passed raw bytes from
                    # getValue() to a str-compiled pattern (TypeError).
                    # Use finish_pattern on the decoded status instead.
                    assert (ent.hasAttr('nsTaskStatus') and
                            finish_pattern.search(str(ent.getValue('nsTaskStatus'))))
                    log.info("Okay, it just finished so the MOD was successful")
                except ldap.UNWILLING_TO_PERFORM:
                    log.info("=========> Great it was expected in the middle of index")
            else:
                # The update should be successful
                server.modify_s(test_entry, mod)
        except ldap.NO_SUCH_OBJECT:
            log.info("%s: no found" % (indexTask.dn))
        time.sleep(1)
@pytest.mark.ds49623
@pytest.mark.bz1790986
def test_modrdn_loop(topology_m1):
    """Test that renaming the same entry multiple times reusing the same RDN
    multiple times does not result in cenotaph error messages

    :id: 631b2be9-5c03-44c7-9853-a87c923d5b30
    :customerscenario: True
    :setup: Single supplier instance
    :steps:
        1. Add an entry with RDN start rdn
        2. Rename the entry to rdn change
        3. Rename the entry to start again
        4. Rename the entry to rdn change
        5. Check for cenotaph error messages
    :expectedresults:
        1. Add succeeds
        2. Rename succeeds
        3. Rename succeeds
        4. Rename succeeds
        5. No cenotaph error messages in the error log
    """

    topo = topology_m1.ms['supplier1']

    TEST_ENTRY_RDN_START = 'start'
    TEST_ENTRY_RDN_CHANGE = 'change'
    TEST_ENTRY_NAME = 'tuser'
    users = UserAccounts(topo, DEFAULT_SUFFIX)
    user_properties = {
        'uid': TEST_ENTRY_RDN_START,
        'cn': TEST_ENTRY_NAME,
        'sn': TEST_ENTRY_NAME,
        'uidNumber': '1001',
        'gidNumber': '2001',
        'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)
    }
    tuser = users.create(properties=user_properties)

    # start -> change -> start -> change: reusing an RDN is the sequence
    # that used to make the cenotaph add fail with err=68 (bz1790986)
    for rdn in (TEST_ENTRY_RDN_CHANGE, TEST_ENTRY_RDN_START, TEST_ENTRY_RDN_CHANGE):
        tuser.rename('uid={}'.format(rdn), newsuperior=None, deloldrdn=True)

    log.info("Check the log messages for cenotaph error")
    error_msg = ".*urp_fixup_add_cenotaph - failed to add cenotaph, err= 68"
    assert not topo.ds_error_log.match(error_msg)
def add_user(server, no, init_val):
    """Add a regular test user (uid RDN) with employeeNumber=*init_val*.

    :param server: DirSrv connection used to issue the ADD
    :param no: numeric suffix appended to USER_UID
    :param init_val: initial employeeNumber value (bytes or str)
    :returns: DN of the added entry
    """
    (uid, dn) = _user_get_dn(no)
    # was log.fatal — wrong level for an informational progress message
    log.info('Adding user (%s): ' % dn)
    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
                             'uid': [uid],
                             'sn': [uid],
                             'cn': [uid],
                             'employeeNumber': init_val})))
    return dn


def _employeenumber_user_get_dn(no):
    """Return (employeeNumber, dn) for a user whose RDN is employeeNumber=*no*."""
    employeeNumber = str(no)
    dn = 'employeeNumber=%s,%s' % (employeeNumber, BASE_DISTINGUISHED)
    return (employeeNumber, dn)


def add_employeenumber_user(server, no):
    """Add a test user whose RDN attribute is employeeNumber.

    :param server: DirSrv connection used to issue the ADD
    :param no: offset added to EMPLOYEENUMBER_RDN_START to build the RDN
    :returns: DN of the added entry
    """
    # the first tuple element is the employeeNumber string, not a uid —
    # reuse it instead of recomputing str(EMPLOYEENUMBER_RDN_START + no)
    (employee_number, dn) = _employeenumber_user_get_dn(EMPLOYEENUMBER_RDN_START + no)
    # was log.fatal — wrong level for an informational progress message
    log.info('Adding user (%s): ' % dn)
    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
                             'uid': [employee_number],
                             'sn': [employee_number],
                             'cn': [employee_number],
                             'employeeNumber': employee_number})))
    return dn
for employeeNumber') time.sleep(1) continue; break; except ldap.NO_SUCH_OBJECT: time.sleep(1) pass time.sleep(1) ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == 1 def test_ticket49658_init(topo): """Specify a test case purpose or name here :id: f8d43cef-c385-46a2-b32b-fdde2114b45e :setup: 3 Supplier Instances :steps: 1. Create 3 suppliers 2. Create on M3 MAX_USER test entries having a single-value attribute employeeNumber=11 and update it MOD_DEL 11 + MOD_ADD 1000 3. Check they are replicated on M1 and M2 :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_11 = '11'.encode() value_1000 = '1000'.encode() # Step 2 M3.add_s(Entry((BASE_DISTINGUISHED, {'objectclass': ['top', 'organizationalUnit'], 'ou': ['distinguished']}))) for i in range(MAX_EMPLOYEENUMBER_USER): test_user_dn= add_employeenumber_user(M3, i) log.info('Adding %s on M3' % test_user_dn) ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == (i + 1) M3.add_s(Entry((BASE_REGULAR, {'objectclass': ['top', 'organizationalUnit'], 'ou': ['regular']}))) for i in range(MAX_STANDARD_USER): test_user_dn= add_user(M3, i, value_11) log.info('Adding %s on M3' % test_user_dn) M3.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_11), (ldap.MOD_ADD, 'employeeNumber', value_1000)]) ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == (MAX_EMPLOYEENUMBER_USER + i + 1) # Step 3 # Check the last entry is replicated on M1 (uid, test_user_dn) = _user_get_dn(MAX_STANDARD_USER - 1) for j in range(30): try: ent = M1.getEntry(test_user_dn, ldap.SCOPE_BASE,) if not ent.hasAttr('employeeNumber'): # wait for the MOD log.info('M1 waiting for employeeNumber') time.sleep(1) continue; break; except ldap.NO_SUCH_OBJECT: time.sleep(1) pass time.sleep(1) ents = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_USER # Check the last entry is replicated on M2 for j in range(30): try: ent = M2.getEntry(test_user_dn, ldap.SCOPE_BASE,) if not ent.hasAttr('employeeNumber'): # wait for the MOD log.info('M2 waiting for employeeNumber') time.sleep(1) continue; break; except ldap.NO_SUCH_OBJECT: time.sleep(1) pass time.sleep(1) ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_USER def test_ticket49658_0(topo): """Do MOD(DEL+ADD) and replicate MOST RECENT first M1: MOD(DEL+ADD) -> V1 M2: MOD(DEL+ADD) -> V1 expected: V1 
:id: 5360b304-9b33-4d37-935f-ab73e0baa1aa :setup: 3 Supplier Instances 1. using user_0 where employNumber=1000 :steps: 1. Create 3 suppliers 2. Isolate M1 and M2 by pausing the replication agreements 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 4. On M2 do MOD_DEL 1000 + MOD_ADD_13 5. Enable replication agreement M2 -> M3, so that update step 6 is replicated first 6. Enable replication agreement M1 -> M3, so that update step 5 is replicated second 7. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000'.encode() last = '0' value_end = last.encode() theFilter = '(employeeNumber=%s)' % last (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 2 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 3 # Oldest update # check that the entry on M1 contains employeeNumber= M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 
'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 time.sleep(1) # Step 4 # More recent update # check that the entry on M2 contains employeeNumber= M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end def test_ticket49658_1(topo): """Do MOD(DEL+ADD) and replicate OLDEST first M2: MOD(DEL+ADD) -> V1 M1: MOD(DEL+ADD) -> V1 expected: V1 :id: bc6620d9-eae1-48af-8a4f-bc14405ea6b6 :setup: 3 Supplier Instances 1. using user_1 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M2 do MOD_DEL 1000 + MOD_ADD_13 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000'.encode() last = '1' value_end = last.encode() theFilter = '(employeeNumber=%s)' % last # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(1)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M2 contains employeeNumber= M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search 
def test_ticket49658_2(topo):
    """Do MOD(ADD+DEL) on both suppliers and replicate OLDEST first

    M2: MOD(ADD+DEL) -> V1
    M1: MOD(ADD+DEL) -> V1
    expected: V1

    :id: 672ff689-5b76-4107-92be-fb95d08400b3
    :setup: 3 Supplier Instances
        1. using user_2 where employeeNumber=1000
    :steps:
        1. Isolate M1 and M2 by pausing the replication agreements
        2. On M2 do MOD_ADD 2 + MOD_DEL 1000
        3. On M1 do MOD_ADD 2 + MOD_DEL 1000
        4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first
        5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second
        6. Check that the employeeNumber is 2 on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. MOD succeeds on M2
        3. MOD succeeds on M1
        4. Replication resumes from M2
        5. Replication resumes from M1
        6. employeeNumber is 2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    value_1000 = '1000'.encode()
    last = '2'
    value_end = last.encode()
    theFilter = '(employeeNumber=%s)' % last
    # This test takes the user_2
    (uid, test_user_dn) = _user_get_dn(int(last))

    # Step 1
    # disable all RA from M1 and M2
    # only M3 can replicate the update
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Step 2
    # Oldest update
    # check that the entry on M2 contains employeeNumber=2
    M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)])
    ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter)
    assert len(ents) == 1
    time.sleep(1)

    # Step 3
    # More recent update
    # check that the entry on M1 contains employeeNumber=2
    M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)])
    ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter)
    assert len(ents) == 1

    # Step 4
    # Re-enable M2 before M1 so that on M3 the OLDEST update (from M2)
    # is replicated first — this is the point of this test variant
    for ra in agreement_m2_m1, agreement_m2_m3:
        M2.agreement.resume(ra[0].dn)

    # Step 5
    # Re-enable M1 so that the more recent update reaches M3 second
    time.sleep(4)
    for ra in agreement_m1_m2, agreement_m1_m3:
        M1.agreement.resume(ra[0].dn)

    # Step 6
    # Check that M1 still contains employeeNumber=2
    ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter)
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end

    # Check that M2 still contains employeeNumber=2
    ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter)
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end

    # Check that M3 still contains all employeeNumber users and converged to 2
    ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_STANDARD_USER
    ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter)
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end
For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000'.encode() last = '3' value_end = last.encode() theFilter = '(employeeNumber=%s)' % last # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)]) ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the 
most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end def test_ticket49658_4(topo): """Do MOD(ADD+DEL) MOD(REPL) and replicate MOST RECENT first M1: MOD(ADD+DEL) -> V1 M2: MOD(REPL) -> V1 expected: V1 :id: 8f7ce9ff-e36f-48cd-b0ed-b7077a3e7341 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. 
Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000'.encode() last = '4' value_end = last.encode() theFilter = '(employeeNumber=%s)' % last # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= M2.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 
#time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end def test_ticket49658_5(topo): """Do MOD(REPL) MOD(ADD+DEL) and replicate MOST RECENT first M1: MOD(REPL) -> V1 M2: MOD(ADD+DEL) -> V1 expected: V1 :id: d6b88e3c-a509-4d3e-8e5d-849237993f47 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. 
Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000'.encode() last = '5' value_end = last.encode() theFilter = '(employeeNumber=%s)' % last # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= M1.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) ents 
= M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end def test_ticket49658_6(topo): """Do M1: MOD(REPL) -> V1 M2: MOD(ADD+DEL) -> V2 expected: V2 :id: 5eb67db1-2ff2-4c17-85af-e124b45aace3 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. 
Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '6' value_S1 = '6.1' value_S2 = '6.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert 
len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_7(topo): """Do M1: MOD(ADD+DEL) -> V1 M2: MOD(REPL) -> V2 expected: V2 :id: a79036ca-0e1b-453e-9524-fb44e1d7c929 :setup: 3 Supplier Instances :steps: :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '7' value_S1 = '7.1' value_S2 = '7.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S1.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # 
Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_8(topo): """Do M1: MOD(DEL+ADD) -> V1 M2: MOD(REPL) -> V2 expected: V2 :id: 06acb988-b735-424a-9886-b0557ee12a9a :setup: 3 Supplier Instances :steps: :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '8' value_S1 = '8.1' value_S2 = '8.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, 
agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_9(topo): """Do M1: MOD(REPL) -> V1 M2: MOD(DEL+ADD) -> V2 expected: V2 :id: 3a4c1be3-e3b9-44fe-aa5a-72a3b1a8985c :setup: 3 Supplier Instances :steps: :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. 
# If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '9' value_S1 = '9.1' value_S2 = '9.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated 
before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_10(topo): """Do M1: MOD(REPL) -> V1 M2: MOD(REPL) -> V2 expected: V2 :id: 1413341a-45e6-422a-b6cc-9fde6fc9bb15 :setup: 3 Supplier Instances :steps: :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). 
# Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '10' value_S1 = '10.1' value_S2 = '10.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update 
is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_11(topo): """Do M2: MOD(REPL) -> V2 M1: MOD(REPL) -> V1 expected: V1 :id: a2810403-418b-41d7-948c-6f8ca46e2f29 :setup: 3 Supplier Instances :steps: :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). 
# Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '11' value_S1 = '11.1' value_S2 = '11.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update 
is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_12(topo): """Do M2: MOD(ADD+DEL) -> V2 M1: MOD(REPL) -> V1 expected: V1 :id: daba6f3c-e060-4d3f-8f9c-25ea4c1bca48 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. 
Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '12' value_S1 = '12.1' value_S2 = '12.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = 
description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_13(topo): """Do M2: MOD(DEL+ADD) -> V2 M1: MOD(REPL) -> V1 expected: V1 :id: 50006b1f-d17c-47a1-86a5-4d78b2a6eab1 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '13' value_S1 = '13.1' value_S2 = '13.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, 
agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_14(topo): """Do M2: MOD(DEL+ADD) -> V2 M1: MOD(DEL+ADD) -> V1 expected: V1 :id: d45c58f1-c95e-4314-9cdd-53a2dd391218 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. 
Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '14' value_S1 = '14.1' value_S2 = '14.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains 
employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_15(topo): """Do M2: MOD(ADD+DEL) -> V2 M1: MOD(DEL+ADD) -> V1 expected: V1 :id: e077f312-e0af-497a-8a31-3395873512d8 :setup: 3 Supplier Instances 1. using user_2 where employNumber=1000 :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 3. On M2 do MOD_REPL _13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_1000 = '1000' last = '15' value_S1 = '15.1' value_S2 = '15.2' description = { "S1": M1, "S2": M2, "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S2"].modify_s(test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S1"].modify_s(test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 #time.sleep(60) # Step 7 # Renable M2 before M1 so that on M3, the most recent update is replicated before for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) # Step 8 # Renable M1 so that on M3 oldest update is now replicated 
time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_STANDARD_USER ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def _resume_ra_M1_then_M2(M1, M2, M3): agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) time.sleep(4) for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) time.sleep(4) def _resume_ra_M2_then_M1(M1, M2, M3): agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) for ra in agreement_m2_m1, agreement_m2_m3: M2.agreement.resume(ra[0].dn) time.sleep(4) for ra in agreement_m1_m2, agreement_m1_m3: M1.agreement.resume(ra[0].dn) time.sleep(4) def test_ticket49658_16(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V1 expected: V1 resume order: M2, M1 :id: 131b4e4c-0a6d-45df-88aa-cb26a1cd6fa6 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. 
Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '1' last = '1' value_S1 = '1.1' value_S2 = value_S1 description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S2_MODRDN": value_S2, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More 
recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 _resume_ra_M2_then_M1(M1, M2, M3) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_17(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 expected: V2 resume order: M2 then M1 :id: 1d3423ec-a2f3-4c03-9765-ec0924f03cb2 :setup: 3 Supplier Instances 1. 
Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '2' last = '2' value_S1 = '2.1' value_S2 = '2.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S2_MODRDN": value_S2, "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= 
description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 _resume_ra_M2_then_M1(M1, M2, M3) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_18(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 expected: V2 resume order: M1 then M2 :id: c50ea634-ba35-4943-833b-0524a446214f :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '2' last = '3' value_S1 = '3.1' value_S2 = '3.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S2_MODRDN": value_S2, "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 _resume_ra_M1_then_M2(M1, M2, M3) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_19(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 Replicate order: M2 then M1 expected: V1 :id: 787db943-fc95-4fbb-b066-5e8895cfd296 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). 
# Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '3' last = '4' value_S1 = '4.1' value_S2 = '4.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MODRDN": value_S2, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 _resume_ra_M2_then_M1(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_20(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 Replicate order: M1 then M2 expected: V1 :id: a3df2f72-b8b1-4bb8-b0ca-ebd306539c8b :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '3' last = '5' value_S1 = '5.1' value_S2 = '5.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MODRDN": value_S2, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = 
description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 _resume_ra_M1_then_M2(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_21(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(DEL/ADD) -> V1 Replicate order: M2 then M1 expected: V1 :id: f338188c-6877-4a2e-bbb1-14b81ac7668a :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. 
Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '3' last = '6' value_S1 = '6.1' value_S2 = '6.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], "S2_MODRDN": value_S2, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], 
newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 _resume_ra_M2_then_M1(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_22(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(DEL/ADD) -> V1 Replicate: M1 then M2 expected: V1 :id: f3b33f52-d5c7-4b49-89cf-3cbe4b060674 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '3' last = '7' value_S1 = '7.1' value_S2 = '7.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], "S2_MODRDN": value_S2, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, 
description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 _resume_ra_M1_then_M2(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_23(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 M2: MOD(REPL) -> V2 Replicate order: M2 then M1 expected: V2 :id: 2c550174-33a0-4666-8abf-f3362e19ae29 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. 
On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '7' last = '8' value_S1 = '8.1' value_S2 = '8.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "S2_MODRDN": value_S2, "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 
'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 _resume_ra_M2_then_M1(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_24(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 M2: MOD(REPL) -> V2 Replicate order: M1 then M2 expected: V2 :id: af6a472c-29e3-4833-a5dc-d96c684d33f9 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '7' last = '9' value_S1 = '9.1' value_S2 = '9.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], "S2_MODRDN": value_S2, "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) 
description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 _resume_ra_M1_then_M2(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_25(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 M2: MOD(DEL/ADD) -> V2 Replicate order: M1 then M2 expected: V2 :id: df2cba7c-7afa-44b3-b1df-261e8bf0c9b4 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '7' last = '10' value_S1 = '10.1' value_S2 = '10.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], "S2_MODRDN": value_S2, "expected": value_S2} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = 
_employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) assert len(ents) == 1 _resume_ra_M1_then_M2(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
def test_ticket49658_26(topo):
    """MODRDN on both suppliers then a MOD on each; M2 updates replicate first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    M2: MOD(DEL/ADD) -> V2
    Replicate order: M2 then M1
    expected: V2

    :id: 8e9f85d3-22cc-4a84-a828-cec29202821f
    :setup: 3 Supplier Instances
        1. Use employeenumber=11,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=11 -> 11.1, then MOD(REPLACE) -> 11.1
        3. On M2 do MODRDN employeeNumber=11 -> 11.2, then MOD(DEL/ADD) -> 11.2
        4. Resume replication so that M2 updates reach M3/M1 first, then M1 updates
        5. Check that employeeNumber is 11.2 (the most recent value) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN and MOD should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 11.2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '11'
    value_S1 = '11.1'
    value_S2 = '11.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M2 updates are replicated first, then M1 updates
    _resume_ra_M2_then_M1(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_27(topo):
    """MODRDN on both suppliers then a MOD on each; M1 updates replicate first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(DEL/ADD) -> V1
    M2: MOD(REPL) -> V2
    Replicate order: M1 then M2
    expected: V2

    :id: d85bd9ef-b257-4027-a29c-dfba87c0bf51
    :setup: 3 Supplier Instances
        1. Use employeenumber=12,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=12 -> 12.1, then MOD(DEL/ADD) -> 12.1
        3. On M2 do MODRDN employeeNumber=12 -> 12.2, then MOD(REPLACE) -> 12.2
        4. Resume replication so that M1 updates reach M3/M2 first, then M2 updates
        5. Check that employeeNumber is 12.2 (the most recent value) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN and MOD should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 12.2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '12'
    value_S1 = '12.1'
    value_S2 = '12.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M1 updates are replicated first, then M2 updates
    _resume_ra_M1_then_M2(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_28(topo):
    """MODRDN on both suppliers then a MOD on each; M2 updates replicate first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(DEL/ADD) -> V1
    M2: MOD(REPL) -> V2
    Replicate order: M2 then M1
    expected: V2

    :id: 286cd17e-225e-490f-83c9-20618b9407a9
    :setup: 3 Supplier Instances
        1. Use employeenumber=13,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=13 -> 13.1, then MOD(DEL/ADD) -> 13.1
        3. On M2 do MODRDN employeeNumber=13 -> 13.2, then MOD(REPLACE) -> 13.2
        4. Resume replication so that M2 updates reach M3/M1 first, then M1 updates
        5. Check that employeeNumber is 13.2 (the most recent value) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN and MOD should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 13.2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '13'
    value_S1 = '13.1'
    value_S2 = '13.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M2 updates are replicated first, then M1 updates
    _resume_ra_M2_then_M1(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_29(topo):
    """MODRDN on both suppliers then a MOD on each; M1 updates replicate first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(DEL/ADD) -> V1
    M2: MOD(DEL/ADD) -> V2
    Replicate order: M1 then M2
    expected: V2

    :id: b81f3885-7965-48fe-8dbf-692d1150d061
    :setup: 3 Supplier Instances
        1. Use employeenumber=14,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=14 -> 14.1, then MOD(DEL/ADD) -> 14.1
        3. On M2 do MODRDN employeeNumber=14 -> 14.2, then MOD(DEL/ADD) -> 14.2
        4. Resume replication so that M1 updates reach M3/M2 first, then M2 updates
        5. Check that employeeNumber is 14.2 (the most recent value) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN and MOD should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 14.2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '14'
    value_S1 = '14.1'
    value_S2 = '14.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M1 updates are replicated first, then M2 updates
    _resume_ra_M1_then_M2(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_30(topo):
    """MODRDN on both suppliers then a MOD on each; M2 updates replicate first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(DEL/ADD) -> V1
    M2: MOD(DEL/ADD) -> V2
    Replicate order: M2 then M1
    expected: V2

    :id: 4dce88f8-31db-488b-aeb4-fce4173e3f12
    :setup: 3 Supplier Instances
        1. Use employeenumber=15,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=15 -> 15.1, then MOD(DEL/ADD) -> 15.1
        3. On M2 do MODRDN employeeNumber=15 -> 15.2, then MOD(DEL/ADD) -> 15.2
        4. Resume replication so that M2 updates reach M3/M1 first, then M1 updates
        5. Check that employeeNumber is 15.2 (the most recent value) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN and MOD should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 15.2 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '15'
    value_S1 = '15.1'
    value_S2 = '15.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M2 updates are replicated first, then M1 updates
    _resume_ra_M2_then_M1(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_31(topo):
    """MODRDN/MOD on both suppliers, then M2 renames back to V1; M2 replicates first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    M2: MOD(REPL) -> V2
    M2: MODRDN -> V1
    Replicate order: M2 then M1
    expected: V1

    :id: 2791a3df-25a2-4e6e-a5e9-514d76af43fb
    :setup: 3 Supplier Instances
        1. Use employeenumber=16,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=16 -> 16.1, then MOD(REPLACE) -> 16.1
        3. On M2 do MODRDN employeeNumber=16 -> 16.2, MOD(REPLACE) -> 16.2,
           then a second MODRDN back to 16.1
        4. Resume replication so that M2 updates reach M3/M1 first, then M1 updates
        5. Check that employeeNumber is 16.1 (the most recent MODRDN) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN, MOD and the second MODRDN should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 16.1 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '16'
    value_S1 = '16.1'
    value_S2 = '16.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN_1": value_S2,
        "S2_MODRDN_2": value_S1,
        "expected": value_S1}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN_1"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Latest update: second MODRDN on M2, renaming the entry back to value_S1
    description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN_2"])
    assert len(ents) == 1
    time.sleep(1)

    # Resume replication: M2 updates are replicated first, then M1 updates
    _resume_ra_M2_then_M1(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
def test_ticket49658_32(topo):
    """MODRDN/MOD on both suppliers, then M2 renames back to V1; M1 replicates first

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    M2: MOD(REPL) -> V2
    M2: MODRDN -> V1
    Replicate order: M1 then M2
    expected: V1

    :id: 6af57e2e-a325-474a-9c9d-f07cd2244657
    :setup: 3 Supplier Instances
        1. Use employeenumber=17,ou=distinguished,ou=people,
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 do MODRDN employeeNumber=17 -> 17.1, then MOD(REPLACE) -> 17.1
        3. On M2 do MODRDN employeeNumber=17 -> 17.2, MOD(REPLACE) -> 17.2,
           then a second MODRDN back to 17.1
        4. Resume replication so that M1 updates reach M3/M2 first, then M2 updates
        5. Check that employeeNumber is 17.1 (the most recent MODRDN) on all servers
    :expectedresults:
        1. Pausing the agreements should succeed
        2. MODRDN and MOD should succeed on M1
        3. MODRDN, MOD and the second MODRDN should succeed on M2
        4. Replication should resume successfully
        5. employeeNumber should be 17.1 on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]

    last = '17'
    value_S1 = '17.1'
    value_S2 = '17.2'
    # S1/S2 describe which server plays which role and which updates it applies
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN_1": value_S2,
        "S2_MODRDN_2": value_S1,
        "expected": value_S1}

    # This test takes the user <last>
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so that nothing
    # replicates before we choose the order ourselves.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on M1 to value_S1
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on M2 to value_S2
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN_1"])
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M1 (on M1 the entry is now named after value_S1)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # MOD on M2 (on M2 the entry is now named after value_S2)
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Latest update: second MODRDN on M2, renaming the entry back to value_S1
    description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"],
                               newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                                      '(employeeNumber=%s)' % description["S2_MODRDN_2"])
    assert len(ents) == 1
    time.sleep(1)

    # Resume replication: M1 updates are replicated first, then M2 updates
    _resume_ra_M1_then_M2(M1, M2, M3)

    # Check that M1 converged on the expected value
    ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M2 converged on the expected value
    ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # M3 must still hold all employeeNumber users and the renamed entry
    # must carry the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE,
                       '(employeeNumber=%s)' % description["expected"])
    log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"]))
    assert len(ents) == 1
    assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_33(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 M2: MODRDN -> V1 Replicate order: M2 then M1 expected: V1 :id: 81100b04-d3b6-47df-90eb-d96ef14a3722 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '7' last = '18' value_S1 = '18.1' value_S2 = '18.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MODRDN_1": value_S2, "S2_MODRDN_2": value_S1, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, 
description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) assert len(ents) == 1 time.sleep(1) _resume_ra_M2_then_M1(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() def test_ticket49658_34(topo): """Do M1: MODRDN -> V1 M2: MODRDN -> V2 M1: MOD(REPL) -> V1 M2: MODRDN -> V1 Replicate order: M1 then M2 expected: V1 :id: 796d3d77-2401-49f5-89fa-80b231d3e758 :setup: 3 Supplier Instances 1. Use employeenumber=1000,ou=distinguished,ou=people, :steps: 1. Isolate M1 and M2 by pausing the replication agreements 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second 6. Check that the employeeNumber is 13 on all servers :expectedresults: 1. Fill in the result that is expected 2. For each test step """ # If you need any test suite initialization, # please, write additional fixture for that (including finalizer). # Topology for suites are predefined in lib389/topologies.py. # If you need host, port or any other data about instance, # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) if DEBUGGING: # Add debugging steps(if any)... 
pass M1 = topo.ms["supplier1"] M2 = topo.ms["supplier2"] M3 = topo.ms["supplier3"] value_init = '7' last = '19' value_S1 = '19.1' value_S2 = '19.2' description = { "S1": M1, "S2": M2, "S1_MODRDN": value_S1, "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], "S2_MODRDN_1": value_S2, "S2_MODRDN_2": value_S1, "expected": value_S1} # This test takes the user_1 (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) # # Step 4 # # disable all RA from M1 and M2 # only M3 can replicate the update # agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) M1.agreement.pause(agreement_m1_m2[0].dn) M1.agreement.pause(agreement_m1_m3[0].dn) M2.agreement.pause(agreement_m2_m1[0].dn) M2.agreement.pause(agreement_m2_m3[0].dn) # Step 5 # Oldest update # check that the entry on M1 contains employeeNumber= description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) assert len(ents) == 1 time.sleep(1) # Step 6 # More recent update # check that the entry on M2 contains employeeNumber= description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) description["S1"].modify_s(new_test_user_dn, 
description["S1_MOD"]) ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) assert len(ents) == 1 time.sleep(1) (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) assert len(ents) == 1 time.sleep(1) _resume_ra_M1_then_M2(M1, M2, M3) #time.sleep(3600) # Step 9 # Check that M1 still contains employeeNumber= ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M2 still contains employeeNumber= ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() # Check that M3 still contain employeeNumber and it contains employeeNumber= ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') assert len(ents) == MAX_EMPLOYEENUMBER_USER ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) assert len(ents) == 1 assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket49788_test.py000066400000000000000000000057331421664411400260320ustar00rootroot00000000000000import logging import time import ldap import base64 import pytest import os from lib389 import Entry from lib389.tasks import * from lib389.utils import * from lib389.properties import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) VALID_STRINGS = [ 'dHJpdmlhbCBzdHJpbmc=' # trivial string '8J+YjQ==', # 😠'aGVsbG8g8J+YjQ==', # hello 😠'8J+krCBTbyB0aGVyZSEg8J+YoQ==', # 🤬 So there! 😡 'YnJvY2NvbGkgYmVlZg==', # broccoli beef 'Y2FybmUgZGUgYnLDs2NvbGk=', # carne de brócoli '2YTYrdmFINio2YLYsdmKINio2LHZiNmD2YTZig==', # لحم بقري بروكلي '6KW/5YWw6Iqx54mb6IKJ', # 西兰花牛肉 '6KW/6Jit6Iqx54mb6IKJ', # 西蘭花牛肉 '0LPQvtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', # говедÑко меÑо од брокула ] INVALID_STRINGS = [ '0LPQxtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', '8R+KjQ==', ] USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX def test_ticket49781(topology_st): """ Test that four-byte UTF-8 characters are accepted by the directory string syntax. 
""" # Add a test user try: topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': ['top', 'person'], 'sn': 'sn', 'description': 'Four-byte UTF8 test', 'cn': 'test_user'}))) except ldap.LDAPError as e: log.fatal('Failed to add test user') assert False try: topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', b'something else')]) except ldap.LDAPError as e: log.fatal('trivial test failed!') assert False # Iterate over valid tests for s in VALID_STRINGS: decoded = base64.b64decode(s) try: topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) except ldap.LDAPError as e: log.fatal('description: ' + decoded.decode('UTF-8') + ' failed') assert False # Iterate over invalid tests for s in INVALID_STRINGS: decoded = base64.b64decode(s) try: topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) log.fatal('base64-decoded string ' + s + " was accepted, when it shouldn't have been!") assert False except ldap.LDAPError as e: pass if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket50078_test.py000066400000000000000000000044111421664411400260020ustar00rootroot00000000000000import pytest from lib389.utils import * from lib389.topologies import topology_m1h1c1 from lib389.idm.user import UserAccounts from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, SUFFIX, RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) pytestmark = pytest.mark.tier2 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) TEST_USER = "test_user" def test_ticket50078(topology_m1h1c1): """ Test that for a MODRDN operation the cenotaph entry is created on a hub or consumer. 
""" M1 = topology_m1h1c1.ms["supplier1"] H1 = topology_m1h1c1.hs["hub1"] C1 = topology_m1h1c1.cs["consumer1"] # # Test replication is working # if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False ua = UserAccounts(M1, DEFAULT_SUFFIX) ua.create(properties={ 'uid': "%s%d" % (TEST_USER, 1), 'cn' : "%s%d" % (TEST_USER, 1), 'sn' : 'user', 'uidNumber' : '1000', 'gidNumber' : '2000', 'homeDirectory' : '/home/testuser' }) user = ua.get('%s1' % TEST_USER) log.info(" Rename the test entry %s..." % user) user.rename('uid=test_user_new') # wait until replication is in sync if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): log.info('Replication is working.') else: log.fatal('Replication is not working.') assert False # check if cenotaph was created on hub and consumer ents = H1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") assert len(ents) == 1 ents = C1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") assert len(ents) == 1 if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket50232_test.py000066400000000000000000000113021421664411400257670ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # import logging import pytest # from lib389.tasks import * # from lib389.utils import * from lib389.topologies import topology_st from lib389.replica import ReplicationManager,Replicas from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME from lib389.idm.user import UserAccounts from lib389.idm.organization import Organization from lib389.idm.organizationalunit import OrganizationalUnit pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) NORMAL_SUFFIX = 'o=normal' NORMAL_BACKEND_NAME = 'normal' REVERSE_SUFFIX = 'o=reverse' REVERSE_BACKEND_NAME = 'reverse' def _enable_replica(instance, suffix): repl = ReplicationManager(DEFAULT_SUFFIX) repl._ensure_changelog(instance) replicas = Replicas(instance) replicas.create(properties={ 'cn': 'replica', 'nsDS5ReplicaRoot': suffix, 'nsDS5ReplicaId': '1', 'nsDS5Flags': '1', 'nsDS5ReplicaType': '3' }) def _populate_suffix(instance, suffixname): o = Organization(instance, 'o={}'.format(suffixname)) o.create(properties={ 'o': suffixname, 'description': 'test' }) ou = OrganizationalUnit(instance, 'ou=people,o={}'.format(suffixname)) ou.create(properties={ 'ou': 'people' }) def _get_replica_generation(instance, suffix): replicas = Replicas(instance) replica = replicas.get(suffix) ruv = replica.get_ruv() return ruv._data_generation def _test_export_import(instance, suffix, backend): before_generation = _get_replica_generation(instance, suffix) instance.stop() instance.db2ldif( bename=backend, suffixes=[suffix], excludeSuffixes=[], encrypt=False, repl_data=True, outputfile="/tmp/output_file", ) instance.ldif2db( bename=None, excludeSuffixes=None, encrypt=False, suffixes=[suffix], import_file="/tmp/output_file", ) instance.start() after_generation = _get_replica_generation(instance, suffix) assert (before_generation == after_generation) def test_ticket50232_normal(topology_st): """ The fix for ticket 50232 The test sequence is: - create suffix - add suffix entry and some child entries - 
"normally" done after populating suffix: enable replication - get RUV and database generation - export -r - import - get RUV and database generation - assert database generation has not changed """ log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME}) topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None) _populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME) repl = ReplicationManager(DEFAULT_SUFFIX) repl._ensure_changelog(topology_st.standalone) replicas = Replicas(topology_st.standalone) replicas.create(properties={ 'cn': 'replica', 'nsDS5ReplicaRoot': NORMAL_SUFFIX, 'nsDS5ReplicaId': '1', 'nsDS5Flags': '1', 'nsDS5ReplicaType': '3' }) _test_export_import(topology_st.standalone, NORMAL_SUFFIX, NORMAL_BACKEND_NAME) def test_ticket50232_reverse(topology_st): """ The fix for ticket 50232 The test sequence is: - create suffix - enable replication before suffix enztry is added - add suffix entry and some child entries - get RUV and database generation - export -r - import - get RUV and database generation - assert database generation has not changed """ log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') # # Setup Replication # log.info('Setting up replication...') repl = ReplicationManager(DEFAULT_SUFFIX) # repl.create_first_supplier(topology_st.standalone) # # enable dynamic plugins, memberof and retro cl plugin # topology_st.standalone.backend.create(REVERSE_SUFFIX, {BACKEND_NAME: REVERSE_BACKEND_NAME}) topology_st.standalone.mappingtree.create(REVERSE_SUFFIX, bename=REVERSE_BACKEND_NAME, parent=None) _enable_replica(topology_st.standalone, REVERSE_SUFFIX) _populate_suffix(topology_st.standalone, REVERSE_BACKEND_NAME) _test_export_import(topology_st.standalone, REVERSE_SUFFIX, REVERSE_BACKEND_NAME) if __name__ == '__main__': # Run isolated # 
-s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket50234_test.py000066400000000000000000000046511421664411400260020ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import time import ldap import pytest from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organizationalunit import OrganizationalUnit pytestmark = pytest.mark.tier2 log = logging.getLogger(__name__) def test_ticket50234(topology_st): """ The fix for ticket 50234 The test sequence is: - create more than 10 entries with objectclass organizational units ou=org{} - add an Account in one of them, eg below ou=org5 - do searches with search base ou=org5 and search filter "objectclass=organizationalunit" - a subtree search should return 1 entry, the base entry - a onelevel search should return no entry """ log.info('Testing Ticket 50234 - onelvel search returns not matching entry') for i in range(1,15): ou = OrganizationalUnit(topology_st.standalone, "ou=Org{},{}".format(i, DEFAULT_SUFFIX)) ou.create(properties={'ou': 'Org'.format(i)}) properties = { 'uid': 'Jeff Vedder', 'cn': 'Jeff Vedder', 'sn': 'user', 'uidNumber': '1000', 'gidNumber': '2000', 'homeDirectory': '/home/' + 'JeffVedder', 'userPassword': 'password' } user = UserAccount(topology_st.standalone, "cn=Jeff Vedder,ou=org5,{}".format(DEFAULT_SUFFIX)) user.create(properties=properties) # in a subtree search the entry used as search base matches the filter and shoul be returned ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_SUBTREE, "(objectclass=organizationalunit)") # in a onelevel search the only child is an 
useraccount which does not match the filter # no entry should be returned, which would cause getEntry to raise an exception we need to handle found = 1 try: ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_ONELEVEL, "(objectclass=organizationalunit)") except ldap.NO_SUCH_OBJECT: found = 0 assert (found == 0) if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tickets/ticket548_test.py000066400000000000000000000372431421664411400256500ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import pytest from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED # Skip on older versions pytestmark = [pytest.mark.tier2, pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] log = logging.getLogger(__name__) # Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... 
:( SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX USER_PW = 'password' def days_to_secs(days): # Value of 60 * 60 * 24 return days * 86400 # Values are in days def set_global_pwpolicy(topology_st, min_=1, max_=10, warn=3): log.info(" +++++ Enable global password policy +++++\n") # Enable password policy try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) assert False # Convert our values to seconds min_secs = days_to_secs(min_) max_secs = days_to_secs(max_) warn_secs = days_to_secs(warn) log.info(" Set global password Min Age -- %s day\n" % min_) try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMinAge', ('%s' % min_secs).encode())]) except ldap.LDAPError as e: log.error('Failed to set passwordMinAge: error ' + e.message['desc']) assert False log.info(" Set global password Expiration -- on\n") try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordExp', b'on')]) except ldap.LDAPError as e: log.error('Failed to set passwordExp: error ' + e.message['desc']) assert False log.info(" Set global password Max Age -- %s days\n" % max_) try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', ('%s' % max_secs).encode())]) except ldap.LDAPError as e: log.error('Failed to set passwordMaxAge: error ' + e.message['desc']) assert False log.info(" Set global 
password Warning -- %s days\n" % warn) try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordWarning', ('%s' % warn_secs).encode())]) except ldap.LDAPError as e: log.error('Failed to set passwordWarning: error ' + e.message['desc']) assert False def set_subtree_pwpolicy(topology_st, min_=2, max_=20, warn=6): log.info(" +++++ Enable subtree level password policy +++++\n") # Convert our values to seconds min_secs = days_to_secs(min_) max_secs = days_to_secs(max_) warn_secs = days_to_secs(warn) log.info(" Add the container") try: topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), 'cn': 'nsPwPolicyContainer'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to add subtree container: error ' + e.message['desc']) # assert False try: # Purge the old policy topology_st.standalone.delete_s(SUBTREE_PWP) except: pass log.info( " Add the password policy subentry {passwordMustChange: on, passwordMinAge: %s, passwordMaxAge: %s, passwordWarning: %s}" % ( min_, max_, warn)) try: topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), 'cn': SUBTREE_PWPDN, 'passwordMustChange': 'on', 'passwordExp': 'on', 'passwordMinAge': '%s' % min_secs, 'passwordMaxAge': '%s' % max_secs, 'passwordWarning': '%s' % warn_secs, 'passwordChange': 'on', 'passwordStorageScheme': 'clear'}))) except ldap.LDAPError as e: log.error('Failed to add passwordpolicy: error ' + e.message['desc']) assert False log.info(" Add the COS template") try: topology_st.standalone.add_s( Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), 'cn': SUBTREE_PWPDN, 'cosPriority': '1', 'cn': SUBTREE_COS_TMPLDN, 'pwdpolicysubentry': SUBTREE_PWP}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to add COS template: error ' + e.message['desc']) # assert False log.info(" Add the COS definition") try: 
topology_st.standalone.add_s( Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), 'cn': SUBTREE_PWPDN, 'costemplatedn': SUBTREE_COS_TMPL, 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: log.error('Failed to add COS def: error ' + e.message['desc']) # assert False time.sleep(1) def update_passwd(topology_st, user, passwd, newpasswd): log.info(" Bind as {%s,%s}" % (user, passwd)) topology_st.standalone.simple_bind_s(user, passwd) try: topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', newpasswd.encode())]) except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ 'desc']) assert False time.sleep(1) def check_shadow_attr_value(entry, attr_type, expected, dn): if entry.hasAttr(attr_type): actual = entry.getValue(attr_type) if int(actual) == expected: log.info('%s of entry %s has expected value %s' % (attr_type, dn, actual)) assert True else: log.fatal('%s %s of entry %s does not have expected value %s' % (attr_type, actual, dn, expected)) assert False else: log.fatal('entry %s does not have %s attr' % (dn, attr_type)) assert False def test_ticket548_test_with_no_policy(topology_st): """ Check shadowAccount under no password policy """ log.info("Case 1. 
No password policy") log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) log.info('Add an entry' + USER1_DN) try: topology_st.standalone.add_s( Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), 'sn': '1', 'cn': 'user 1', 'uid': 'user1', 'givenname': 'user', 'mail': 'user1@' + DEFAULT_SUFFIX, 'userpassword': USER_PW}))) except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) assert False edate = int(time.time() / (60 * 60 * 24)) log.info('Search entry %s' % USER1_DN) log.info("Bind as %s" % USER1_DN) topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['shadowLastChange']) check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) log.info("Check shadowAccount with no policy was successfully verified.") def test_ticket548_test_global_policy(topology_st): """ Check shadowAccount with global password policy """ log.info("Case 2. 
Check shadowAccount with global password policy") log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) set_global_pwpolicy(topology_st) log.info('Add an entry' + USER2_DN) try: topology_st.standalone.add_s( Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), 'sn': '2', 'cn': 'user 2', 'uid': 'user2', 'givenname': 'user', 'mail': 'user2@' + DEFAULT_SUFFIX, 'userpassword': USER_PW}))) except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) assert False edate = int(time.time() / (60 * 60 * 24)) log.info("Bind as %s" % USER1_DN) topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) log.info('Search entry %s' % USER1_DN) entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) # passwordMinAge -- 1 day check_shadow_attr_value(entry, 'shadowMin', 1, USER1_DN) # passwordMaxAge -- 10 days check_shadow_attr_value(entry, 'shadowMax', 10, USER1_DN) # passwordWarning -- 3 days check_shadow_attr_value(entry, 'shadowWarning', 3, USER1_DN) log.info("Bind as %s" % USER2_DN) topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) log.info('Search entry %s' % USER2_DN) entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") check_shadow_attr_value(entry, 'shadowLastChange', edate, USER2_DN) # passwordMinAge -- 1 day check_shadow_attr_value(entry, 'shadowMin', 1, USER2_DN) # passwordMaxAge -- 10 days check_shadow_attr_value(entry, 'shadowMax', 10, USER2_DN) # passwordWarning -- 3 days check_shadow_attr_value(entry, 'shadowWarning', 3, USER2_DN) # Bind as DM again, change policy log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) set_global_pwpolicy(topology_st, 3, 30, 9) # change the user password, then check again. 
log.info("Bind as %s" % USER2_DN) topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) newpasswd = USER_PW + '2' update_passwd(topology_st, USER2_DN, USER_PW, newpasswd) log.info("Re-bind as %s with new password" % USER2_DN) topology_st.standalone.simple_bind_s(USER2_DN, newpasswd) ## This tests if we update the shadow values on password change. log.info('Search entry %s' % USER2_DN) entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") # passwordMinAge -- 1 day check_shadow_attr_value(entry, 'shadowMin', 3, USER2_DN) # passwordMaxAge -- 10 days check_shadow_attr_value(entry, 'shadowMax', 30, USER2_DN) # passwordWarning -- 3 days check_shadow_attr_value(entry, 'shadowWarning', 9, USER2_DN) log.info("Check shadowAccount with global policy was successfully verified.") def test_ticket548_test_subtree_policy(topology_st): """ Check shadowAccount with subtree level password policy """ log.info("Case 3. Check shadowAccount with subtree level password policy") log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # Check the global policy values set_subtree_pwpolicy(topology_st, 2, 20, 6) log.info('Add an entry' + USER3_DN) try: topology_st.standalone.add_s( Entry((USER3_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), 'sn': '3', 'cn': 'user 3', 'uid': 'user3', 'givenname': 'user', 'mail': 'user3@' + DEFAULT_SUFFIX, 'userpassword': USER_PW}))) except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to add user' + USER3_DN + ': error ' + e.message['desc']) assert False log.info('Search entry %s' % USER3_DN) entry0 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") log.info('Expecting shadowLastChange 0 since passwordMustChange is on') check_shadow_attr_value(entry0, 'shadowLastChange', 0, USER3_DN) # passwordMinAge -- 2 day check_shadow_attr_value(entry0, 'shadowMin', 2, USER3_DN) # passwordMaxAge -- 20 days 
check_shadow_attr_value(entry0, 'shadowMax', 20, USER3_DN) # passwordWarning -- 6 days check_shadow_attr_value(entry0, 'shadowWarning', 6, USER3_DN) log.info("Bind as %s" % USER3_DN) topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) log.info('Search entry %s' % USER3_DN) try: entry1 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") except ldap.UNWILLING_TO_PERFORM: log.info('test_ticket548: Search by' + USER3_DN + ' failed by UNWILLING_TO_PERFORM as expected') except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) assert False log.info("Bind as %s and updating the password with a new one" % USER3_DN) topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) # Bind as DM again, change policy log.info("Bind as %s" % DN_DM) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) set_subtree_pwpolicy(topology_st, 4, 40, 12) newpasswd = USER_PW + '0' update_passwd(topology_st, USER3_DN, USER_PW, newpasswd) log.info("Re-bind as %s with new password" % USER3_DN) topology_st.standalone.simple_bind_s(USER3_DN, newpasswd) try: entry2 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") except ldap.LDAPError as e: log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) assert False edate = int(time.time() / (60 * 60 * 24)) log.info('Expecting shadowLastChange %d once userPassword is updated', edate) check_shadow_attr_value(entry2, 'shadowLastChange', edate, USER3_DN) log.info('Search entry %s' % USER3_DN) entry = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") check_shadow_attr_value(entry, 'shadowLastChange', edate, USER3_DN) # passwordMinAge -- 1 day check_shadow_attr_value(entry, 'shadowMin', 4, USER3_DN) # passwordMaxAge -- 10 days check_shadow_attr_value(entry, 'shadowMax', 40, USER3_DN) # passwordWarning -- 3 days check_shadow_attr_value(entry, 'shadowWarning', 
12, USER3_DN) log.info("Check shadowAccount with subtree level policy was successfully verified.") if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s %s" % CURRENT_FILE) 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tmp/000077500000000000000000000000001421664411400216345ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tmp/README000066400000000000000000000006161421664411400225170ustar00rootroot00000000000000TMP DIRECTORY README This directory is used to store files(LDIFs, etc) that are created during the ticket script runtime. The script is also responsible for removing any files it places in this directory. This directory can be retrieved via getDir() from the DirSrv class. Example: tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR) new_ldif = tmp_dir_path + "export.ldif" 389-ds-base-389-ds-base-2.0.15/dirsrvtests/tests/tmp/__init__.py000066400000000000000000000000001421664411400237330ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/docker.mk000066400000000000000000000002421421664411400170740ustar00rootroot00000000000000 suse: docker build -t 389-ds-suse:master -f docker/389-ds-suse/Dockerfile . fedora: docker build -t 389-ds-fedora:master -f docker/389-ds-fedora/Dockerfile . 389-ds-base-389-ds-base-2.0.15/docker/000077500000000000000000000000001421664411400165455ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/docker/389-ds-fedora/000077500000000000000000000000001421664411400207325ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/docker/389-ds-fedora/Dockerfile000066400000000000000000000026051421664411400227270ustar00rootroot00000000000000# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- FROM fedora:latest MAINTAINER 389-devel@lists.fedoraproject.org EXPOSE 3389 3636 ADD ./ /usr/local/src/389-ds-base WORKDIR /usr/local/src/389-ds-base # install dependencies RUN dnf upgrade -y \ && dnf install --setopt=strict=False -y @buildsys-build rpm-build make bzip2 git rsync \ `grep -E "^(Build)?Requires" rpm/389-ds-base.spec.in \ | grep -v -E '(name|MODULE)' \ | awk '{ print $2 }' \ | sed 's/%{python3_pkgversion}/3/g' \ | grep -v "^/" \ | grep -v pkgversion \ | sort | uniq \ | tr '\n' ' '` \ && dnf clean all # build RUN make -f rpm.mk rpms || sh -c 'echo "build failed, sleeping for some time to allow you debug" ; sleep 3600' RUN dnf install -y dist/rpms/*389*.rpm && \ dnf clean all # Link some known static locations to point to /data RUN mkdir -p /data/config && \ mkdir -p /data/ssca && \ mkdir -p /data/run && \ mkdir -p /var/run/dirsrv && \ ln -s /data/config /etc/dirsrv/slapd-localhost && \ ln -s /data/ssca /etc/dirsrv/ssca && \ ln -s /data/run /var/run/dirsrv VOLUME /data #USER dirsrv HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ CMD /usr/libexec/dirsrv/dscontainer -H CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] 389-ds-base-389-ds-base-2.0.15/docker/389-ds-suse/000077500000000000000000000000001421664411400204515ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/docker/389-ds-suse/Dockerfile000066400000000000000000000061341421664411400224470ustar00rootroot00000000000000#!BuildTag: 389-ds-container FROM opensuse/leap:15.1 MAINTAINER wbrown@suse.de EXPOSE 3389 3636 # RUN zypper ar -G obs://network:ldap network:ldap && \ RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ zypper mr -p 97 "network:ldap" && \ zypper --gpg-auto-import-keys ref RUN zypper --non-interactive si --build-deps-only 
389-ds && \ zypper in -y acl cargo cyrus-sasl cyrus-sasl-plain db48-utils krb5-client libLLVM7 libedit0 libgit2-26 libhttp_parser2_7_1 libssh2-1 mozilla-nss-tools rust # Install build dependencies # RUN zypper in -C -y autoconf automake cracklib-devel cyrus-sasl-devel db-devel doxygen gcc-c++ \ # gdb krb5-devel libcmocka-devel libevent-devel libtalloc-devel libtevent-devel libtool \ # net-snmp-devel openldap2-devel pam-devel pkgconfig python-rpm-macros "pkgconfig(icu-i18n)" \ # "pkgconfig(icu-uc)" "pkgconfig(libcap)" "pkgconfig(libpcre)" "pkgconfig(libsystemd)" \ # "pkgconfig(nspr)" "pkgconfig(nss)" rsync cargo rust rust-std acl cyrus-sasl-plain db-utils \ # bind-utils krb5 fillup shadow openldap2-devel pkgconfig "pkgconfig(nspr)" "pkgconfig(nss)" \ # "pkgconfig(systemd)" python3-argcomplete python3-argparse-manpage python3-ldap \ # python3-pyasn1 python3-pyasn1-modules python3-python-dateutil python3-six krb5-client \ # mozilla-nss-tools # Push source code to the container ADD ./ /usr/local/src/389-ds-base WORKDIR /usr/local/src/389-ds-base # Build and install # Derived from rpm --eval '%configure' on opensuse. 
RUN autoreconf -fiv && \ ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ --program-prefix= \ --disable-dependency-tracking \ --prefix=/usr \ --exec-prefix=/usr \ --bindir=/usr/bin \ --sbindir=/usr/sbin \ --sysconfdir=/etc \ --datadir=/usr/share \ --includedir=/usr/include \ --libdir=/usr/lib64 \ --libexecdir=/usr/lib \ --localstatedir=/var \ --sharedstatedir=/var/lib \ --mandir=/usr/share/man \ --infodir=/usr/share/info \ --disable-dependency-tracking \ --enable-debug \ --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ --enable-cmocka --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ make -j 12 && \ make install && \ make lib389 && \ make lib389-install # Link some known static locations to point to /data RUN mkdir -p /data/config && \ mkdir -p /data/ssca && \ mkdir -p /data/run && \ mkdir -p /var/run/dirsrv && \ ln -s /data/config /etc/dirsrv/slapd-localhost && \ ln -s /data/ssca /etc/dirsrv/ssca && \ ln -s /data/run /var/run/dirsrv # Temporal volumes for each instance VOLUME /data # Set the userup correctly. 
# USER dirsrv HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ CMD /usr/libexec/dirsrv/dscontainer -H CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] 389-ds-base-389-ds-base-2.0.15/docker/389-ds-suse/Dockerfile.release000066400000000000000000000050021421664411400240570ustar00rootroot00000000000000#!BuildTag: 389-ds-container FROM opensuse/leap:15.1 MAINTAINER wbrown@suse.de EXPOSE 3389 3636 # RUN zypper ar -G obs://network:ldap network:ldap && \ RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ zypper mr -p 97 "network:ldap" && \ zypper --gpg-auto-import-keys ref # Push source code to the container - we do this early because we want the zypper and # build instructions in a single RUN stanza to minimise the container final size. ADD ./ /usr/local/src/389-ds-base WORKDIR /usr/local/src/389-ds-base # Build and install # Derived from rpm --eval '%configure' on opensuse. 
RUN zypper --non-interactive si --build-deps-only 389-ds && \ zypper in -y 389-ds rust cargo rust-std && \ zypper rm -y 389-ds lib389 && \ autoreconf -fiv && \ ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ --program-prefix= \ --disable-dependency-tracking \ --prefix=/usr \ --exec-prefix=/usr \ --bindir=/usr/bin \ --sbindir=/usr/sbin \ --sysconfdir=/etc \ --datadir=/usr/share \ --includedir=/usr/include \ --libdir=/usr/lib64 \ --libexecdir=/usr/lib \ --localstatedir=/var \ --sharedstatedir=/var/lib \ --mandir=/usr/share/man \ --infodir=/usr/share/info \ --disable-dependency-tracking \ --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ make -j 12 && \ make install && \ make lib389 && \ make lib389-install && \ make clean && \ zypper rm -y -u rust cargo rust-std gcc gcc-c++ automake autoconf # Link some known static locations to point to /data RUN mkdir -p /data/config && \ mkdir -p /data/ssca && \ mkdir -p /data/run && \ mkdir -p /var/run/dirsrv && \ ln -s /data/config /etc/dirsrv/slapd-localhost && \ ln -s /data/ssca /etc/dirsrv/ssca && \ ln -s /data/run /var/run/dirsrv # Temporal volumes for each instance VOLUME /data # Set the userup correctly. This was created as part of the 389ds in above. # For k8s we'll need 389 to not drop privs? I think we don't specify a user # here and ds should do the right thing if a non root user runs the server. # USER dirsrv CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] 389-ds-base-389-ds-base-2.0.15/docker/README.md000066400000000000000000000043451421664411400200320ustar00rootroot00000000000000 #### Issue Description This folder contains proof of concept dockerfiles for 389 Directory Server. This utilises many of our latest developments for installing instances and configuring them. We have developed native, clean, and powerful container integration. 
This container image is usable on CentOS / RHEL / Fedora atomic host, and pure docker implementations. Please note this image will not currently work in openshift due to a reliance on volume features that openshift does not support, but we will correct this. #### Using the files These docker files are designed to be build from docker hub as the will do a remote git fetch during the build process. They are not currently designed to operate on a local source tree (we may add this later). ``` cd docker/389ds_poc; docker build -t 389ds_poc:latest . ``` #### Deploying and using the final product ``` docker create -h ldap.example.com 389ds_poc:latest docker start docker inspect | grep IPAddress ldapsearch -H ldap://
-b '' -s base -x + .... supportedLDAPVersion: 3 vendorName: 389 Project vendorVersion: 389-Directory/1.3.6.3 B2017.093.354 ``` To expose the ports you may consider adding: ``` -P OR -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT ``` You can not currently use a persistent volume with the 389ds_poc image due to an issue with docker volumes. This will be corrected by https://github.com/389ds/389-ds-base/issues/2272 #### Warnings The 389ds_poc container is supplied with a static Directory Manager password. This is HIGHLY INSECURE and should not be used in production. The password is "directory manager password". The 389ds_poc container has some issues with volume over-rides due to our use of a pre-built instance. We are working to resolve this, but until a solution is derived, you can not override the datavolumes. #### Other ideas * We could develop a dockerfile that builds and runs DS tests in an isolated environment. * Make a container image that allows mounting an arbitrary 389-ds repo into it for simple development purposes. #### NOTE of 389 DS project support This is not a "supported" method of deployment to a production system and may result in data loss. This should be considered an experimental deployment method until otherwise announced. 
389-ds-base-389-ds-base-2.0.15/docs/000077500000000000000000000000001421664411400162265ustar00rootroot00000000000000389-ds-base-389-ds-base-2.0.15/docs/custom.css000066400000000000000000000566671421664411400202760ustar00rootroot00000000000000/* The standard CSS for doxygen 1.8.6 */ body, table, div, p, dl { font: 400 14px/22px Liberation Sans,DejaVu Sans,Roboto,sans-serif; } /* @group Heading Levels */ h1.groupheader { font-size: 150%; } .title { font: 400 14px/28px Liberation Sans,DejaVu Sans,Roboto,sans-serif; font-size: 150%; font-weight: bold; margin: 10px 2px; } h2.groupheader { border-bottom: 1px solid #879ECB; color: #354C7B; font-size: 150%; font-weight: normal; margin-top: 1.75em; padding-top: 8px; padding-bottom: 4px; width: 100%; } h3.groupheader { font-size: 100%; } h1, h2, h3, h4, h5, h6 { -webkit-transition: text-shadow 0.5s linear; -moz-transition: text-shadow 0.5s linear; -ms-transition: text-shadow 0.5s linear; -o-transition: text-shadow 0.5s linear; transition: text-shadow 0.5s linear; margin-right: 15px; } h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { text-shadow: 0 0 15px cyan; } dt { font-weight: bold; } div.multicol { -moz-column-gap: 1em; -webkit-column-gap: 1em; -moz-column-count: 3; -webkit-column-count: 3; } p.startli, p.startdd { margin-top: 2px; } p.starttd { margin-top: 0px; } p.endli { margin-bottom: 0px; } p.enddd { margin-bottom: 4px; } p.endtd { margin-bottom: 2px; } /* @end */ caption { font-weight: bold; } span.legend { font-size: 70%; text-align: center; } h3.version { font-size: 90%; text-align: center; } div.qindex, div.navtab{ background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; } div.qindex, div.navpath { width: 100%; line-height: 140%; } div.navtab { margin-right: 15px; } /* @group Link Styling */ a { color: #3D578C; font-weight: normal; text-decoration: none; } .contents a:visited { color: #4665A2; } a:hover { text-decoration: underline; } a.qindex { font-weight: bold; } a.qindexHL { 
font-weight: bold; background-color: #9CAFD4; color: #ffffff; border: 1px double #869DCA; } .contents a.qindexHL:visited { color: #ffffff; } a.el { font-weight: bold; } a.elRef { } a.code, a.code:visited, a.line, a.line:visited { color: #4665A2; } a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { color: #4665A2; } /* @end */ dl.el { margin-left: -1cm; } pre.fragment { border: 1px solid #C4CFE5; background-color: #FBFCFD; padding: 4px 6px; margin: 4px 8px 4px 2px; overflow: auto; word-wrap: break-word; font-size: 9pt; line-height: 125%; font-family: monospace, fixed; font-size: 105%; } div.fragment { padding: 4px 6px; margin: 4px 8px 4px 2px; background-color: #FBFCFD; border: 1px solid #C4CFE5; } div.line { font-family: monospace, fixed; font-size: 13px; min-height: 13px; line-height: 1.0; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ white-space: -o-pre-wrap; /* Opera 7 */ white-space: pre-wrap; /* CSS3 */ word-wrap: break-word; /* IE 5.5+ */ text-indent: -53px; padding-left: 53px; padding-bottom: 0px; margin: 0px; -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } div.line.glow { background-color: cyan; box-shadow: 0 0 10px cyan; } span.lineno { padding-right: 4px; text-align: right; border-right: 2px solid #0F0; background-color: #E8E8E8; white-space: pre; } span.lineno a { background-color: #D8D8D8; } span.lineno a:hover { background-color: #C8C8C8; } div.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px; padding: 0.2em; border: solid thin #333; border-radius: 0.5em; 
-webkit-border-radius: .5em; -moz-border-radius: .5em; box-shadow: 2px 2px 3px #999; -webkit-box-shadow: 2px 2px 3px #999; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); } div.groupHeader { margin-left: 16px; margin-top: 12px; font-weight: bold; } div.groupText { margin-left: 16px; font-style: italic; } body { background-color: white; color: black; margin: 0; } div.contents { margin-top: 10px; margin-left: 12px; margin-right: 8px; } td.indexkey { background-color: #EBEFF6; font-weight: bold; border: 1px solid #C4CFE5; margin: 2px 0px 2px 0; padding: 2px 10px; white-space: nowrap; vertical-align: top; } td.indexvalue { background-color: #EBEFF6; border: 1px solid #C4CFE5; padding: 2px 10px; margin: 2px 0px; } tr.memlist { background-color: #EEF1F7; } p.formulaDsp { text-align: center; } img.formulaDsp { } img.formulaInl { vertical-align: middle; } div.center { text-align: center; margin-top: 0px; margin-bottom: 0px; padding: 0px; } div.center img { border: 0px; } address.footer { text-align: right; padding-right: 12px; } img.footer { border: 0px; vertical-align: middle; } /* @group Code Colorization */ span.keyword { color: #008000 } span.keywordtype { color: #604020 } span.keywordflow { color: #e08000 } span.comment { color: #800000 } span.preprocessor { color: #806020 } span.stringliteral { color: #002080 } span.charliteral { color: #008080 } span.vhdldigit { color: #ff00ff } span.vhdlchar { color: #000000 } span.vhdlkeyword { color: #700070 } span.vhdllogic { color: #ff0000 } blockquote { background-color: #F7F8FB; border-left: 2px solid #9CAFD4; margin: 0 24px 0 4px; padding: 0 12px 0 16px; } /* @end */ /* .search { color: #003399; font-weight: bold; } form.search { margin-bottom: 0px; margin-top: 0px; } input.search { font-size: 75%; color: #000080; font-weight: normal; 
background-color: #e8eef2; } */ td.tiny { font-size: 75%; } .dirtab { padding: 4px; border-collapse: collapse; border: 1px solid #A3B4D7; } th.dirtab { background: #EBEFF6; font-weight: bold; } hr { height: 0px; border: none; border-top: 1px solid #4A6AAA; } hr.footer { height: 1px; } /* @group Member Descriptions */ table.memberdecls { border-spacing: 0px; padding: 0px; } .memberdecls td, .fieldtable tr { -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } .memberdecls td.glow, .fieldtable tr.glow { background-color: cyan; box-shadow: 0 0 15px cyan; } .mdescLeft, .mdescRight, .memItemLeft, .memItemRight, .memTemplItemLeft, .memTemplItemRight, .memTemplParams { background-color: #F9FAFC; border: none; margin: 4px; padding: 1px 0 0 8px; } .mdescLeft, .mdescRight { padding: 0px 8px 4px 8px; color: #555; } .memSeparator { border-bottom: 1px solid #DEE4F0; line-height: 1px; margin: 0px; padding: 0px; } .memItemLeft, .memTemplItemLeft { white-space: nowrap; } .memItemRight { width: 100%; } .memTemplParams { color: #4665A2; white-space: nowrap; font-size: 80%; } /* @end */ /* @group Member Details */ /* Styles for detailed member documentation */ .memtemplate { font-size: 80%; color: #4665A2; font-weight: normal; margin-left: 9px; } .memnav { background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } .mempage { width: 100%; } .memitem { padding: 0; margin-bottom: 10px; margin-right: 5px; -webkit-transition: box-shadow 0.5s linear; -moz-transition: box-shadow 0.5s linear; -ms-transition: box-shadow 0.5s linear; -o-transition: box-shadow 
0.5s linear; transition: box-shadow 0.5s linear; display: table !important; width: 100%; } .memitem.glow { box-shadow: 0 0 15px cyan; } .memname { font-weight: bold; margin-left: 6px; } .memname td { vertical-align: bottom; } .memproto, dl.reflist dt { border-top: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 0px 6px 0px; color: #253555; font-weight: bold; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E2E8F2; /* opera specific markup */ box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); border-top-right-radius: 4px; border-top-left-radius: 4px; /* firefox specific markup */ -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; -moz-border-radius-topright: 4px; -moz-border-radius-topleft: 4px; /* webkit specific markup */ -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); -webkit-border-top-right-radius: 4px; -webkit-border-top-left-radius: 4px; } .memdoc, dl.reflist dd { border-bottom: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 10px 2px 10px; background-color: #FBFCFD; border-top-width: 0; background-image:url('nav_g.png'); background-repeat:repeat-x; background-color: #FFFFFF; /* opera specific markup */ border-bottom-left-radius: 4px; border-bottom-right-radius: 4px; box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); /* firefox specific markup */ -moz-border-radius-bottomleft: 4px; -moz-border-radius-bottomright: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; /* webkit specific markup */ -webkit-border-bottom-left-radius: 4px; -webkit-border-bottom-right-radius: 4px; -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); } dl.reflist dt { padding: 5px; } dl.reflist dd { margin: 0px 0px 10px 0px; padding: 5px; } .paramkey { text-align: right; } .paramtype { white-space: nowrap; } .paramname { color: #602020; white-space: nowrap; } .paramname em { font-style: normal; } .paramname code 
{ line-height: 14px; } .params, .retval, .exception, .tparams { margin-left: 0px; padding-left: 0px; } .params .paramname, .retval .paramname { font-weight: bold; vertical-align: top; } .params .paramtype { font-style: italic; vertical-align: top; } .params .paramdir { font-family: "courier new",courier,monospace; vertical-align: top; } table.mlabels { border-spacing: 0px; } td.mlabels-left { width: 100%; padding: 0px; } td.mlabels-right { vertical-align: bottom; padding: 0px; white-space: nowrap; } span.mlabels { margin-left: 8px; } span.mlabel { background-color: #728DC1; border-top:1px solid #5373B4; border-left:1px solid #5373B4; border-right:1px solid #C4CFE5; border-bottom:1px solid #C4CFE5; text-shadow: none; color: white; margin-right: 4px; padding: 2px 3px; border-radius: 3px; font-size: 7pt; white-space: nowrap; vertical-align: middle; } /* @end */ /* these are for tree view when not used as main index */ div.directory { margin: 10px 0px; border-top: 1px solid #A8B8D9; border-bottom: 1px solid #A8B8D9; width: 100%; } .directory table { border-collapse:collapse; } .directory td { margin: 0px; padding: 0px; vertical-align: top; } .directory td.entry { white-space: nowrap; padding-right: 6px; padding-top: 3px; } .directory td.entry a { outline:none; } .directory td.entry a img { border: none; } .directory td.desc { width: 100%; padding-left: 6px; padding-right: 6px; padding-top: 3px; border-left: 1px solid rgba(0,0,0,0.05); } .directory tr.even { padding-left: 6px; background-color: #F7F8FB; } .directory img { vertical-align: -30%; } .directory .levels { white-space: nowrap; width: 100%; text-align: right; font-size: 9pt; } .directory .levels span { cursor: pointer; padding-left: 2px; padding-right: 2px; color: #3D578C; } div.dynheader { margin-top: 8px; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } address { font-style: normal; color: #2A3D61; } 
table.doxtable { border-collapse:collapse; margin-top: 4px; margin-bottom: 4px; } table.doxtable td, table.doxtable th { border: 1px solid #2D4068; padding: 3px 7px 2px; } table.doxtable th { background-color: #374F7F; color: #FFFFFF; font-size: 110%; padding-bottom: 4px; padding-top: 5px; } table.fieldtable { /*width: 100%;*/ margin-bottom: 10px; border: 1px solid #A8B8D9; border-spacing: 0px; -moz-border-radius: 4px; -webkit-border-radius: 4px; border-radius: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); } .fieldtable td, .fieldtable th { padding: 3px 7px 2px; } .fieldtable td.fieldtype, .fieldtable td.fieldname { white-space: nowrap; border-right: 1px solid #A8B8D9; border-bottom: 1px solid #A8B8D9; vertical-align: top; } .fieldtable td.fieldname { padding-top: 3px; } .fieldtable td.fielddoc { border-bottom: 1px solid #A8B8D9; /*width: 100%;*/ } .fieldtable td.fielddoc p:first-child { margin-top: 0px; } .fieldtable td.fielddoc p:last-child { margin-bottom: 2px; } .fieldtable tr:last-child td { border-bottom: none; } .fieldtable th { background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E2E8F2; font-size: 90%; color: #253555; padding-bottom: 4px; padding-top: 5px; text-align:left; -moz-border-radius-topleft: 4px; -moz-border-radius-topright: 4px; -webkit-border-top-left-radius: 4px; -webkit-border-top-right-radius: 4px; border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom: 1px solid #A8B8D9; } .tabsearch { top: 0px; left: 10px; height: 36px; background-image: url('tab_b.png'); z-index: 101; overflow: hidden; font-size: 13px; } .navpath ul { font-size: 11px; background-image:url('tab_b.png'); background-repeat:repeat-x; background-position: 0 -5px; height:30px; line-height:30px; color:#8AA0CC; border:solid 1px #C2CDE4; overflow:hidden; margin:0px; padding:0px; } .navpath li { list-style-type:none; float:left; 
padding-left:10px; padding-right:15px; background-image:url('bc_s.png'); background-repeat:no-repeat; background-position:right; color:#364D7C; } .navpath li.navelem a { height:32px; display:block; text-decoration: none; outline: none; color: #283A5D; font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); text-decoration: none; } .navpath li.navelem a:hover { color:#6884BD; } .navpath li.footer { list-style-type:none; float:right; padding-left:10px; padding-right:15px; background-image:none; background-repeat:no-repeat; background-position:right; color:#364D7C; font-size: 8pt; } div.summary { float: right; font-size: 8pt; padding-right: 5px; width: 50%; text-align: right; } div.summary a { white-space: nowrap; } div.ingroups { font-size: 8pt; width: 50%; text-align: left; } div.ingroups a { white-space: nowrap; } div.header { background-image:url('nav_h.png'); background-repeat:repeat-x; background-color: #F9FAFC; margin: 0px; border-bottom: 1px solid #C4CFE5; } div.headertitle { padding: 5px 5px 5px 10px; } dl { padding: 0 0 0 10px; } /* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ dl.section { margin-left: 0px; padding-left: 0px; } dl.note { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #D0C000; } dl.warning, dl.attention { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #FF0000; } dl.pre, dl.post, dl.invariant { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00D000; } dl.deprecated { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #505050; } dl.todo { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00C0E0; } dl.test { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #3030E0; } dl.bug { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #C08050; } dl.section dd 
{ margin-bottom: 6px; } #projectlogo { text-align: center; vertical-align: bottom; border-collapse: separate; } #projectlogo img { border: 0px none; } #projectname { font: 300% Tahoma, Arial,sans-serif; margin: 0px; padding: 2px 0px; } #projectbrief { font: 120% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #projectnumber { font: 50% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #titlearea { padding: 0px; margin: 0px; width: 100%; border-bottom: 1px solid #5373B4; } .image { text-align: center; } .dotgraph { text-align: center; } .mscgraph { text-align: center; } .diagraph { text-align: center; } .caption { font-weight: bold; } div.zoom { border: 1px solid #90A5CE; } dl.citelist { margin-bottom:50px; } dl.citelist dt { color:#334975; float:left; font-weight:bold; margin-right:10px; padding:5px; } dl.citelist dd { margin:2px 0; padding:5px 0; } div.toc { padding: 14px 25px; background-color: #F4F6FA; border: 1px solid #D8DFEE; border-radius: 7px 7px 7px 7px; float: right; height: auto; margin: 0 20px 10px 10px; width: 200px; } div.toc li { background: url("bdwn.png") no-repeat scroll 0 5px transparent; font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; margin-top: 5px; padding-left: 10px; padding-top: 2px; } div.toc h3 { font: bold 12px/1.2 Arial,FreeSans,sans-serif; color: #4665A2; border-bottom: 0 none; margin: 0; } div.toc ul { list-style: none outside none; border: medium none; padding: 0px; } div.toc li.level1 { margin-left: 0px; } div.toc li.level2 { margin-left: 15px; } div.toc li.level3 { margin-left: 30px; } div.toc li.level4 { margin-left: 45px; } .inherit_header { font-weight: bold; color: gray; cursor: pointer; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .inherit_header td { padding: 6px 0px 2px 5px; } .inherit { display: none; } tr.heading h2 { margin-top: 12px; margin-bottom: 4px; } /* tooltip related style info */ .ttc { 
position: absolute; display: none; } #powerTip { cursor: default; white-space: nowrap; background-color: white; border: 1px solid gray; border-radius: 4px 4px 4px 4px; box-shadow: 1px 1px 7px gray; display: none; font-size: smaller; max-width: 80%; opacity: 0.9; padding: 1ex 1em 1em; position: absolute; z-index: 2147483647; } #powerTip div.ttdoc { color: grey; font-style: italic; } #powerTip div.ttname a { font-weight: bold; } #powerTip div.ttname { font-weight: bold; } #powerTip div.ttdeci { color: #006318; } #powerTip div { margin: 0px; padding: 0px; font: 12px/16px Roboto,sans-serif; } #powerTip:before, #powerTip:after { content: ""; position: absolute; margin: 0px; } #powerTip.n:after, #powerTip.n:before, #powerTip.s:after, #powerTip.s:before, #powerTip.w:after, #powerTip.w:before, #powerTip.e:after, #powerTip.e:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.se:after, #powerTip.se:before, #powerTip.nw:after, #powerTip.nw:before, #powerTip.sw:after, #powerTip.sw:before { border: solid transparent; content: " "; height: 0; width: 0; position: absolute; } #powerTip.n:after, #powerTip.s:after, #powerTip.w:after, #powerTip.e:after, #powerTip.nw:after, #powerTip.ne:after, #powerTip.sw:after, #powerTip.se:after { border-color: rgba(255, 255, 255, 0); } #powerTip.n:before, #powerTip.s:before, #powerTip.w:before, #powerTip.e:before, #powerTip.nw:before, #powerTip.ne:before, #powerTip.sw:before, #powerTip.se:before { border-color: rgba(128, 128, 128, 0); } #powerTip.n:after, #powerTip.n:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.nw:after, #powerTip.nw:before { top: 100%; } #powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { border-top-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.n:before { border-top-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.n:after, #powerTip.n:before { left: 50%; } #powerTip.nw:after, #powerTip.nw:before { right: 14px; } #powerTip.ne:after, #powerTip.ne:before { left: 
14px; } #powerTip.s:after, #powerTip.s:before, #powerTip.se:after, #powerTip.se:before, #powerTip.sw:after, #powerTip.sw:before { bottom: 100%; } #powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { border-bottom-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { border-bottom-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.s:after, #powerTip.s:before { left: 50%; } #powerTip.sw:after, #powerTip.sw:before { right: 14px; } #powerTip.se:after, #powerTip.se:before { left: 14px; } #powerTip.e:after, #powerTip.e:before { left: 100%; } #powerTip.e:after { border-left-color: #ffffff; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.e:before { border-left-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } #powerTip.w:after, #powerTip.w:before { right: 100%; } #powerTip.w:after { border-right-color: #ffffff; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.w:before { border-right-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } @media print { #top { display: none; } #side-nav { display: none; } #nav-path { display: none; } body { overflow:visible; } h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } .summary { display: none; } .memitem { page-break-inside: avoid; } #doc-content { margin-left:0 !important; height:auto !important; width:auto !important; overflow:inherit; display:inline; } } 389-ds-base-389-ds-base-2.0.15/docs/doc_header.html000066400000000000000000000031301421664411400211660ustar00rootroot00000000000000 $projectname: $title $title $treeview $search $mathjax $extrastylesheet
$projectbrief
$searchbox
389-ds-base-389-ds-base-2.0.15/docs/slapi.doxy.in000066400000000000000000003374271421664411400206700ustar00rootroot00000000000000# Doxyfile 1.9.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the configuration # file that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = @PACKAGE_NAME@ # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. 
PROJECT_NUMBER = @PACKAGE_VERSION@ # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = @abs_top_builddir@ # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all generated output in the proper direction. # Possible values are: None, LTR, RTL and Context. # The default value is: None. OUTPUT_TEXT_DIRECTION = None # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. 
If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = NO # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. 
Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line # such as # /*************** # as being the beginning of a Javadoc-style comment "banner". If set to NO, the # Javadoc-style will behave just like regular comments and it will not be # interpreted by doxygen. # The default value is: NO. JAVADOC_BANNER = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. 
MULTILINE_CPP_IS_BRIEF = NO # By default Python docstrings are displayed as preformatted text and doxygen's # special commands cannot be used. By setting PYTHON_DOCSTRING to NO the # doxygen's special commands can be used and the contents of the docstring # documentation blocks is shown as doxygen documentation. # The default value is: YES. PYTHON_DOCSTRING = YES # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines (in the resulting output). You can put ^^ in the value part of an # alias to insert a newline as if a physical newline was in the original file. # When you need a literal { or } or , in the value part of an alias you have to # escape them by means of a backslash (\), this can lead to conflicts with the # commands \{ and \} for these it is advised to use the version @{ and @} or use # a double escape (\\{ and \\}) ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. 
Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice # sources only. Doxygen will then generate output that is more tailored for that # language. For instance, namespaces will be presented as modules, types will be # separated into more groups, etc. # The default value is: NO. OPTIMIZE_OUTPUT_SLICE = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, JavaScript, # Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, # Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. 
In the later case the parser # tries to guess whether the code is fixed or free formatted code, this is the # default for Fortran type files). For instance to make doxygen treat .inc files # as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. When specifying no_extension you should add # * to the FILE_PATTERNS. # # Note see also the list of default file extension mappings. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. # Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 5 # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # If one adds a struct or class to a group and this option is enabled, then also # any nested class or struct is added to the same group. By default this option # is disabled and one has to add nested compounds explicitly via \ingroup. 
# The default value is: NO. GROUP_NESTED_COMPOUNDS = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = YES # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. 
This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 # The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use # during processing. When set to 0 doxygen will based this on the number of # cores available in the system. You can set it explicitly to a value larger # than 0 to get more control over the balance between CPU load and processing # speed. At this moment only the input processing can be done using multiple # threads. Since this is still an experimental feature the default is set to 1, # which efficively disables parallel processing. Please report any issues you # encounter. Generating dot graphs in parallel is controlled by the # DOT_NUM_THREADS setting. # Minimum value: 0, maximum value: 32, default value: 1. NUM_PROC_THREADS = 1 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. 
# Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual # methods of a class will be included in the documentation. # The default value is: NO. EXTRACT_PRIV_VIRTUAL = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If this flag is set to YES, the name of an unnamed parameter in a declaration # will be determined by the corresponding definition. 
By default unnamed # parameters remain unnamed in the output. # The default value is: YES. RESOLVE_UNNAMED_PARAMS = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # declarations. If set to NO, these declarations will be included in the # documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # With the correct setting of option CASE_SENSE_NAMES doxygen will better be # able to match the capabilities of the underlying filesystem. In case the # filesystem is case sensitive (i.e. it supports files in the same directory # whose names only differ in casing), the option must be set to YES to properly # deal with such files in case they appear in the input. 
For filesystems that # are not case sensitive the option should be be set to NO to properly deal with # output files written for symbols that only differ in casing, such as for two # classes, one named CLASS and the other named Class, and to also support # references to files without having to specify the exact matching casing. On # Windows (including Cygwin) and MacOS, users should typically set this option # to NO, whereas on Linux or other Unix flavors it should typically be set to # YES. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. 
INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. 
SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. 
The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of the # FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option.
You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. 
WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong or incomplete # parameter documentation, but not about the absence of documentation. If # EXTRACT_ALL is set to YES then this flag will automatically be disabled. # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when # a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS # then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but # at the end of the doxygen process doxygen will return with a non-zero status. # Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). 
WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = src/libsds/include/sds.h # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: # https://www.gnu.org/software/libiconv/) for the list of possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # # Note the list of default checked file patterns might differ from the list of # default file extension mappings. # # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), # *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, # *.ucf, *.qsf and *.ice. 
FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. 
If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen.
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. 
STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). 
# # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the # clang parser (see: # http://clang.llvm.org/) for more accurate parsing at the cost of reduced # performance. This can be particularly helpful with template rich C++ code for # which doxygen's built-in parser lacks the necessary type information. # Note: The availability of this option depends on whether or not doxygen was # generated with the -Duse_libclang=ON option for CMake. # The default value is: NO. CLANG_ASSISTED_PARSING = NO # If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to # YES then doxygen will add the directory of each input to the include path. # The default value is: YES. CLANG_ADD_INC_PATHS = YES # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. Note that # the include paths will already be set by doxygen for the files and directories # specified with INPUT and INCLUDE_PATH. # This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. CLANG_OPTIONS = # If clang assisted parsing is enabled you can provide the clang parser with the # path to the directory containing a file called compile_commands.json. This # file is the compilation database (see: # http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the # options used when the source files were built. 
This is equivalent to # specifying the -p option to a clang tool, such as clang-check. These options # will then be passed to the parser. Any options specified with CLANG_OPTIONS # will be added as well. # Note: The availability of this option depends on whether or not doxygen was # generated with the -Duse_libclang=ON option for CMake. CLANG_DATABASE_PATH = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. 
# This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. 
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = docs/custom.css # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. 
# Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 195 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 96 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to YES can help to show when doxygen was last run and thus if the # documentation is up to date. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that # are dynamically created via JavaScript. If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML # page. Disable this option to support browsers that do not have JavaScript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_DYNAMIC_MENUS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: # https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To # create a documentation set, doxygen will generate a Makefile in the HTML # output directory. Running make will produce the docset in that directory and # running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. 
A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: # https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. 
The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). 
# This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location (absolute path # including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to # run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. 
A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = YES # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. 
# This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg # tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see # https://inkscape.org) to generate formulas as SVG images instead of PNGs for # the HTML output. These images will generally look nicer at scaled resolutions. # Possible values are: png (the default) and svg (looks nicer but requires the # pdf2svg or inkscape tool). # The default value is: png. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FORMULA_FORMAT = png # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. # Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands # to create new LaTeX commands to be used in formulas as building blocks. See # the section "Including formulas" for details. 
FORMULA_MACROFILE = # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # https://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want the formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from https://www.mathjax.org before deployment. # The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering.
For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: # http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use + S # (what the is depends on the OS and browser, but it is typically # , /